#!/usr/bin/env python
# Given a certificate chain that the log won't accept, try to fix it up
# into one that will be accepted.
# Based on pyasn1 example code.
from __future__ import print_function
from base64 import b64encode
from ct.crypto.pem import PemError
from ct.crypto.pem import from_pem
from pyasn1 import debug
# Why doesn't this work?
#from pyasn1.codec.ber import stDumpRawValue
from pyasn1.codec.der import decoder
from pyasn1.codec.der import encoder
from pyasn1.error import PyAsn1Error
from pyasn1.type import namedtype
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc2459
from pyasn1_modules import rfc2315
import sys
from urllib2 import urlopen
if len(sys.argv) != 2:
print("""Usage:
$ %s somecertificates.pem""" % sys.argv[0])
sys.exit(-1)
cStart = '-----BEGIN CERTIFICATE-----'
cEnd = '-----END CERTIFICATE-----'
certType = rfc2459.Certificate()
# RFC 2459 alone is not sufficient for X.509v3 certificates; the extra structures needed are defined here.
# RFC 5280 4.2.2.1
id_pe_authorityInfoAccess = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.1')
class AccessDescription(univ.Sequence):
"""
AccessDescription ::= SEQUENCE {
accessMethod OBJECT IDENTIFIER,
accessLocation GeneralName }
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
namedtype.NamedType('accessLocation', rfc2459.GeneralName()))
class AuthorityInfoAccessSyntax(univ.SequenceOf):
"""
AuthorityInfoAccessSyntax ::=
SEQUENCE SIZE (1..MAX) OF AccessDescription
"""
# FIXME: SIZE not encoded.
componentType = AccessDescription()
id_ad_caIssuers = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.2')
# End of RFC 5280 4.2.2.1
def getIssuersFromAIA(cert):
tbs = cert.getComponentByName('tbsCertificate')
extensions = tbs.getComponentByName('extensions') or []
allIssuers = []
for extension in extensions:
oid = extension.getComponentByName('extnID')
if oid != id_pe_authorityInfoAccess:
continue
print(extension.prettyPrint())
value, rest = decoder.decode(extension.getComponentByName('extnValue'),
asn1Spec=univ.OctetString())
assert rest == ""
aia, rest = decoder.decode(value, asn1Spec=AuthorityInfoAccessSyntax())
assert rest == ""
print(aia.prettyPrint())
for ad in aia:
oid = ad.getComponentByName('accessMethod')
if oid != id_ad_caIssuers:
continue
print(ad.prettyPrint())
loc = ad.getComponentByName('accessLocation').\
getComponentByName('uniformResourceIdentifier')
print(type(loc), loc)
certHandle = urlopen(str(loc))
# RFC 5280 says this should either be 'application/pkix-cert' or
# 'application/pkcs7-mime' (in which case the result should be a
# "certs-only" PCKS#7 response, as specified in RFC 2797). Of
# course, we see other values, so just try both formats.
print(certHandle.info().gettype())
issuer = certHandle.read()
# Have we got an (incorrect, but let's fix it) PEM encoded cert?
if issuer.startswith('-----'):
try:
(issuer, _) = from_pem(issuer, ['CERTIFICATE'])
except PemError as e:
print("PEM decode failed:", e)
print("For cert:", issuer)
# Is it a certificate?
try:
cert, rest = decoder.decode(issuer, asn1Spec=certType)
assert rest == ""
allIssuers.append(cert)
continue
except PyAsn1Error as e:
# On failure, try the next thing
print("Cert decode failed:", e)
pass
# If not, it had better be PKCS#7 "certs-only"
try:
pkcs7, rest = decoder.decode(issuer, asn1Spec=rfc2315.ContentInfo())
assert rest == ""
assert pkcs7.getComponentByName('contentType') == rfc2315.signedData
signedData = decoder.decode(pkcs7.getComponentByName('content'),
asn1Spec=rfc2315.SignedData())
except PyAsn1Error as e:
# Give up
print("PKCS#7 decode also failed:", e)
print("Skipping issuer URL:", loc)
continue
for signedDatum in signedData:
# FIXME: why does this happen? Example is at
# http://crt.usertrust.com/AddTrustExternalCARoot.p7c.
if signedDatum == '':
print("** Skipping strange Any('') in PKCS7 **")
continue
certs = signedDatum.getComponentByName('certificates')
for c in certs:
cert = c.getComponentByName('certificate')
allIssuers.append(cert)
return allIssuers
# Note that this is a non-standard encoding of the DN, but unlike the
# standard encoding it captures nesting information. That is,
# attributes that are within a single RelativeDistinguishedName are
# surrounded by [].
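# For example (hypothetical values), a subject whose CN and O share one
# RelativeDistinguishedName while C sits in another would render roughly as:
#   [/2.5.4.3=example.com/2.5.4.10=Example Org][/2.5.4.6=US]
# (attribute types appear as dotted OIDs because str() is applied to the type).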
def DNToString(dn):
rdns = dn.getComponent()
ret = ''
for rdn in rdns:
ret += '['
for attr in rdn:
attrType = attr.getComponentByName('type')
if attrType == rfc2459.emailAddress:
val, rest = decoder.decode(attr.getComponentByName('value'),
asn1Spec=rfc2459.Pkcs9email())
assert rest == ""
# Strictly speaking, this is IA5, not ASCII.
val = str(val).decode('ascii')
else:
val, rest = decoder.decode(attr.getComponentByName('value'),
asn1Spec=rfc2459.X520name())
assert rest == ""
valt = val.getName()
val = val.getComponent()
if valt == 'printableString':
val = str(val)
elif valt == 'teletexString':
# Strictly this is a T.61 string. T.61 no longer exists as a
# standard and some certs mark ISO 8859-1 as
# teletexString. And we should never see this, but we do.
val = str(val).decode('iso8859-1')
elif valt == 'utf8String':
val = str(val)
else:
print(valt)
assert False
assert val is not None
ret += '/' + str(attrType) + '=' + val
ret += ']'
return ret
certs = {}
inChain = []
certfile = open(sys.argv[1])
while 1:
idx, substrate = pem.readPemBlocksFromFile(certfile, (cStart, cEnd))
if not substrate:
break
cert, rest = decoder.decode(substrate, asn1Spec=certType)
assert rest == ""
tbs = cert.getComponentByName('tbsCertificate')
subjectDN = tbs.getComponentByName('subject')
print(DNToString(subjectDN))
certs[DNToString(subjectDN)] = cert
inChain.append(cert)
#for subject, cert in certs.iteritems():
# print subject
# Assume the first cert in the chain is the end-entity (leaf) cert
outChain = [inChain[0]]
while True:
assert len(outChain) < 100
cert = outChain[-1]
tbs = cert.getComponentByName('tbsCertificate')
subjectDN = tbs.getComponentByName('subject')
print('subject:', DNToString(subjectDN))
issuerDN = tbs.getComponentByName('issuer')
#print issuerDN.prettyPrint()
issuerDNstr = DNToString(issuerDN)
print('issuer:', issuerDNstr)
print()
if issuerDN == subjectDN:
break
if issuerDNstr in certs:
issuer = certs[issuerDNstr]
else:
issuers = getIssuersFromAIA(cert)
if len(issuers) == 0:
print("Can't get issuer, giving up")
break
issuer = None
for i in issuers:
tbs = i.getComponentByName('tbsCertificate')
subjectDN = tbs.getComponentByName('subject')
print('issuer subject:', DNToString(subjectDN))
if subjectDN == issuerDN:
issuer = i
break
assert issuer is not None
outChain.append(issuer)
if len(outChain) == 1:
tbs = outChain[0].getComponentByName('tbsCertificate')
subjectDN = tbs.getComponentByName('subject')
issuerDN = tbs.getComponentByName('issuer')
if subjectDN == issuerDN:
print("Chain consists of 1 self-signed certificate")
sys.exit(1)
for cert in outChain:
print(cStart)
b64 = b64encode(encoder.encode(cert))
for n in range(0, len(b64), 64):
print(b64[n:n+64])
print(cEnd)
print('*** %d PEM cert(s) deserialized, fixed chain is %d long' % (
len(inChain),
len(outChain)))
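# Example invocation (hypothetical script name): the fixed chain is written to
# stdout as a series of PEM certificates, followed by a one-line summary.
#   $ python fix_chain.py somecertificates.pem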
'''
BSD 3-Clause License
Copyright (c) 2019, Donald N. Bockoven III
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import division
#import matplotlib.pyplot as plt
class wood_stud_wall:
def __init__(self,b_in=1.5,d_in=3.5,height_ft=10, spacing_in=12, grade="No.2", fb_psi=875, fv_psi= 150, fc_psi=1150, E_psi=1400000, Emin_psi=510000, fc_perp_pl_psi=565, moisture_percent = 19, temp = 90, incised = 0, num_plates = 0, c_frt=[1,1,1,1,1,1], compression_face=1, blocking_ft=0, no_sheathing=0, is_syp=0):
self.b_in = b_in
self.d_in = d_in
#Compute wall height either inclusive or exclusive of wall plates
self.height_in = height_ft * 12.0
if num_plates == 0:
self.height_in = self.height_in
self.assumptions = '\n\n--ASSUMPTIONS--\nDesign Stud Height = Wall Height inclusive of top and bottom plates\nCapacities noted are inclusive of wall self weight - true supporting capacity is (result - panel self weight)\n'
else:
self.height_in = self.height_in - (num_plates * 1.5)
self.assumptions = '\n\n--ASSUMPTIONS--\nDesign Stud Height = Wall Height - ({0}) 1.5" wall plates\nCapacities noted are inclusive of wall self weight - true supporting capacity is (result - panel self weight)\n'.format(num_plates)
self.spacing_in = spacing_in
self.fb_psi = fb_psi
self.fv_psi = fv_psi
self.fc_psi = fc_psi
self.Emin_psi = Emin_psi
self.E_psi = E_psi
self.fc_perp_pl_psi = fc_perp_pl_psi
self.c_frt = c_frt
self.compression_face = compression_face
self.blocking_in = 12*blocking_ft
self.no_sheathing = no_sheathing
self.defl_180 = self.height_in/180.0
self.defl_240 = self.height_in/240.0
self.defl_360 = self.height_in/360.0
#initialize warning log
self.warning = ''
#Stud Section Properties
self.I_in4 = (self.b_in * self.d_in**3)/12.0
self.area_in2 = self.b_in * self.d_in
self.s_in3 = (self.b_in * self.d_in**2)/6.0
#Repetitive Member Factor, CR
#NDS 2005 section 4.3.9
if self.spacing_in > 24:
self.cr = 1.0
else:
self.cr = 1.15
#Size Factor, Cf
#NDS 2005 section 4.3.6 and Table 4A
#NOTE: ASSUMES STUDS ARE VISUALLY GRADED DIMENSION LUMBER 2" TO 4" THICK, NORTH AMERICAN SPECIES, AND NOT SOUTHERN PINE
if is_syp == 0:
self.assumptions = self.assumptions + 'Size Factor_Cf - Wall Studs are visually graded dimensional lumber 2" to 4" North American Species and not Southern Pine\n'
if grade == "Stud":
#Per NDS 2005 Table 4A for stud grade depth >8" use No.3 size factors
if self.d_in>11.25:
self.cf_fc = 0.9
if self.b_in>2.5:
self.cf_fb = 1.0
else:
self.cf_fb = 0.9
elif self.d_in>9.25:
self.cf_fc = 1.0
if self.b_in>2.5:
self.cf_fb = 1.1
else:
self.cf_fb = 1.0
elif self.d_in>7.25:
self.cf_fc = 1.0
if self.b_in>2.5:
self.cf_fb = 1.2
else:
self.cf_fb = 1.1
elif self.d_in>5.5:
self.cf_fc = 1.05
if self.b_in>2.5:
self.cf_fb = 1.3
else:
self.cf_fb = 1.2
elif self.d_in > 3.5:
self.cf_fb = 1.0
self.cf_fc = 1.0
else:
self.cf_fc = 1.05
self.cf_fb = 1.1
elif grade == "Construction":
self.cf_fb = 1.0
self.cf_fc = 1.0
elif grade == "Utility":
if self.d_in > 2.5:
self.cf_fb = 1.0
self.cf_fc = 1.0
else:
self.cf_fc = 0.6
if self.b_in>2.5:
self.cf_fb = 1.0
else:
self.cf_fb = 0.4
else:
if self.d_in>11.25:
self.cf_fc = 0.9
if self.b_in>2.5:
self.cf_fb = 1.0
else:
self.cf_fb = 0.9
elif self.d_in>9.25:
self.cf_fc = 1.0
if self.b_in>2.5:
self.cf_fb = 1.1
else:
self.cf_fb = 1.0
elif self.d_in>7.25:
self.cf_fc = 1.0
if self.b_in>2.5:
self.cf_fb = 1.2
else:
self.cf_fb = 1.1
elif self.d_in>5.5:
self.cf_fc = 1.05
if self.b_in>2.5:
self.cf_fb = 1.3
else:
self.cf_fb = 1.2
elif self.d_in>4.5:
self.cf_fc = 1.1
self.cf_fb = 1.3
elif self.d_in>3.5:
self.cf_fc = 1.1
self.cf_fb = 1.4
else:
self.cf_fc = 1.15
self.cf_fb = 1.5
else:
self.assumptions = self.assumptions + 'Size Factor_Cf - Wall Studs are Southern Pine and stud is less than 4" thick and 12" wide. Cf = 1.0\n'
self.cf_fc = 1.0
self.cf_fb = 1.0
#Wet Service Factor, Cm
#NDS 2005 section 4.3.3 and Table 4A
#NOTE: ASSUMES STUDS ARE VISUALLY GRADED DIMENSION LUMBER 2" TO 4" THICK, NORTH AMERICAN SPECIES OR SOUTHERN PINE
self.assumptions = self.assumptions + 'Wet Service Factor_Cm - Wall Studs are visually graded dimensional lumber 2" to 4" North American Species or Southern Pine\n'
if moisture_percent > 19:
self.cm_fc_perp = 0.67
self.cm_E = 0.9
self.cm_fv = 0.97
if self.fb_psi*self.cf_fb <= 1150:
self.cm_fb = 1.0
else:
self.cm_fb = 0.85
if self.fc_psi*self.cf_fc <= 750:
self.cm_fc = 1.0
else:
self.cm_fc = 0.8
else:
self.cm_fb = 1.0
self.cm_fc = 1.0
self.cm_fc_perp = 1.0
self.cm_E = 1.0
self.cm_fv = 1.0
#Temperature Factor, Ct
#NDS 2005 section 4.3.4
if temp > 150:
self.warning = self.warning + "Ct not valid see NDS 2005 Appendix C\n"
self.ct_E = 0.01
self.ct_fb = 0.01
self.ct_fc = 0.01
self.ct_fc_perp = 0.01
self.ct_fv = 0.01
elif temp <= 100:
self.ct_E = 1.0
self.ct_fb = 1.0
self.ct_fc = 1.0
self.ct_fc_perp = 1.0
self.ct_fv = 1.0
elif temp <= 125:
self.ct_E = 0.9
if moisture_percent > 19:
self.ct_fb = 0.7
self.ct_fc = 0.7
self.ct_fc_perp = 0.7
self.ct_fv = 0.7
else:
self.ct_fb = 0.8
self.ct_fc = 0.8
self.ct_fc_perp = 0.8
self.ct_fv = 0.8
else:
self.ct_E = 0.9
if moisture_percent > 19:
self.ct_fb = 0.5
self.ct_fc = 0.5
self.ct_fc_perp = 0.5
self.ct_fv = 0.5
else:
self.ct_fb = 0.7
self.ct_fc = 0.7
self.ct_fc_perp = 0.7
self.ct_fv = 0.7
#Flat Use Factor, Cfu
#NDS 2005 section 4.3.7
self.cfu = 1.0 #Wall studs generally not loaded on flat face
self.assumptions = self.assumptions + 'Flat Use Factor_Cfu - Wall studs are not loaded on the flat face\n'
#Incising Factor, Ci
#NDS 2005 section 4.3.8
if incised == 1:
self.ci_E = 0.95
self.ci_fb = 0.8
self.ci_fc = 0.8
self.ci_fv = 0.8
self.ci_fc_perp = 1.0
else:
self.ci_E = 1.0
self.ci_fb = 1.0
self.ci_fc = 1.0
self.ci_fc_perp = 1.0
self.ci_fv = 1.0
#Buckling Stiffness Factor, CT
#NDS 2005 4.3.11
self.cT = 1.0 #Not a truss
self.assumptions = self.assumptions + 'Buckling Stiffness Factor_CT - Not Applicable for stud walls\n'
#Bearing Area Factor, Cb
#NDS 2005 4.3.12 and 3.10.4
if self.b_in < 6 :
self.cb_fc_perp = (self.b_in + 0.375) / self.b_in
self.assumptions = self.assumptions + 'Bearing Area Factor_Cb - Stud greater than 3" from bottom plate end\n'
else:
self.cb_fc_perp = 1.0
#Fv' = Fv * Cm * Ct * Ci - apply Cd in Fc and Fb functions
self.fv_prime_psi = self.fv_psi * self.cm_fv * self.ct_fv * self.ci_fv* self.c_frt[1]
#Emin' = Emin * Cm * Ct * Ci * CT - NDS 2005 Table 4.3.1
self.Emin_prime_psi = self.Emin_psi * self.cm_E * self.ct_E * self.ci_E * self.cT * self.c_frt[5]
#Beam Stability Factor, CL
#NDS 2005 section 4.3.5
if self.compression_face == 1.0:
self.cl = 1.0 #Assumes stud walls are sheathed on the compression face
self.assumptions = self.assumptions + 'Beam Stability Factor_CL - Wall studs are continuously sheathed on the compression face\n'
else:
if self.blocking_in == 0 or self.blocking_in > self.height_in:
self.lu_bending_in = self.height_in
else:
self.lu_bending_in = self.blocking_in
if self.height_in/self.d_in < 7.0:
self.cl_le = 2.06 * self.lu_bending_in
elif self.height_in/self.d_in <= 14.3:
self.cl_le = (1.63 * self.lu_bending_in)+(3*self.d_in)
else:
self.cl_le = 1.84 * self.lu_bending_in
self.Rb_cl = (self.cl_le*self.d_in/self.b_in**2)**0.5
self.Fbe_cl = (1.20 * self.Emin_prime_psi)/self.Rb_cl**2
self.assumptions = self.assumptions + 'Beam Stability Factor_CL - Wall studs are not braced on compression face - CL per design stud height\n'
#see Fb' function for remainder of CL calculation as it depends on Cd
#E' = E * Cm * Ct * Ci - NDS 2005 Table 4.3.1
self.E_prime_psi = self.E_psi * self.cm_E * self.ct_E * self.ci_E * self.c_frt[4]
#Pressure to reach deflection limits
self.defl_180_w_psf = ((self.defl_180 * 384 * self.E_prime_psi * self.I_in4) / (1728 * 5 * (self.height_in/12.0)**4))/(self.spacing_in/12.0)
self.defl_240_w_psf= ((self.defl_240 * 384 * self.E_prime_psi * self.I_in4) / (1728 * 5 * (self.height_in/12.0)**4))/(self.spacing_in/12.0)
self.defl_360_w_psf = ((self.defl_360 * 384 * self.E_prime_psi * self.I_in4) / (1728 * 5 * (self.height_in/12.0)**4))/(self.spacing_in/12.0)
#Fc,perp' = Fc,perp * Cm * Ct * Ci * Cb- NDS 2005 Table 4.3.1
self.fc_perp_pl_prime_psi = self.fc_perp_pl_psi * self.cm_fc_perp * self.ct_fc_perp * self.ci_fc_perp * self.cb_fc_perp * self.c_frt[3]
self.crushing_limit_lbs = self.area_in2 * self.fc_perp_pl_prime_psi
self.crushing_limit_lbs_no_cb = self.area_in2 * (self.fc_perp_pl_prime_psi/self.cb_fc_perp)
def fc_prime_calc(self, cd):
#apply cd to Fv'
self.fv_prime_psi_cd = self.fv_prime_psi * cd
#Fc* = reference compression design value parallel to grain multiplied by all applicable adjustment factors except Cp
self.fc_star_psi = self.fc_psi * cd * self.cm_fc * self.ct_fc * self.cf_fc * self.ci_fc * self.c_frt[2]
self.c_cp = 0.8
self.assumptions_c = 'c for Cp calculation based on sawn lumber - NDS 2005 3.7.1\n'
#Slenderness Ratio check per NDS 2005 sections 3.7.1.2 thru 3.7.1.4
kb = 1.0
kd = 1.0
self.assumptions_ke = '\nKe = 1.0 for both depth and breadth of studs - Ref NDS 2005 appendix G pin top and bottom\n'
if self.no_sheathing == 1 and self.blocking_in > 0:
leb = self.blocking_in
self.assumptions_leb = 'Le_b = {0:.2f} in. - no sheathing weak axis only braced by blocking\n Confirm load path exists for bracing force.\n'.format(leb)
elif self.no_sheathing == 1 and self.blocking_in <= 0:
leb = self.height_in
self.assumptions_leb = 'Le_b = {0:.2f} in. - no sheathing and no blocking - weak axis unbraced.\n'.format(leb)
else:
leb = 12 * kb
self.assumptions_leb = 'Le_b = 12.0 in. - continuously braced by sheathing 12" field nailing assumed\n'
led = self.height_in * kd
self.le_b = leb
#Check Le/d,b ratios less than 50 - NDS 2005 Section 3.7.1.4
if leb / self.b_in > 50 or led/self.d_in > 50:
ratio_status = 0
else:
ratio_status = 1.0
if ratio_status == 1.0:
#FcE = 0.822 * Emin' / (Le/d)^2 - NDS 2005 Section 3.7.1
self.fcE_psi = (0.822 * self.Emin_prime_psi)/(max(leb/self.b_in,led/self.d_in))**2
#Cp = ([1 + (FcE / Fc*)] / 2c ) - sqrt[ [1 + (FcE / Fc*) / 2c]^2 - (FcE / Fc*) / c] - NDS 2005 Section 3.7.1
self.cp = ((1+(self.fcE_psi/self.fc_star_psi))/(2*self.c_cp))-((((1+(self.fcE_psi/self.fc_star_psi))/(2*self.c_cp))**2)-((self.fcE_psi/self.fc_star_psi)/self.c_cp))**0.5
self.fc_prime_psi = self.fc_star_psi * self.cp
self.assumptions_cp = 'Wall studs are not tapered and not subject to NDS 2005 - 3.7.2\n'
else:
self.fc_prime_psi = 1
self.fcE_psi = 1
self.cp = 0.001
self.warning=self.warning + 'Slenderness ratio greater than 50, suggest increasing stud size or reducing wall height\n'
self.assumptions_cp = ''
return self.fc_prime_psi
def fb_prime_calc(self, cd):
#apply cd to Fv'
self.fv_prime_psi_cd = self.fv_prime_psi * cd
if self.compression_face == 1.0:
self.fb_prime_psi = self.fb_psi * cd * self.cm_fb * self.ct_fb * self.cl * self.cf_fb * self.cfu * self.ci_fb * self.cr * self.c_frt[0]
else:
self.fb_star_psi = self.fb_psi * cd * self.cm_fb * self.ct_fb * self.cf_fb * self.cfu * self.ci_fb * self.cr * self.c_frt[0]
self.fbe_fbstar = self.Fbe_cl / self.fb_star_psi
#NDS equation 3.3-6
self.cl = ((1+self.fbe_fbstar)/1.9) - ((((1+self.fbe_fbstar)/1.9)**2) - (self.fbe_fbstar)/0.95)**0.5
self.fb_prime_psi = self.fb_psi * cd * self.cm_fb * self.ct_fb * self.cl * self.cf_fb * self.cfu * self.ci_fb * self.cr * self.c_frt[0]
self.cl_calc_text = "\n\n--Calculation of CL--\nLe = {0:.3f} in - per NDS Table 3.3.3 footnote 1 \nRb = sqrt(Le*d / b^2) = {1:.3f}\nFbE = 1.20 * Emin' /Rb^2 = {2:.3f} psi\nFb* = reference bending design value multiplied by all applicable adjustment factors except Cfu, Cv, and CL\nFb* = {3:.3f} psi\nFbE/Fb* = {4:.3f}\nNDS Eq. 3.3-6\nCL = [1 + (FbE / Fb*)] / 1.9 - ( [ [1 + (FbE / Fb*)] / 1.9 ]^2 - (FbE / Fb*) / 0.95 ) ^ 1/2 = {5:.3f}".format(self.cl_le, self.Rb_cl, self.Fbe_cl, self.fb_star_psi, self.fbe_fbstar, self.cl)
return self.fb_prime_psi
def axial_and_bending(self, cd, p_lbs, m_inlbs):
fc_psi = p_lbs / self.area_in2
fb_psi = m_inlbs/self.s_in3
fc_prime = self.fc_prime_calc(cd)
fb_prime = self.fb_prime_calc(cd)
#Check that fc is less than FcE per NDS 2005 - Section 3.9.2
if fc_psi < self.fcE_psi:
#Combined ratio per NDS 2005 equation (3.9-3)
#[fc/Fc]'^2 + fb / Fb' [ 1- (fc / FcE)] <= 1.0
ratio = (fc_psi/fc_prime)**2 + (fb_psi / (fb_prime*(1-(fc_psi/self.fcE_psi))))
if ratio > 1.0:
self.warning=self.warning + 'Combined Axial and Bending ratio > 1.0\n'
return 'NG'
else:
return 'OK'
else:
self.warning=self.warning + 'fc is greater than FcE\n'
return 'NG'
def axial_capacity_w_moment(self,cd,m_inlbs,e_in):
#solve for the allowable axial load using the bisection method
a=0
b=self.area_in2 * self.fc_prime_calc(cd) #upper bound limit on axial strength
c=0
loop_max = 500
tol = 0.00001
loop = 0
p_lbs = 0
while loop<loop_max:
c = (a+b)/2.0
fc_psi = c / self.area_in2
fb_psi = (m_inlbs)/self.s_in3
fc_prime = self.fc_prime_calc(cd)
fb_prime = self.fb_prime_calc(cd)
if self.fc_prime_psi == 1 and self.fcE_psi == 1:
p_lbs = 1
loop = loop_max
else:
#Check that fc is less than FcE per NDS 2005 - Section 3.9.2
if fc_psi < self.fcE_psi:
if e_in ==0:
#Combined ratio per NDS 2005 equation (3.9-3)
#[fc/Fc]'^2 + fb / Fb' [ 1- (fc / FcE)] <= 1.0
ratio = (fc_psi/fc_prime)**2 + (fb_psi / (fb_prime*(1-(fc_psi/self.fcE_psi))))
else:
#Combined Ratio per NDS 2005 equation 15.4-1
#[fc/Fc]'^2 + (fb + fc(6e/d)[1 + 0.234 (fc / FcE)])/ Fb' [ 1- (fc / FcE)] <= 1.0
ratio = (fc_psi/fc_prime)**2 + ((fb_psi+(fc_psi*(6*e_in/self.d_in)*(1+(0.234*(fc_psi/self.fcE_psi)))))/ (fb_prime*(1-(fc_psi/self.fcE_psi))))
else:
ratio = 2.0
if ratio > 1.0:
b = c
else:
a = c
if (b-a)/2.0 <= tol:
loop = loop_max
p_lbs = c
else:
loop+=1
return p_lbs
def wall_interaction_diagram_cd(self, cd, e_in,s_in):
if s_in == 0:
diag_spacing_in = self.spacing_in
else:
diag_spacing_in = s_in
# Find bending limit pressure for each Cd ie where fb = Fb'
# fb = M/s , M in in-lbs and s in in^3
# M = w * stud height^2 / 8
# w = Fb' * s * 8 / stud height^2 * (12 in / 1 ft)
self.w_plf_limit = ((self.fb_prime_calc(cd) * self.s_in3 * 8.0) / (self.height_in**2)) * 12.0
self.w_psf_limit = self.w_plf_limit/(diag_spacing_in/12.0)
# Determine pure axial compression capacity, i.e. where fc = Fc', without consideration for plate crushing
# fc = P/a
# P = a * Fc'
if e_in == 0:
self.p_lbs_limit = self.area_in2 * self.fc_prime_calc(cd)
d=[0] #deflection at pressure x
else:
self.p_lbs_limit = self.axial_capacity_w_moment(cd,0, e_in)
d=[(((self.p_lbs_limit*e_in)*self.height_in**2)/(16.0*self.E_prime_psi*self.I_in4))]
points = 50
step = self.w_psf_limit/points
w=0
x=[0] #pressure on x-axis
y=[self.p_lbs_limit/ (diag_spacing_in /12.0)] #axial force on y-axis
for i in range(1,points):
w = step*i
x.append(w)
w_plf = w * (diag_spacing_in/12)
moment_inlbs = (((w_plf) * (self.height_in/12)**2) / 8.0)*12
deflection = (5 * (w_plf) * (self.height_in/12)**4)/(384*self.E_prime_psi*self.I_in4)*1728
p_lbs = self.axial_capacity_w_moment(cd,moment_inlbs, e_in)
p_plf = p_lbs/ (diag_spacing_in /12.0)
if e_in ==0:
deflection = deflection
else:
deflection = deflection + (((p_lbs*e_in)*self.height_in**2)/(16.0*self.E_prime_psi*self.I_in4))
d.append(deflection)
y.append(p_plf)
x.append(self.w_psf_limit)
y.append(0)
d.append((5 * (self.w_plf_limit) * (self.height_in/12)**4)/(384*self.E_prime_psi*self.I_in4)*1728)
return x,y,d
def wall_pm_diagram_cd(self, cd, e_in, s_in):
if s_in == 0:
diag_spacing_in = self.spacing_in
else:
diag_spacing_in = s_in
# Find bending limit pressure for each Cd ie where fb = Fb'
# fb = M/s , M in in-lbs and s in in^3
self.m_inlbs_limit = (self.fb_prime_calc(cd) * self.s_in3)
# Determine pure axial compression capacity, i.e. where fc = Fc', without consideration for plate crushing
# fc = P/a
# P = a * Fc'
if e_in == 0:
self.p_lbs_limit = self.area_in2 * self.fc_prime_calc(cd)
else:
self.p_lbs_limit = self.axial_capacity_w_moment(cd,0, e_in)
points = 50
step = self.m_inlbs_limit/points
m=0
x=[0] #moment on x-axis
y=[self.p_lbs_limit/ (diag_spacing_in /12.0)] #axial force on y-axis
if e_in==0:
d=[0] #deflection at equivalent uniform load x
else:
d=[(((self.p_lbs_limit*e_in)*self.height_in**2)/(16.0*self.E_prime_psi*self.I_in4))]
for i in range(1,points):
m = step*i
moment_inlbs = m
x.append(m)
w_plf = ((((m/12.0) * 8.0) / ((self.height_in/12.0)**2)))
deflection = (5 * (w_plf) * (self.height_in/12)**4)/(384*self.E_prime_psi*self.I_in4)*1728
p_lbs = self.axial_capacity_w_moment(cd,moment_inlbs, e_in)
p_plf = p_lbs / (diag_spacing_in /12.0)
y.append(p_plf)
if e_in ==0:
deflection = deflection
else:
deflection = deflection + (((p_lbs*e_in)*self.height_in**2)/(16.0*self.E_prime_psi*self.I_in4))
d.append(deflection)
x.append(self.m_inlbs_limit)
y.append(0)
w_plf = ((((self.m_inlbs_limit/12.0) * 8.0) / ((self.height_in/12.0)**2)))
d.append((5 * (w_plf) * (self.height_in/12)**4)/(384*self.E_prime_psi*self.I_in4)*1728)
return x,y,d
def wall_pm_diagram_cd_stud(self, cd, e_in):
# Find bending limit pressure for each Cd ie where fb = Fb'
# fb = M/s , M in in-lbs and s in in^3
self.m_inlbs_limit = (self.fb_prime_calc(cd) * self.s_in3)
# Determine pure axial compression capacity, i.e. where fc = Fc', without consideration for plate crushing
# fc = P/a
# P = a * Fc'
if e_in == 0:
self.p_lbs_limit = self.area_in2 * self.fc_prime_calc(cd)
else:
self.p_lbs_limit = self.axial_capacity_w_moment(cd,0, e_in)
points = 50
step = self.m_inlbs_limit/points
m=0
x=[0] #moment on x-axis
y=[self.p_lbs_limit] #axial force on y-axis
if e_in==0:
d=[0] #deflection at pressure x
else:
d=[(((self.p_lbs_limit*e_in)*self.height_in**2)/(16.0*self.E_prime_psi*self.I_in4))]
for i in range(1,points):
m = step*i
moment_inlbs = m
x.append(m)
w_plf = ((((m/12.0) * 8.0) / ((self.height_in/12.0)**2)))
deflection = (5 * (w_plf) * (self.height_in/12)**4)/(384*self.E_prime_psi*self.I_in4)*1728
p_lbs = self.axial_capacity_w_moment(cd,moment_inlbs, e_in)
y.append(p_lbs)
if e_in ==0:
deflection = deflection
else:
deflection = deflection + (((p_lbs*e_in)*self.height_in**2)/(16.0*self.E_prime_psi*self.I_in4))
d.append(deflection)
x.append(self.m_inlbs_limit)
y.append(0)
w_plf = ((((self.m_inlbs_limit/12.0) * 8.0) / ((self.height_in/12.0)**2)))
d.append((5 * (w_plf) * (self.height_in/12)**4)/(384*self.E_prime_psi*self.I_in4)*1728)
return x,y,d
def cap_at_common_spacing(self, cd,lateral_w_psf, e_in, crush=1):
spacings = [4,6,8,12,16,24]
res_string = 'Axial Capacity at 4" - 6" - 8" - 12" - 16" - 24" spacings:\n'
self.cap_at_common = []
for s in spacings:
w_plf = lateral_w_psf * (s/12.0)
m_inlbs = ((w_plf * (self.height_in/12.0)**2)/8.0)*12
deflection = (5 * (w_plf) * (self.height_in/12)**4)/(384*self.E_prime_psi*self.I_in4)*1728
p_lbs = self.axial_capacity_w_moment(cd,m_inlbs,e_in)
if crush == 1:
p_lbs = min(p_lbs,self.crushing_limit_lbs)
else:
p_lbs = p_lbs
p_plf = p_lbs / (s/12.0)
if e_in ==0:
deflection = deflection
else:
deflection = deflection + (((p_lbs*e_in)*self.height_in**2)/(16.0*self.E_prime_psi*self.I_in4))
d_ratio = self.height_in / deflection
d_string = 'H/{0:.1f}'.format(d_ratio)
res_string = res_string + '{0:.3f} ft - {1}" O.C. - {2:.2f} Lbs ({3:.2f} plf) - {4}\n'.format(self.height_in/12.0,s,p_lbs,p_plf,d_string)
res_list = [s,p_lbs,p_plf,d_string]
self.cap_at_common.append(res_list)
return res_string
'''
#Cd - NDS 2005 Table 2.3.2
cd = [0.9,1.0,1.15,1.25,1.6,2.0]
wall = wood_stud_wall(1.5,5.5,18,16,"No.2",875,150,1150,1400000,510000,200,19,90,0,0,[1,1,1,1,1,1])
fc_prime = wall.fc_prime_calc(1.0)
cp = wall.cp
print '---Warnings--\n'
print wall.warning
print wall.assumptions
fig, ax1 = plt.subplots()
ax1.minorticks_on()
ax1.grid(b=True, which='major', color='k', linestyle='-', alpha=0.3)
ax1.grid(b=True, which='minor', color='g', linestyle='-', alpha=0.1)
ax2 = ax1.twinx()
for x in cd:
w,p,d = wall.wall_interaction_diagram_cd(x, 5.5/6.0, 0)
ax1.plot(w,p)
ax1.plot([0,max(w)],[wall.crushing_limit_lbs,wall.crushing_limit_lbs])
ax1.plot([0,max(w)],[wall.crushing_limit_lbs_no_cb,wall.crushing_limit_lbs_no_cb])
ax2.plot(w,d)
ax2.plot([wall.defl_180_w_psf,wall.defl_180_w_psf],[0,max(d)])
ax2.plot([wall.defl_240_w_psf,wall.defl_240_w_psf],[0,max(d)])
ax2.plot([wall.defl_360_w_psf,wall.defl_360_w_psf],[0,max(d)])
ax1.set_ylabel('Axial (lbs)')
ax1.set_xlabel('Pressure (psf)')
ax2.set_ylabel('Deflection (in)')
plt.title('2x6 SPF No.2 - 18 ft tall - 16" spacing')
fig.tight_layout()
plt.show()
'''
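'''
#Usage sketch - all numeric inputs below are illustrative assumptions:
#tabulate axial capacity at the common stud spacings for a 2x6 No.2 stud wall,
#10 ft tall, carrying a 5 psf lateral pressure with no load eccentricity.
wall_10ft = wood_stud_wall(b_in=1.5, d_in=5.5, height_ft=10, spacing_in=16,
                           grade="No.2", fb_psi=875, fv_psi=135, fc_psi=1150,
                           E_psi=1400000, Emin_psi=510000, fc_perp_pl_psi=425)
print(wall_10ft.cap_at_common_spacing(cd=1.6, lateral_w_psf=5.0, e_in=0))
print(wall_10ft.warning)
print(wall_10ft.assumptions)
'''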
import os
import tempfile
from io import StringIO
from wsgiref.util import FileWrapper
from django import forms
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import GroupAdmin, UserAdmin
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.core.mail import EmailMessage
from django.db import models
from django.forms.models import BaseModelFormSet
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from .forms import MediaActionForm
from .models import (
Actor, AdminOrderedAdminMethod, AdminOrderedCallable, AdminOrderedField,
AdminOrderedModelMethod, Album, Answer, Article, BarAccount, Book,
Bookmark, Category, Chapter, ChapterXtra1, Child, ChildOfReferer, Choice,
City, Collector, Color, Color2, ComplexSortedPerson, CoverLetter,
CustomArticle, CyclicOne, CyclicTwo, DependentChild, DooHickey, EmptyModel,
EmptyModelHidden, EmptyModelMixin, EmptyModelVisible, ExplicitlyProvidedPK,
ExternalSubscriber, Fabric, FancyDoodad, FieldOverridePost,
FilteredManager, FooAccount, FoodDelivery, FunkyTag, Gadget, Gallery,
GenRelReference, Grommet, ImplicitlyGeneratedPK, Ingredient,
InlineReference, InlineReferer, Inquisition, Language, Link,
MainPrepopulated, ModelWithStringPrimaryKey, NotReferenced, OldSubscriber,
OtherStory, Paper, Parent, ParentWithDependentChildren, ParentWithUUIDPK,
Person, Persona, Picture, Pizza, Plot, PlotDetails, PlotProxy,
PluggableSearchPerson, Podcast, Post, PrePopulatedPost,
PrePopulatedPostLargeSlug, PrePopulatedSubPost, Promo, Question, Recipe,
Recommendation, Recommender, ReferencedByGenRel, ReferencedByInline,
ReferencedByParent, RelatedPrepopulated, RelatedWithUUIDPKModel, Report,
Reservation, Restaurant, RowLevelChangePermissionModel, Section,
ShortMessage, Simple, Sketch, State, Story, StumpJoke, Subscriber,
SuperVillain, Telegram, Thing, Topping, UnchangeableObject,
UndeletableObject, UnorderedObject, UserMessenger, Villain, Vodcast,
Whatsit, Widget, Worker, WorkHour,
)
def callable_year(dt_value):
try:
return dt_value.year
except AttributeError:
return None
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
model = Article
fk_name = 'section'
prepopulated_fields = {
'title': ('content',)
}
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section')
})
)
class ChapterInline(admin.TabularInline):
model = Chapter
class ChapterXtra1Admin(admin.ModelAdmin):
list_filter = ('chap',
'chap__title',
'chap__book',
'chap__book__name',
'chap__book__promo',
'chap__book__promo__name',)
class ArticleAdmin(admin.ModelAdmin):
list_display = (
'content', 'date', callable_year, 'model_year', 'modeladmin_year',
'model_year_reversed', 'section', lambda obj: obj.title,
)
list_editable = ('section',)
list_filter = ('date', 'section')
view_on_site = False
fieldsets = (
('Some fields', {
'classes': ('collapse',),
'fields': ('title', 'content')
}),
('Some other fields', {
'classes': ('wide',),
'fields': ('date', 'section', 'sub_section')
})
)
def changelist_view(self, request):
return super().changelist_view(request, extra_context={'extra_var': 'Hello!'})
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
modeladmin_year.short_description = None
def delete_model(self, request, obj):
EmailMessage(
'Greetings from a deleted object',
'I hereby inform you that some user deleted me',
'from@example.com',
['to@example.com']
).send()
return super().delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
'Greetings from a created object',
'I hereby inform you that some user created me',
'from@example.com',
['to@example.com']
).send()
return super().save_model(request, obj, form, change)
class ArticleAdmin2(admin.ModelAdmin):
def has_module_permission(self, request):
return False
class RowLevelChangePermissionModelAdmin(admin.ModelAdmin):
def has_change_permission(self, request, obj=None):
""" Only allow changing objects with even id number """
return request.user.is_staff and (obj is not None) and (obj.id % 2 == 0)
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
add_form_template = 'custom_admin/add_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
delete_selected_confirmation_template = 'custom_admin/delete_selected_confirmation.html'
popup_response_template = 'custom_admin/popup_response.html'
def changelist_view(self, request):
return super().changelist_view(request, extra_context={'extra_var': 'Hello!'})
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color__warm', 'color__value', 'pub_date',)
class InquisitionAdmin(admin.ModelAdmin):
list_display = ('leader', 'country', 'expected', 'sketch')
def sketch(self, obj):
# A method with the same name as a reverse accessor.
return 'list-display-sketch'
class SketchAdmin(admin.ModelAdmin):
raw_id_fields = ('inquisition', 'defendant0', 'defendant1')
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class BasePersonModelFormSet(BaseModelFormSet):
def clean(self):
for person_dict in self.cleaned_data:
person = person_dict.get('id')
alive = person_dict.get('alive')
if person and alive and person.name == "Grace Hopper":
raise forms.ValidationError("Grace is not a Zombie")
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'gender', 'alive')
list_editable = ('gender', 'alive')
list_filter = ('gender',)
search_fields = ('^name',)
save_as = True
def get_changelist_formset(self, request, **kwargs):
return super().get_changelist_formset(request, formset=BasePersonModelFormSet, **kwargs)
def get_queryset(self, request):
# Order by a field that isn't in list display, to be able to test
# whether ordering is preserved.
return super().get_queryset(request).order_by('age')
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class SubscriberAdmin(admin.ModelAdmin):
actions = ['mail_admin']
action_form = MediaActionForm
def mail_admin(self, request, selected):
EmailMessage(
'Greetings from a ModelAdmin action',
'This is the test email from an admin action',
'from@example.com',
['to@example.com']
).send()
def external_mail(modeladmin, request, selected):
EmailMessage(
'Greetings from a function action',
'This is the test email from a function action',
'from@example.com',
['to@example.com']
).send()
external_mail.short_description = 'External mail (Another awesome action)'
def redirect_to(modeladmin, request, selected):
from django.http import HttpResponseRedirect
return HttpResponseRedirect('/some-where-else/')
redirect_to.short_description = 'Redirect to (Awesome action)'
def download(modeladmin, request, selected):
buf = StringIO('This is the content of the file')
return StreamingHttpResponse(FileWrapper(buf))
download.short_description = 'Download subscription'
def no_perm(modeladmin, request, selected):
return HttpResponse(content='No permission to perform this action',
status=403)
no_perm.short_description = 'No permission to run'
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [redirect_to, external_mail, download, no_perm]
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
date_hierarchy = 'release_date'
ordering = ('name',)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
model = Parent
inlines = [ChildInline]
save_as = True
list_display = ('id', 'name',)
list_display_links = ('id',)
list_editable = ('name',)
def save_related(self, request, form, formsets, change):
super().save_related(request, form, formsets, change)
first_name, last_name = form.instance.name.split()
for child in form.instance.child_set.all():
if len(child.name.split()) < 2:
child.name = child.name + ' ' + last_name
child.save()
class EmptyModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return super().get_queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
class RecommendationAdmin(admin.ModelAdmin):
show_full_result_count = False
search_fields = ('=titletranslation__text', '=the_recommender__titletranslation__text',)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [
WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline,
FancyDoodadInline, CategoryInline
]
class LinkInline(admin.TabularInline):
model = Link
extra = 1
readonly_fields = ("posted", "multiline", "readonly_link_content")
def multiline(self, instance):
return "InlineMultiline\ntest\nstring"
class SubPostInline(admin.TabularInline):
model = PrePopulatedSubPost
prepopulated_fields = {
'subslug': ('subtitle',)
}
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('subslug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PrePopulatedPostAdmin(admin.ModelAdmin):
list_display = ['title', 'slug']
prepopulated_fields = {
'slug': ('title',)
}
inlines = [SubPostInline]
def get_readonly_fields(self, request, obj=None):
if obj and obj.published:
return ('slug',)
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
if obj and obj.published:
return {}
return self.prepopulated_fields
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'public']
readonly_fields = (
'posted', 'awesomeness_level', 'coolness', 'value',
'multiline', 'multiline_html', lambda obj: "foo",
'readonly_content',
)
inlines = [
LinkInline
]
def coolness(self, instance):
if instance.pk:
return "%d amount of cool." % instance.pk
else:
return "Unknown coolness."
def value(self, instance):
return 1000
value.short_description = 'Value in $US'
def multiline(self, instance):
return "Multiline\ntest\nstring"
def multiline_html(self, instance):
return mark_safe("Multiline<br>\nhtml<br>\ncontent")
class FieldOverridePostForm(forms.ModelForm):
model = FieldOverridePost
class Meta:
help_texts = {
'posted': 'Overridden help text for the date',
}
labels = {
'public': 'Overridden public label',
}
class FieldOverridePostAdmin(PostAdmin):
form = FieldOverridePostForm
class CustomChangeList(ChangeList):
def get_queryset(self, request):
return self.root_queryset.order_by('pk').filter(pk=9999) # Doesn't exist
class GadgetAdmin(admin.ModelAdmin):
def get_changelist(self, request, **kwargs):
return CustomChangeList
class ToppingAdmin(admin.ModelAdmin):
readonly_fields = ('pizzas',)
class PizzaAdmin(admin.ModelAdmin):
readonly_fields = ('toppings',)
class WorkHourAdmin(admin.ModelAdmin):
list_display = ('datum', 'employee')
list_filter = ('employee',)
class FoodDeliveryAdmin(admin.ModelAdmin):
list_display = ('reference', 'driver', 'restaurant')
list_editable = ('driver', 'restaurant')
class CoverLetterAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing CoverLetter
instances. Note that the CoverLetter model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).defer('date_written')
class PaperAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Paper
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).only('title')
class ShortMessageAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses defer(), to test
verbose_name display in messages shown after adding/editing ShortMessage
instances.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).defer('timestamp')
class TelegramAdmin(admin.ModelAdmin):
"""
A ModelAdmin with a custom get_queryset() method that uses only(), to test
verbose_name display in messages shown after adding/editing Telegram
instances. Note that the Telegram model defines a __str__ method.
For testing fix for ticket #14529.
"""
def get_queryset(self, request):
return super().get_queryset(request).only('title')
class StoryForm(forms.ModelForm):
class Meta:
widgets = {'title': forms.HiddenInput}
class StoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title',) # 'id' not in list_display_links
list_editable = ('content', )
form = StoryForm
ordering = ['-id']
class OtherStoryAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
list_display_links = ('title', 'id') # 'id' in list_display_links
list_editable = ('content', )
ordering = ['-id']
class ComplexSortedPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age', 'is_employee', 'colored_name')
ordering = ('name',)
def colored_name(self, obj):
return format_html('<span style="color: #ff00ff;">{}</span>', obj.name)
colored_name.admin_order_field = 'name'
class PluggableSearchPersonAdmin(admin.ModelAdmin):
list_display = ('name', 'age')
search_fields = ('name',)
def get_search_results(self, request, queryset, search_term):
queryset, use_distinct = super().get_search_results(request, queryset, search_term)
try:
search_term_as_int = int(search_term)
except ValueError:
pass
else:
queryset |= self.model.objects.filter(age=search_term_as_int)
return queryset, use_distinct
class AlbumAdmin(admin.ModelAdmin):
list_filter = ['title']
class PrePopulatedPostLargeSlugAdmin(admin.ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
class AdminOrderedFieldAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'order')
class AdminOrderedModelMethodAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', 'some_order')
class AdminOrderedAdminMethodAdmin(admin.ModelAdmin):
def some_admin_order(self, obj):
return obj.order
some_admin_order.admin_order_field = 'order'
ordering = ('order',)
list_display = ('stuff', 'some_admin_order')
def admin_ordered_callable(obj):
return obj.order
admin_ordered_callable.admin_order_field = 'order'
class AdminOrderedCallableAdmin(admin.ModelAdmin):
ordering = ('order',)
list_display = ('stuff', admin_ordered_callable)
class ReportAdmin(admin.ModelAdmin):
def extra(self, request):
return HttpResponse()
def get_urls(self):
# Corner case: Don't call parent implementation
return [
url(r'^extra/$',
self.extra,
name='cable_extra'),
]
class CustomTemplateBooleanFieldListFilter(BooleanFieldListFilter):
template = 'custom_filter_template.html'
class CustomTemplateFilterColorAdmin(admin.ModelAdmin):
list_filter = (('warm', CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
class RelatedPrepopulatedInline1(admin.StackedInline):
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2',),)
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class RelatedPrepopulatedInline2(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
prepopulated_fields = {'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name']}
class MainPrepopulatedAdmin(admin.ModelAdmin):
inlines = [RelatedPrepopulatedInline1, RelatedPrepopulatedInline2]
fieldsets = (
(None, {
'fields': (('pubdate', 'status'), ('name', 'slug1', 'slug2', 'slug3'))
}),
)
formfield_overrides = {models.CharField: {'strip': False}}
prepopulated_fields = {
'slug1': ['name', 'pubdate'],
'slug2': ['status', 'name'],
'slug3': ['name'],
}
class UnorderedObjectAdmin(admin.ModelAdmin):
list_display = ['id', 'name']
list_display_links = ['id']
list_editable = ['name']
list_per_page = 2
class UndeletableObjectAdmin(admin.ModelAdmin):
def change_view(self, *args, **kwargs):
kwargs['extra_context'] = {'show_delete': False}
return super().change_view(*args, **kwargs)
class UnchangeableObjectAdmin(admin.ModelAdmin):
def get_urls(self):
# Disable change_view, but leave other urls untouched
urlpatterns = super().get_urls()
return [p for p in urlpatterns if p.name and not p.name.endswith("_change")]
def callable_on_unknown(obj):
return obj.unknown
class AttributeErrorRaisingAdmin(admin.ModelAdmin):
list_display = [callable_on_unknown, ]
class CustomManagerAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return FilteredManager.objects
class MessageTestingAdmin(admin.ModelAdmin):
actions = ["message_debug", "message_info", "message_success",
"message_warning", "message_error", "message_extra_tags"]
def message_debug(self, request, selected):
self.message_user(request, "Test debug", level="debug")
def message_info(self, request, selected):
self.message_user(request, "Test info", level="info")
def message_success(self, request, selected):
self.message_user(request, "Test success", level="success")
def message_warning(self, request, selected):
self.message_user(request, "Test warning", level="warning")
def message_error(self, request, selected):
self.message_user(request, "Test error", level="error")
def message_extra_tags(self, request, selected):
self.message_user(request, "Test tags", extra_tags="extra_tag")
class ChoiceList(admin.ModelAdmin):
list_display = ['choice']
readonly_fields = ['choice']
fields = ['choice']
class DependentChildAdminForm(forms.ModelForm):
"""
Issue #20522
Form to test child dependency on parent object's validation
"""
def clean(self):
parent = self.cleaned_data.get('parent')
if parent.family_name and parent.family_name != self.cleaned_data.get('family_name'):
raise ValidationError("Children must share a family name with their parents " +
"in this contrived test case")
return super().clean()
class DependentChildInline(admin.TabularInline):
model = DependentChild
form = DependentChildAdminForm
class ParentWithDependentChildrenAdmin(admin.ModelAdmin):
inlines = [DependentChildInline]
# Tests for ticket 11277 ----------------------------------
class FormWithoutHiddenField(forms.ModelForm):
first = forms.CharField()
second = forms.CharField()
class FormWithoutVisibleField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField(widget=forms.HiddenInput)
class FormWithVisibleAndHiddenField(forms.ModelForm):
first = forms.CharField(widget=forms.HiddenInput)
second = forms.CharField()
class EmptyModelVisibleAdmin(admin.ModelAdmin):
form = FormWithoutHiddenField
fieldsets = (
(None, {
'fields': (('first', 'second'),),
}),
)
class EmptyModelHiddenAdmin(admin.ModelAdmin):
form = FormWithoutVisibleField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class EmptyModelMixinAdmin(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
class CityInlineAdmin(admin.TabularInline):
model = City
view_on_site = False
class StateAdmin(admin.ModelAdmin):
inlines = [CityInlineAdmin]
class RestaurantInlineAdmin(admin.TabularInline):
model = Restaurant
view_on_site = True
class CityAdmin(admin.ModelAdmin):
inlines = [RestaurantInlineAdmin]
view_on_site = True
class WorkerAdmin(admin.ModelAdmin):
def view_on_site(self, obj):
return '/worker/%s/%s/' % (obj.surname, obj.name)
class WorkerInlineAdmin(admin.TabularInline):
model = Worker
def view_on_site(self, obj):
return '/worker_inline/%s/%s/' % (obj.surname, obj.name)
class RestaurantAdmin(admin.ModelAdmin):
inlines = [WorkerInlineAdmin]
view_on_site = False
def get_changeform_initial_data(self, request):
return {'name': 'overridden_value'}
class FunkyTagAdmin(admin.ModelAdmin):
list_display = ('name', 'content_object')
class InlineReferenceInline(admin.TabularInline):
model = InlineReference
class InlineRefererAdmin(admin.ModelAdmin):
inlines = [InlineReferenceInline]
class PlotReadonlyAdmin(admin.ModelAdmin):
readonly_fields = ('plotdetails',)
class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin):
fields = ['name']
def add_view(self, request, *args, **kwargs):
request.is_add_view = True
return super().add_view(request, *args, **kwargs)
def change_view(self, request, *args, **kwargs):
request.is_add_view = False
return super().change_view(request, *args, **kwargs)
def get_formsets_with_inlines(self, request, obj=None):
if request.is_add_view and obj is not None:
raise Exception("'obj' passed to get_formsets_with_inlines wasn't None during add_view")
if not request.is_add_view and obj is None:
raise Exception("'obj' passed to get_formsets_with_inlines was None during change_view")
return super().get_formsets_with_inlines(request, obj)
site = admin.AdminSite(name="admin")
site.site_url = '/my-site-url/'
site.register(Article, ArticleAdmin)
site.register(CustomArticle, CustomArticleAdmin)
site.register(Section, save_as=True, inlines=[ArticleInline], readonly_fields=['name_property'])
site.register(ModelWithStringPrimaryKey)
site.register(Color)
site.register(Thing, ThingAdmin)
site.register(Actor)
site.register(Inquisition, InquisitionAdmin)
site.register(Sketch, SketchAdmin)
site.register(Person, PersonAdmin)
site.register(Persona, PersonaAdmin)
site.register(Subscriber, SubscriberAdmin)
site.register(ExternalSubscriber, ExternalSubscriberAdmin)
site.register(OldSubscriber, OldSubscriberAdmin)
site.register(Podcast, PodcastAdmin)
site.register(Vodcast, VodcastAdmin)
site.register(Parent, ParentAdmin)
site.register(EmptyModel, EmptyModelAdmin)
site.register(Fabric, FabricAdmin)
site.register(Gallery, GalleryAdmin)
site.register(Picture, PictureAdmin)
site.register(Language, LanguageAdmin)
site.register(Recommendation, RecommendationAdmin)
site.register(Recommender)
site.register(Collector, CollectorAdmin)
site.register(Category, CategoryAdmin)
site.register(Post, PostAdmin)
site.register(FieldOverridePost, FieldOverridePostAdmin)
site.register(Gadget, GadgetAdmin)
site.register(Villain)
site.register(SuperVillain)
site.register(Plot)
site.register(PlotDetails)
site.register(PlotProxy, PlotReadonlyAdmin)
site.register(Bookmark)
site.register(CyclicOne)
site.register(CyclicTwo)
site.register(WorkHour, WorkHourAdmin)
site.register(Reservation)
site.register(FoodDelivery, FoodDeliveryAdmin)
site.register(RowLevelChangePermissionModel, RowLevelChangePermissionModelAdmin)
site.register(Paper, PaperAdmin)
site.register(CoverLetter, CoverLetterAdmin)
site.register(ShortMessage, ShortMessageAdmin)
site.register(Telegram, TelegramAdmin)
site.register(Story, StoryAdmin)
site.register(OtherStory, OtherStoryAdmin)
site.register(Report, ReportAdmin)
site.register(MainPrepopulated, MainPrepopulatedAdmin)
site.register(UnorderedObject, UnorderedObjectAdmin)
site.register(UndeletableObject, UndeletableObjectAdmin)
site.register(UnchangeableObject, UnchangeableObjectAdmin)
site.register(State, StateAdmin)
site.register(City, CityAdmin)
site.register(Restaurant, RestaurantAdmin)
site.register(Worker, WorkerAdmin)
site.register(FunkyTag, FunkyTagAdmin)
site.register(ReferencedByParent)
site.register(ChildOfReferer)
site.register(ReferencedByInline)
site.register(InlineReferer, InlineRefererAdmin)
site.register(ReferencedByGenRel)
site.register(GenRelReference)
site.register(ParentWithUUIDPK)
site.register(RelatedWithUUIDPKModel)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
# related ForeignKey object registered in admin
# related ForeignKey object not registered in admin
# related OneToOne object registered in admin
# related OneToOne object not registered in admin
# when deleting Book, so as to exercise all four paths through
# contrib.admin.utils's get_deleted_objects function.
site.register(Book, inlines=[ChapterInline])
site.register(Promo)
site.register(ChapterXtra1, ChapterXtra1Admin)
site.register(Pizza, PizzaAdmin)
site.register(Topping, ToppingAdmin)
site.register(Album, AlbumAdmin)
site.register(Question)
site.register(Answer, date_hierarchy='question__posted')
site.register(PrePopulatedPost, PrePopulatedPostAdmin)
site.register(ComplexSortedPerson, ComplexSortedPersonAdmin)
site.register(FilteredManager, CustomManagerAdmin)
site.register(PluggableSearchPerson, PluggableSearchPersonAdmin)
site.register(PrePopulatedPostLargeSlug, PrePopulatedPostLargeSlugAdmin)
site.register(AdminOrderedField, AdminOrderedFieldAdmin)
site.register(AdminOrderedModelMethod, AdminOrderedModelMethodAdmin)
site.register(AdminOrderedAdminMethod, AdminOrderedAdminMethodAdmin)
site.register(AdminOrderedCallable, AdminOrderedCallableAdmin)
site.register(Color2, CustomTemplateFilterColorAdmin)
site.register(Simple, AttributeErrorRaisingAdmin)
site.register(UserMessenger, MessageTestingAdmin)
site.register(Choice, ChoiceList)
site.register(ParentWithDependentChildren, ParentWithDependentChildrenAdmin)
site.register(EmptyModelHidden, EmptyModelHiddenAdmin)
site.register(EmptyModelVisible, EmptyModelVisibleAdmin)
site.register(EmptyModelMixin, EmptyModelMixinAdmin)
site.register(StumpJoke)
site.register(Recipe)
site.register(Ingredient)
site.register(NotReferenced)
site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin)
site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin)
# Register core models we need in our tests
site.register(User, UserAdmin)
site.register(Group, GroupAdmin)
# Used to test URL namespaces
site2 = admin.AdminSite(name="namespaced_admin")
site2.register(User, UserAdmin)
site2.register(Group, GroupAdmin)
site2.register(ParentWithUUIDPK)
site2.register(
RelatedWithUUIDPKModel,
list_display=['pk', 'parent'],
list_editable=['parent'],
raw_id_fields=['parent'],
)
site2.register(Person, save_as_continue=False)
site7 = admin.AdminSite(name="admin7")
site7.register(Article, ArticleAdmin2)
site7.register(Section)
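# Minimal sketch of wiring these AdminSite instances into a URLconf; the URL
# prefixes and the import path below are illustrative, not part of this module:
#
# from django.conf.urls import url
# from .admin import site, site2, site7
#
# urlpatterns = [
#     url(r'^test_admin/admin/', site.urls),
#     url(r'^test_admin/namespaced_admin/', site2.urls),
#     url(r'^test_admin/admin7/', site7.urls),
# ]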
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for remote eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import remote
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
JOB_NAME = "remote_device"
ALT_JOB_NAME = "alt_remote_device"
def get_server_def(job_name, local_server_port, remote_server_addresses,
task_index):
"""Returns a server def with a single job + multiple tasks."""
cluster_def = cluster_pb2.ClusterDef()
job_def = cluster_def.job.add()
job_def.name = job_name
job_def.tasks[0] = "localhost:%d" % local_server_port
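  # Task 0 is the local in-process server; the remote servers fill tasks 1..N.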
for i, remote_server_address in enumerate(remote_server_addresses, start=1):
job_def.tasks[i] = remote_server_address
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_def,
job_name=job_name,
task_index=task_index,
protocol="grpc")
return server_def
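# Example sketch (hypothetical addresses): a cluster with two remote workers
# where the caller is task 0:
#   get_server_def(JOB_NAME, local_server_port=0,
#                  remote_server_addresses=["localhost:2222", "localhost:2223"],
#                  task_index=0)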
class RemoteExecutionTest(test.TestCase, parameterized.TestCase):
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(RemoteExecutionTest, self).__init__(methodName)
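    # Start two local gRPC servers once; every test reuses them as the remote
    # tasks of the cluster.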
self._cached_server1 = server_lib.Server.create_local_server()
self._cached_server2 = server_lib.Server.create_local_server()
self._cached_server1_target = self._cached_server1.target[len("grpc://"):]
self._cached_server2_target = self._cached_server2.target[len("grpc://"):]
def setUp(self):
super(RemoteExecutionTest, self).setUp()
local_port = pywrap_tfe.TF_PickUnusedPortOrDie()
context.set_server_def(
server_def=get_server_def(
JOB_NAME,
local_server_port=local_port,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target
],
task_index=0))
def tearDown(self):
super(RemoteExecutionTest, self).tearDown()
# Clear the current device scope and reset the context to avoid polluting
# other test cases.
ops.device(None).__enter__()
context._reset_context()
@test_util.run_in_async_and_sync_mode
@test_util.run_gpu_only
def testGpuToRemoteCopy(self):
"""Tests that the remote copy happens satisfactorily."""
x1 = array_ops.ones([2, 2]).gpu()
with ops.device("/job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
x2 = x1._copy() # pylint: disable=protected-access
np.testing.assert_array_equal(x1.numpy(), x2.numpy())
@test_util.run_in_async_and_sync_mode
@test_util.run_gpu_only
def testGpuToRemoteOp(self):
with ops.device("gpu:0"):
x = array_ops.ones([2, 2])
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
y = math_ops.matmul(x, x)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@test_util.run_in_async_and_sync_mode
def testDefunMatmul(self):
"""Basic remote eager execution with defun."""
mm_defun = function.defun(math_ops.matmul)
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
x1 = array_ops.ones([2, 2])
with ops.device("job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME):
x2 = array_ops.ones([2, 2])
y = mm_defun(x1, x2)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@test_util.run_in_async_and_sync_mode
def testSimpleMatmul(self):
"""Basic remote eager execution."""
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
x1 = array_ops.ones([2, 2])
with ops.device("job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME):
x2 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x2)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
def testEagerPyFuncPlacement(self):
if not ops.executing_eagerly_outside_functions():
return
def f(x):
return math_ops.square(x)
with ops.device("/job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
const_op = constant_op.constant(3.0, dtype=dtypes.float32)
# PyFuncOp should be placed on the localhost's address space.
py_func_op = script_ops.eager_py_func(
func=f, inp=[const_op], Tout=dtypes.float32)
self.assertEqual(py_func_op.device,
"/job:%s/replica:0/task:0/device:CPU:0" % JOB_NAME)
self.assertEqual(self.evaluate(py_func_op), 9.0)
@test_util.run_in_async_and_sync_mode
def testSimpleWeightRead(self):
"""Basic remote eager weight read."""
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
w = resource_variable_ops.ResourceVariable([[2.0]])
loss = w * w
np.testing.assert_array_equal([[4.0]], loss.numpy())
@test_util.run_in_async_and_sync_mode
def testTapeWeightRead(self):
"""Remote eager weight read in a tape."""
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
w = resource_variable_ops.ResourceVariable([[3.0]])
with backprop.GradientTape() as tape:
loss = w * w
grad = tape.gradient(loss, w)
np.testing.assert_array_equal([[9.0]], loss.numpy())
np.testing.assert_array_equal([[6.0]], grad.numpy())
@test_util.run_in_async_and_sync_mode
def testServerDefChanged(self):
"""Update server def, and run ops on new cluster."""
context.set_server_def(
server_def=get_server_def(
ALT_JOB_NAME,
local_server_port=0,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target
],
task_index=0))
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % ALT_JOB_NAME):
x1 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x1)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
# Set the server def back to JOB_NAME
context.set_server_def(
server_def=get_server_def(
JOB_NAME,
local_server_port=0,
remote_server_addresses=[
self._cached_server1_target, self._cached_server2_target
],
task_index=0))
with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
x1 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x1)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@test_util.run_in_async_and_sync_mode
def testConnectToRemoteServer(self):
"""Basic server connection."""
context._reset_context()
remote.connect_to_remote_host(self._cached_server1_target)
with ops.device("job:worker/replica:0/task:0/device:CPU:0"):
x1 = array_ops.ones([2, 2])
x2 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x2)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
@test_util.run_in_async_and_sync_mode
def testContextDeviceUpdated(self):
"""Tests that the context device is correctly updated."""
with ops.device("cpu:0"):
x1 = array_ops.ones([2, 2])
x2 = array_ops.ones([2, 2])
y = math_ops.matmul(x1, x2)
np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
# `y` is placed on the local CPU as expected.
self.assertEqual(y.device,
"/job:%s/replica:0/task:0/device:CPU:0" % JOB_NAME)
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
|
# -*- coding: utf-8 -*-
"""
"""
import os
import re
import copy
import odml
import csv
import datetime
import xlrd
from future.utils import iteritems
from six import string_types
# Workaround Python 2 and 3 unicode handling.
try:
unicode = unicode
except NameError:
unicode = str
class OdmlTable(object):
"""
Class to create tables in different formats from odml-files
    :param show_all_sections: if set to False, section information (such as
        the path or name of the section) is omitted from a row when it is the
        same as in the previous row
    :param show_all_properties: if set to False, property information (such as
        the name or definition of the property) is omitted from a row when it
        is the same as in the previous row
:type show_all_sections: bool
:type show_all_properties: bool
"""
def __init__(self, load_from=None):
self.show_odml_warnings = False
self._odmldict = None
self._docdict = None
self.odtypes = OdmlDtypes()
self._header = ["Path", "PropertyName", "Value", "odmlDatatype"]
self._header_titles = {"Path": "Path to Section",
"SectionName": "Section Name",
"SectionType": "Section Type",
"SectionDefinition": "Section Definition",
"PropertyName": "Property Name",
"PropertyDefinition": "Property Definition",
"Value": "Value",
"DataUnit": "Data Unit",
"DataUncertainty": "Data Uncertainty",
"odmlDatatype": "odML Data Type"}
self.show_all_sections = False
self.show_all_properties = False
self._SECTION_INF = ["SectionType", "SectionDefinition"]
self._PROPERTY_INF = ["PropertyDefinition", "DataUnit", "DataUncertainty", "odmlDatatype"]
if load_from is not None:
if isinstance(load_from, string_types):
filename, file_extension = os.path.splitext(load_from)
if file_extension == '.odml':
self.load_from_file(load_from)
elif file_extension == '.xls':
self.load_from_xls_table(load_from)
elif file_extension == '.csv':
self.load_from_csv_table(load_from)
else:
                    raise IOError('Cannot read file format "%s". odMLtables '
                                  'supports only .odml, .xls and .csv files.'
                                  % file_extension)
elif isinstance(load_from, odml.doc.BaseDocument):
self.load_from_odmldoc(load_from)
elif callable(load_from):
self.load_from_function(load_from)
def __create_odmldict(self, doc):
"""
function to create the odml-dict
"""
# In odml 1.4 properties are the leaves of the odml tree; unwrap from there.
props = list(doc.iterproperties())
odmldict = [{'Path': p.get_path(),
'SectionType': p.parent.type,
'SectionDefinition': p.parent.definition,
'PropertyDefinition': p.definition,
'Value': p.values,
'DataUnit': p.unit,
'DataUncertainty': p.uncertainty,
'odmlDatatype': p.dtype}
for p in props]
odmldict = self._sort_odmldict(odmldict)
return odmldict
def _sort_odmldict(self, odmldict):
# switching order of ':' and '/' in alphabet, to get properties listed first and
# subsections listed second
switch = {'/': ':', ':': '/'}
weight_func = lambda word: [switch[c] if c in switch else c for c in word]
return sorted(odmldict, key=lambda k: weight_func(k['Path']))
def _split_path(self, dic):
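        # e.g. a Path of '/Session/Subject:species' yields
        # ('/Session/Subject', 'Subject', 'species')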
path, property_name = dic['Path'].split(':')
section_name = path.split('/')[-1]
return path, section_name, property_name
def _create_documentdict(self, doc):
attributes = ['author', 'date', 'repository', 'version']
docdict = {att: getattr(doc, att) for att in attributes}
return docdict
# TODO: better exception
def load_from_file(self, load_from):
"""
loads the odml-data from an odml-file
:param load_from: the path to the odml-file
:type load_from: string
"""
doc = odml.load(load_from, show_warnings=self.show_odml_warnings)
# resolve links and includes
doc.finalize()
self._odmldict = self.__create_odmldict(doc)
self._docdict = self._create_documentdict(doc)
def load_from_odmldoc(self, doc):
"""
loads the odml-data from an odml-document
:param load_from: the odml-document
:type load_from: odml-document
"""
self._odmldict = self.__create_odmldict(doc)
self._docdict = self._create_documentdict(doc)
def load_from_function(self, odmlfct):
"""
loads the odml-data by using a function that creates an odml-document
:param load_from: function that returns an odml-document
:type load_from: function
"""
doc = odmlfct()
self._odmldict = self.__create_odmldict(doc)
self._docdict = self._create_documentdict(doc)
def _get_docdict(self, row):
'''
        supplementary function to reconstruct self._docdict from the first row of the
table
:param row: list of values in first row of table
:return: None
'''
        if self._docdict is None:
self._docdict = {}
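        # The document-information row alternates key and value cells after the
        # leading 'Document Information' label: row[1] is a key, row[2] its value, ...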
for col_id in list(range(int(len(row) / 2))):
if row[2 * col_id + 1] != '':
key = row[2 * col_id + 1]
# in case last entry was empty and document
# info is longer than header, this cell will
# not be present
if 2 * col_id + 2 == len(row):
value = ''
else:
value = row[2 * col_id + 2]
self._docdict[key] = value
@staticmethod
def get_xls_header(load_from):
'''
        Provide the non-empty xls header entries of the first sheet; used by
        the odMLtables GUI only.
:return:
'''
workbook = xlrd.open_workbook(load_from)
for sheet_name in workbook.sheet_names():
worksheet = workbook.sheet_by_name(sheet_name)
row = 0
# read document information if present
if worksheet.cell(0, 0).value == 'Document Information':
# doc_row = [r.value for r in worksheet.row(row)]
# self._get_docdict(doc_row)
row += 1
            # get number of non-empty odml columns
header_row = worksheet.row(row)
# read the header
header = [h.value for h in header_row if h.ctype != 0]
return header
def load_from_xls_table(self, load_from):
"""
        loads the odml-data from an xls-file. To load the odml, at least Value,
Path, PropertyName and odmlDatatype must be given in the table. Also,
the header_titles must be correct
:param load_from: name(path) of the xls-file
:type load_from: string
"""
self._odmldict = []
self._docdict = {}
        # create an inverted header_titles dictionary for reverse lookup
inv_header_titles = {v: k for (k, v) in list(self._header_titles.items())}
workbook = xlrd.open_workbook(load_from)
for sheet_name in workbook.sheet_names():
worksheet = workbook.sheet_by_name(sheet_name)
row_id = 0
# read document information if present
if worksheet.cell(0, 0).value == 'Document Information':
doc_row = [r.value for r in worksheet.row(row_id)]
self._get_docdict(doc_row)
row_id += 1
            # get number of non-empty odml columns
header_row = worksheet.row(row_id)
# read the header
header = [h.value for h in header_row]
# strip trailing empty cells from header
for i in list(range(len(header_row) - 1, -1, -1)):
if header_row[i].ctype == 0:
header.pop(i)
else:
break
n_cols = len(header)
try:
self._header = [inv_header_titles[h] if h != '' else None for h in header]
except KeyError as e:
if hasattr(e, 'message'):
m = e.message
else:
m = str(e)
raise ValueError('%s is not a valid header title.' % m)
row_id += 1
# get column ids of non-empty header cells
header_title_ids = {h: id for id, h in
enumerate(self._header) if h != ''}
header_title_order = {id: h for id, h in
enumerate(self._header) if h != ''}
must_haves = ["Path", "PropertyName", "Value", "odmlDatatype"]
            # check if all of the needed information is in the table
            if any([(m not in self._header) for m in must_haves]):
                err_msg = ("your table has to contain all of the following "
                           "attributes: {0}").format(must_haves)
raise ValueError(err_msg)
previous_dic = {"Path": None,
"SectionType": None,
"SectionDefinition": None,
"PropertyDefinition": None,
"Value": None,
"DataUnit": None,
"DataUncertainty": None,
"odmlDatatype": None}
header_end_row_id = row_id
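            # previous_dic keeps the last fully specified property so that rows
            # with empty Path/PropertyName cells can inherit the section and
            # property information from the row above.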
for row_id in range(header_end_row_id, worksheet.nrows):
row = worksheet.row_values(row_id)
new_dic = {"Path": None,
"SectionType": None,
"SectionDefinition": None,
"PropertyDefinition": None,
"Value": None,
"DataUnit": None,
"DataUncertainty": None,
"odmlDatatype": None}
for col_n in list(range(len(row))):
# using only columns with header
if col_n in header_title_order and header_title_order[col_n] is not None:
new_dic[header_title_order[col_n]] = row[col_n]
if 'PropertyName' in new_dic and new_dic['PropertyName'] == '':
new_dic['PropertyName'] = previous_dic['Path'].split(':')[1]
for key in self._PROPERTY_INF:
new_dic[key] = previous_dic[key]
# copy section info if not present for this row
if new_dic['Path'] == '':
for key in self._SECTION_INF:
new_dic[key] = previous_dic[key]
new_dic['Path'] = '{}:{}'.format(previous_dic['Path'].split(':')[0],
new_dic['PropertyName'])
else:
# update path and remove section and property names
new_dic['Path'] = new_dic['Path'] + ':' + new_dic['PropertyName']
new_dic.pop('PropertyName')
if 'SectionName' in new_dic:
new_dic.pop('SectionName')
# convert to python datatypes
dtype = new_dic['odmlDatatype']
value = self._convert_to_python_type(new_dic['Value'], dtype, workbook.datemode)
new_dic['Value'] = [value]
# same section, same property
if previous_dic['Path'] == new_dic['Path']:
# old section, old property
previous_dic['Value'].extend(new_dic['Value'])
continue
# new property
else:
                    # explicitly converting empty cells ('') to None for compatibility with loading
# from odml documents
for k, v in new_dic.items():
if v == '':
new_dic[k] = None
if new_dic['Value'] == ['']:
new_dic['Value'] = []
# converting values of this property
new_dic['Value'] = self.odtypes.to_odml_value(new_dic['Value'],
new_dic['odmlDatatype'])
self._odmldict.append(new_dic)
previous_dic = new_dic
self._odmldict = self._sort_odmldict(self._odmldict)
def _convert_to_python_type(self, value, dtype, datemode):
if ('date' in dtype or 'time' in dtype) and (value != ''):
if isinstance(value, float):
value = xlrd.xldate_as_tuple(value, datemode)
elif isinstance(value, unicode):
# try explicit conversion of unicode like '2000-03-23'
m = re.match('(?P<year>[0-9]{4})-(?P<month>[0-1][0-9])-'
'(?P<day>[0-3][0-9])',
value)
if m:
date_dict = m.groupdict()
value = (int(date_dict['year']),
int(date_dict['month']),
int(date_dict['day']),
0, 0, 0)
else:
raise TypeError('Expected xls date or time object, '
'but got instead %s of %s'
'' % (value, type(value)))
return value
@staticmethod
def get_csv_header(load_from):
'''
        Provide the non-empty csv header entries; used by the odMLtables GUI
        only.
:return:
'''
with open(load_from, 'r') as csvfile:
csvreader = csv.reader(csvfile)
row = next(csvreader)
# check if first line contains document information
if row[0] == 'Document Information':
try:
row = next(csvreader)
                except StopIteration:
raise IOError('Csv file does not contain header row.'
' Filename "%s"' % load_from)
# get column ids of non-empty header cells
header = [h for h in row if h != '']
return header
# TODO: use normal reader instead of dictreader => much easier!!
def load_from_csv_table(self, load_from):
"""
loads the odmldict from a csv-file containing an odml-table. To load
the odml, at least Value, Path, PropertyName and odmlDatatype must be
given in the table. Also, the header_titles must be correct
:param load_from: name(path) of the csv-file
:type load_from: string
"""
self._odmldict = []
self._docdict = {}
        # create an inverted header_titles dictionary for reverse lookup
inv_header_titles = {v: k for (k, v) in list(self._header_titles.items())}
with open(load_from, 'r') as csvfile:
csvreader = csv.reader(csvfile)
row = next(csvreader)
# check if first line contains document information
if row[0] == 'Document Information':
self._get_docdict(row)
try:
row = next(csvreader)
                except StopIteration:
raise IOError('Csv file does not contain header row.'
' Filename "%s"' % load_from)
# get column ids of non-empty header cells
header_title_order = {id: inv_header_titles[h] for id, h in
enumerate(row) if h != ''}
# reconstruct headers
self._header = [inv_header_titles[h] if h != '' else None for h in row]
must_haves = ["Path", "PropertyName", "Value", "odmlDatatype"]
            # check if all of the needed information is in the table
            if any([(m not in self._header) for m in must_haves]):
                err_msg = ("your table has to contain all of the following "
                           "attributes: {0}").format(must_haves)
raise ValueError(err_msg)
current_dic = {"Path": "",
"SectionType": "",
"SectionDefinition": "",
"PropertyDefinition": "",
"Value": "",
"DataUnit": "",
"DataUncertainty": "",
"odmlDatatype": ""}
for row_id, row in enumerate(csvreader):
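                # Each row either starts a new property or extends current_dic:
                # empty section/property cells inherit from the previous row, and
                # rows with the same full path only extend the value list.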
is_new_property = True
new_dic = {}
for col_n in list(range(len(row))):
# using only columns with header
if col_n in header_title_order:
new_dic[header_title_order[col_n]] = row[col_n]
# listify all values for easy extension later
if 'Value' in new_dic:
if new_dic['Value'] != '':
new_dic['Value'] = [new_dic['Value']]
else:
new_dic['Value'] = []
# update path and remove section and property names
new_dic['Path'] = new_dic['Path'] + ':' + new_dic['PropertyName']
new_dic.pop('PropertyName')
if 'SectionName' in new_dic:
new_dic.pop('SectionName')
# remove empty entries
for k, v in new_dic.items():
if v == '':
new_dic[k] = None
# SAME SECTION: empty path -> reuse old path info
if new_dic['Path'].split(':')[0] == '':
new_dic['Path'] = '{}:{}'.format(current_dic['Path'].split(':')[0],
new_dic['Path'].split(':')[1])
for sec_inf in self._SECTION_INF:
if sec_inf in current_dic:
new_dic[sec_inf] = current_dic[sec_inf]
# SAME PROPERTY: empty property name -> reuse old prop info
if new_dic['Path'].split(':')[1] == '':
new_dic['Path'] = '{}:{}'.format(new_dic['Path'].split(':')[0],
current_dic['Path'].split(':')[1])
for sec_inf in self._PROPERTY_INF:
if sec_inf in current_dic:
new_dic[sec_inf] = current_dic[sec_inf]
# SAME SECTION
if current_dic['Path'].split(':')[0] == new_dic['Path'].split(':')[0]:
# SAME PROPERTY
if current_dic['Path'] == new_dic['Path']:
current_dic['Value'].extend(new_dic['Value'])
is_new_property = False
if is_new_property:
if row_id > 0:
self._odmldict.append(copy.deepcopy(current_dic))
current_dic = new_dic
# copy final property
if row_id == 0:
self._odmldict.append(copy.deepcopy(new_dic))
else:
self._odmldict.append(copy.deepcopy(current_dic))
# value conversion for all properties
for current_dic in self._odmldict:
current_dic['Value'] = self.odtypes.to_odml_value(current_dic['Value'],
current_dic['odmlDatatype'])
self._odmldict = self._sort_odmldict(self._odmldict)
def change_header_titles(self, **kwargs):
"""
Function to change the Name of a column in your table. Be careful with
this function if you want to convert the table back to an odml.
:param Path: Name of the 'Path'-Column in the table
:param SectionName: Name of the 'Section Name'-Column in the table
:param SectionType: Name of the 'Section Type'-Column in the table
:param SectionDefinition: Name of the 'Section Definition'-Column in
the table
        :param PropertyName: Name of the 'Property Name'-Column in the table
:param PropertyDefinition: Name of the 'Property Definition'-Column in
the table
:param Value: Name of the 'Value'-Column in the table
:param DataUnit: Name of the 'Data Unit'-Column in the table
:param DataUncertainty: Name of the 'Data Uncertainty'-Column in the
table
:param odmlDatatype: Name of the 'odML Data Type'-Column in the table
:type Path: string, optional
:type SectionName: string, optional
:type SectionType: string, optional
:type SectionDefinition: string, optional
        :type PropertyName: string, optional
:type PropertyDefinition: string, optional
:type Value: string, optional
:type DataUnit: string, optional
:type DataUncertainty: string, optional
:type odmlDatatype: string, optional
"""
for k in kwargs:
if k in self._header_titles:
self._header_titles[k] = kwargs[k]
else:
errmsg = "{0} is not in the header_title-dictionary. Valid keywords are {1}." \
"".format(k, ', '.join(self._header_titles.keys()))
raise ValueError(errmsg)
def change_header(self, *args, **kwargs):
"""
Function to change the header of the table.
        The keyword arguments of the function are the possible columns you can
        include in your table; they are listed below, and you can also check
        the possible options by looking at the keys of the header_titles
        dictionary. Each takes the number of its position in the table,
        counting from the left and starting at 1.
        The default header is ['Path', 'Property Name', 'Value', 'odML Data
        Type']. These are the columns you need to be able to convert your table
        back to an odml-file. Important: you can create tables which don't
        contain all of those four, but they can't be converted back to odml.
:param Path: Position of the 'Path'-Column in the table.
:param SectionName: Position of the 'Section Name'-Column in the table
:param SectionType: Position of the 'Section Type'-Column in the table
:param SectionDefinition: Position of the 'Section Definition'-Column
in the table
:param PropertyName: Position of the 'Property Name'-Column in the
table
:param PropertyDefinition: Position of the 'Property Definition'-Column
in the table
:param Value: Position of the 'Value'-Column in the table
:param DataUnit: Position of the 'Data Unit'-Column in the table
:param DataUncertainty: Position of the 'Data Uncertainty'-Column in
the table
:param odmlDatatype: Position of the 'odML Data Type'-Column in the
table
:type Path: int, optional
:type SectionName: int, optional
:type SectionType: int, optional
:type SectionDefinition: int, optional
:type PropertyName: int, optional
:type PropertyDefinition: int, optional
:type Value: int, optional
:type DataUnit: int, optional
:type DataUncertainty: int, optional
:type odmlDatatype: int, optional
:Example:
            mytable.change_header(Path=1, Value=3, odmlDatatype=2)
            => resulting header: ['Path', 'odML Data Type', 'Value']
"""
if args:
if args[0] == 'full':
kwargs = {k: i + 1 for i, k in enumerate(self._header_titles.keys())}
elif args[0] == 'minimal':
kwargs = {k: i + 1 for i, k in enumerate(["Path", "PropertyName", "Value",
"odmlDatatype"])}
        # sort the keyword arguments by their column positions (values)
keys_sorted = sorted(kwargs, key=kwargs.get)
# check if first element is in range
if kwargs[keys_sorted[0]] <= 0:
errmsg = ("Your smallest argument is {}, but the columns start" +
" at 1").format(kwargs[keys_sorted[0]])
raise ValueError(errmsg)
# TODO: better Exception
max_col = kwargs[keys_sorted[-1]]
# initialize header with enough elements
header = max_col * [None]
if keys_sorted[0] in self._header_titles:
header[kwargs[keys_sorted[0]] - 1] = keys_sorted[0]
else:
raise KeyError(" {} not in header_titles. Available header titles are: {}."
"".format(keys_sorted[0], ', '.join(self._header_titles.keys())))
# check if there are two keys with the same value
for index, key in enumerate(keys_sorted[1:]):
            # `key` is keys_sorted[index + 1]; compare it with its predecessor.
            if kwargs[keys_sorted[index]] == kwargs[key]:
                errmsg = "The keys {0} and {1} both have the value {2}" \
                    .format(keys_sorted[index],
                            key,
                            kwargs[key])
raise KeyError(errmsg)
# TODO: better exception
else:
if key in self._header_titles:
header[kwargs[key] - 1] = key
else:
raise KeyError("{} not in header_titles. Available header titles are: {}."
"".format(key, ', '.join(self._header_titles.keys())))
self._header = header
def consistency_check(self):
"""
check odmldict for consistency regarding dtypes to ensure that data
can be loaded again.
"""
        if self._odmldict is not None:
for property_dict in self._odmldict:
if property_dict['odmlDatatype'] and \
property_dict['odmlDatatype'] not in self.odtypes.valid_dtypes:
                    raise TypeError('Invalid dtype "{0}" in odmldict. Valid types are {1}'
''.format(property_dict['odmlDatatype'],
self.odtypes.valid_dtypes))
def _filter(self, filter_func):
"""
remove odmldict entries which do not match filter_func.
"""
# inflate odmldict for filtering
for dic in self._odmldict:
sec_path, dic['PropertyName'] = dic['Path'].split(':')
dic['SectionName'] = sec_path.split('/')[-1]
new_odmldict = [d for d in self._odmldict if filter_func(d)]
deleted_properties = [d for d in self._odmldict if not filter_func(d)]
self._odmldict = new_odmldict
return new_odmldict, deleted_properties
def filter(self, mode='and', invert=False, recursive=False,
comparison_func=lambda x, y: x == y, **kwargs):
"""
filters odml properties according to provided kwargs.
:param mode: Possible values: 'and', 'or'. For 'and' all keyword
arguments must be satisfied for a property to be selected. For 'or'
only one of the keyword arguments must be satisfied for the property
to be selected. Default: 'and'
:param invert: Inverts filter function. Previously accepted properties
are rejected and the other way round. Default: False
        :param recursive: Also delete properties attached to subsections of the
            parent section, thereby removing the complete branch
:param comparison_func: Function used to compare dictionary entry to
keyword. Eg. 'lambda x,y: x.startswith(y)' in case of strings or
'lambda x,y: x in y' in case of multiple permitted values.
Default: lambda x,y: x==y
:param kwargs: keywords and values used for filtering
:return: None
"""
if not kwargs:
raise ValueError('No filter keywords provided for property filtering.')
if mode not in ['and', 'or']:
raise ValueError('Invalid operation mode "%s". Accepted values are "and", "or".'
'' % (mode))
def filter_func(dict_prop):
keep_property = False
for filter_key, filter_value in iteritems(kwargs):
if filter_key not in dict_prop:
raise ValueError('Key "%s" is missing in property dictionary %s'
'' % (filter_key, dict_prop))
if comparison_func(dict_prop[filter_key], filter_value):
keep_property = True
else:
keep_property = False
if mode == 'or' and keep_property:
break
if mode == 'and' and not keep_property:
break
if invert:
keep_property = not keep_property
return keep_property
_, del_props = self._filter(filter_func=filter_func)
if recursive and len(del_props) > 0:
for del_prop in del_props:
self.filter(invert=True, recursive=True,
comparison_func=lambda x, y: x.startswith(y),
Path=del_prop['Path'])
def merge(self, odmltable, overwrite_values=False, **kwargs):
"""
Merge odmltable into current odmltable.
:param odmltable: OdmlTable object or odML document object
:param overwrite_values: Bool value to indicate whether values of odML Properties should
be merged (appended) or overwritten by the entries of the other odmltable object.
Default is False.
:return:
"""
if hasattr(odmltable, 'convert2odml'):
doc2 = odmltable.convert2odml()
else:
# assuming odmltable is already an odml document
doc2 = odmltable
doc1 = self.convert2odml()
self._merge_odml_sections(doc1, doc2, overwrite_values=overwrite_values, **kwargs)
def update_docprop(prop):
if hasattr(doc1, prop) and hasattr(doc2, prop):
values = [getattr(doc1, prop), getattr(doc2, prop)]
# use properties of basic document, unless this does not exist
common_value = values[0]
if not common_value and values[1]:
common_value = values[1]
setattr(doc1, prop, common_value)
for docprop in ['author', 'date', 'version', 'repository']:
update_docprop(docprop)
self.load_from_odmldoc(doc1)
def _merge_odml_sections(self, sec1, sec2, overwrite_values=False, **kwargs):
"""
Merging subsections of odml sections
"""
for childsec2 in sec2.sections:
sec_name = childsec2.name
            if sec_name not in sec1.sections:
sec1.append(childsec2)
else:
# this merges odml sections and properties, but always appends values
sec1[sec_name].merge(childsec2, **kwargs)
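        # odml's merge appends values; when requested, overwrite the destination
        # values with the source values afterwards.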
if overwrite_values:
for prop_source in sec2.iterproperties():
prop_path = prop_source.get_path()
prop_destination = sec1.get_property_by_path(prop_path)
prop_destination.values = prop_source.values
def write2file(self, save_to):
"""
        Write the table to the specified file. This base implementation is
        abstract; subclasses implement the actual writing.
        """
        raise NotImplementedError()
def convert2odml(self):
"""
Generates odml representation of odmldict and returns it as odml document.
:return:
"""
doc = odml.Document()
oldpath = ''
parent = ''
self.consistency_check()
for doc_attr_name, doc_attr_value in self._docdict.items():
setattr(doc, doc_attr_name, doc_attr_value)
for dic in self._odmldict:
# build property object
prop_name = self._split_path(dic)[-1]
prop = odml.Property(name=prop_name,
values=dic['Value'],
dtype=dic['odmlDatatype'])
if 'PropertyDefinition' in dic:
prop.definition = dic['PropertyDefinition']
if 'DataUnit' in dic:
prop.unit = dic['DataUnit']
if 'DataUncertainty' in dic:
prop.uncertainty = dic['DataUncertainty']
sec_path = dic['Path'].split(':')[0]
current_sec = doc
# build section tree for this property
for sec_pathlet in sec_path.strip('/').split('/'):
# append new section if not present yet
if sec_pathlet not in current_sec.sections:
current_sec.append(odml.Section(name=sec_pathlet))
current_sec = current_sec[sec_pathlet]
if 'SectionType' in dic:
current_sec.type = dic['SectionType']
if 'SectionDefinition' in dic:
current_sec.definition = dic['SectionDefinition']
current_sec.append(prop)
return doc
def write2odml(self, save_to):
"""
writes the loaded odmldict (e.g. from an csv-file) to an odml-file
"""
doc = self.convert2odml()
odml.tools.xmlparser.XMLWriter(doc).write_file(save_to, local_style=True)
class OdmlDtypes(object):
"""
Class to handle odml data types, synonyms and default values.
:param basedtypes_dict: Dictionary containing additional basedtypes to
use as keys and default values as values.
Default: None
:param synonyms_dict: Dictionary containing additional synonyms to use as
keys and basedtypes to associate as values.
Default: None
:return: None
"""
default_basedtypes = [d.name for d in odml.DType]
default_synonyms = {'bool': 'boolean', 'datetime.date': 'date', 'datetime.time': 'time',
'integer': 'int', 'str': 'string'} # mapping synonym -> default type
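    # e.g. a table declaring its dtype as 'str' or 'integer' is mapped to the
    # odML base types 'string' and 'int' respectively.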
def __init__(self, basedtypes_dict=None, synonyms_dict=None):
self._basedtypes = copy.copy(self.default_basedtypes)
self._synonyms = self.default_synonyms.copy()
self._validDtypes = None
        # update default values with user-defined defaults
if basedtypes_dict is not None:
            # _basedtypes is a list of names; extend it with the dict's keys
            self._basedtypes.extend(basedtypes_dict)
if synonyms_dict is not None:
self._synonyms.update(synonyms_dict)
@property
def valid_dtypes(self):
# if not done yet: generate validDtype list with unique entries
        if self._validDtypes is None:
validDtypes = list(self._basedtypes)
for syn in list(self._synonyms):
if syn not in validDtypes:
validDtypes.append(syn)
self._validDtypes = validDtypes
return self._validDtypes
@property
def synonyms(self):
return self._synonyms
def add_synonym(self, basedtype, synonym):
"""
        Set a user-specific synonym for a base dtype.
        :param basedtype: Accepted basedtype of OdmlDtypes or None. None
            deletes an already existing synonym
:param synonym: Synonym to be connected to basedtype
:return: None
"""
if basedtype not in self._basedtypes:
if basedtype is None and synonym in self._synonyms:
self._synonyms.pop(synonym)
else:
raise ValueError(
                    'Cannot add synonym "%s=%s". %s is not a base dtype. '
                    'Valid basedtypes are %s.' % (
basedtype, synonym, basedtype, self.basedtypes))
elif synonym is None or synonym == '':
raise ValueError('"%s" is not a valid synonym.' % synonym)
else:
self._synonyms.update({synonym: basedtype})
@property
def basedtypes(self):
return list(self._basedtypes)
def to_odml_value(self, value, dtype):
"""
Convert single value entry or list of value entries to odml compatible format
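        e.g. to_odml_value('2011-12-01', 'date') -> [datetime.date(2011, 12, 1)]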
"""
if value == '':
value = []
if not isinstance(value, list):
value = [value]
for i in range(len(value)):
value[i] = self._convert_single_value(value[i], dtype)
return value
def _convert_single_value(self, value, dtype):
if dtype == '':
return value
#
# if value == '':
# return None
if dtype in self._synonyms:
dtype = self._synonyms[dtype]
if dtype == 'datetime':
if isinstance(value, datetime.datetime):
result = value
else:
try:
result = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
except TypeError:
result = datetime.datetime(*value)
elif dtype == 'date':
if isinstance(value, datetime.date):
result = value
else:
try:
result = datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
try:
result = datetime.datetime.strptime(value, '%d-%m-%Y').date()
except ValueError:
raise ValueError(
                            'The value "%s" cannot be converted to a date as '
                            'it is not in the format yyyy-mm-dd or dd-mm-yyyy' % value)
except TypeError:
result = datetime.datetime(*value).date()
elif dtype == 'time':
if isinstance(value, datetime.time):
result = value
else:
try:
result = datetime.datetime.strptime(value, '%H:%M:%S').time()
except TypeError:
try:
result = datetime.datetime(*value).time()
except ValueError:
result = datetime.time(*value[-3:])
elif dtype == 'int':
result = int(value)
elif dtype == 'float':
result = float(value)
elif dtype == 'boolean':
result = bool(value)
elif dtype in ['string', 'text', 'url', 'person']:
result = str(value)
else:
result = value
return result
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Run tests in the farm sub-directory. Designed for nose."""
import difflib
import filecmp
import fnmatch
import glob
import os
import re
import shutil
import sys
from nose.plugins.skip import SkipTest
from tests.helpers import run_command
from tests.backtest import execfile # pylint: disable=redefined-builtin
from coverage.debug import _TEST_NAME_FILE
def test_farm(clean_only=False):
"""A test-generating function for nose to find and run."""
for fname in glob.glob("tests/farm/*/*.py"):
case = FarmTestCase(fname, clean_only)
yield (case,)
# "rU" was deprecated in 3.4
READ_MODE = "rU" if sys.version_info < (3, 4) else "r"
class FarmTestCase(object):
"""A test case from the farm tree.
Tests are short Python script files, often called run.py:
copy("src", "out")
run('''
coverage run white.py
coverage annotate white.py
''', rundir="out")
compare("out", "gold", "*,cover")
clean("out")
Verbs (copy, run, compare, clean) are methods in this class. FarmTestCase
has options to allow various uses of the test cases (normal execution,
cleaning-only, or run and leave the results for debugging).
"""
def __init__(self, runpy, clean_only=False, dont_clean=False):
"""Create a test case from a run.py file.
`clean_only` means that only the clean() action is executed.
`dont_clean` means that the clean() action is not executed.
"""
self.description = runpy
self.dir, self.runpy = os.path.split(runpy)
self.clean_only = clean_only
self.dont_clean = dont_clean
self.ok = True
def cd(self, newdir):
"""Change the current directory, and return the old one."""
cwd = os.getcwd()
os.chdir(newdir)
return cwd
def addtopath(self, directory):
"""Add `directory` to the path, and return the old path."""
oldpath = sys.path[:]
if directory is not None:
sys.path.insert(0, directory)
return oldpath
def restorepath(self, path):
"""Restore the system path to `path`."""
sys.path = path
def __call__(self):
"""Execute the test from the run.py file.
"""
if _TEST_NAME_FILE: # pragma: debugging
with open(_TEST_NAME_FILE, "w") as f:
f.write(self.description.replace("/", "_"))
cwd = self.cd(self.dir)
# Prepare a dictionary of globals for the run.py files to use.
fns = """
copy run runfunc clean skip
compare contains contains_any doesnt_contain
""".split()
if self.clean_only:
glo = dict((fn, self.noop) for fn in fns)
glo['clean'] = self.clean
else:
glo = dict((fn, getattr(self, fn)) for fn in fns)
if self.dont_clean: # pragma: not covered
glo['clean'] = self.noop
old_mods = dict(sys.modules)
try:
execfile(self.runpy, glo)
except Exception:
self.ok = False
raise
finally:
self.cd(cwd)
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
to_del = [m for m in sys.modules if m not in old_mods]
for m in to_del:
del sys.modules[m]
def run_fully(self): # pragma: not covered
"""Run as a full test case, with setUp and tearDown."""
self.setUp()
try:
self()
finally:
self.tearDown()
def fnmatch_list(self, files, file_pattern):
"""Filter the list of `files` to only those that match `file_pattern`.
If `file_pattern` is None, then return the entire list of files.
Returns a list of the filtered files.
"""
if file_pattern:
files = [f for f in files if fnmatch.fnmatch(f, file_pattern)]
return files
def setUp(self):
"""Test set up, run by nose before __call__."""
# Modules should be importable from the current directory.
self.old_syspath = sys.path[:]
sys.path.insert(0, '')
def tearDown(self):
"""Test tear down, run by nose after __call__."""
# Make sure the test is cleaned up, unless we never want to, or if the
# test failed.
if not self.dont_clean and self.ok: # pragma: part covered
self.clean_only = True
self()
# Restore the original sys.path
sys.path = self.old_syspath
# Functions usable inside farm run.py files
def noop(self, *args, **kwargs):
"""A no-op function to stub out run, copy, etc, when only cleaning."""
pass
def copy(self, src, dst):
"""Copy a directory."""
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
def run(self, cmds, rundir="src", outfile=None):
"""Run a list of commands.
`cmds` is a string, commands separated by newlines.
`rundir` is the directory in which to run the commands.
`outfile` is a filename to redirect stdout to.
"""
cwd = self.cd(rundir)
if outfile:
fout = open(outfile, "a+")
try:
for cmd in cmds.split("\n"):
cmd = cmd.strip()
if not cmd:
continue
retcode, output = run_command(cmd)
print(output.rstrip())
if outfile:
fout.write(output)
if retcode:
raise Exception("command exited abnormally")
finally:
if outfile:
fout.close()
self.cd(cwd)
def runfunc(self, fn, rundir="src", addtopath=None):
"""Run a function.
`fn` is a callable.
`rundir` is the directory in which to run the function.
"""
cwd = self.cd(rundir)
oldpath = self.addtopath(addtopath)
try:
fn()
finally:
self.cd(cwd)
self.restorepath(oldpath)
def compare(
self, dir1, dir2, file_pattern=None, size_within=0,
left_extra=False, right_extra=False, scrubs=None
):
"""Compare files matching `file_pattern` in `dir1` and `dir2`.
`dir2` is interpreted as a prefix, with Python version numbers appended
to find the actual directory to compare with. "foo" will compare
against "foo_v241", "foo_v24", "foo_v2", or "foo", depending on which
directory is found first.
`size_within` is a percentage delta for the file sizes. If non-zero,
then the file contents are not compared (since they are expected to
often be different), but the file sizes must be within this amount.
For example, size_within=10 means that the two files' sizes must be
within 10 percent of each other to compare equal.
`left_extra` true means the left directory can have extra files in it
without triggering an assertion. `right_extra` means the right
directory can.
`scrubs` is a list of pairs, regexes to find and literal strings to
replace them with to scrub the files of unimportant differences.
An assertion will be raised if the directories fail one of their
matches.
"""
# Search for a dir2 with a version suffix.
version_suff = ''.join(map(str, sys.version_info[:3]))
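        # e.g. '353' for Python 3.5.3; trailing characters are stripped until an
        # existing gold directory is found, so the most specific one wins.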
while version_suff:
trydir = dir2 + '_v' + version_suff
if os.path.exists(trydir):
dir2 = trydir
break
version_suff = version_suff[:-1]
assert os.path.exists(dir1), "Left directory missing: %s" % dir1
assert os.path.exists(dir2), "Right directory missing: %s" % dir2
dc = filecmp.dircmp(dir1, dir2)
diff_files = self.fnmatch_list(dc.diff_files, file_pattern)
left_only = self.fnmatch_list(dc.left_only, file_pattern)
right_only = self.fnmatch_list(dc.right_only, file_pattern)
show_diff = True
if size_within:
# The files were already compared, use the diff_files list as a
# guide for size comparison.
wrong_size = []
for f in diff_files:
with open(os.path.join(dir1, f), "rb") as fobj:
left = fobj.read()
with open(os.path.join(dir2, f), "rb") as fobj:
right = fobj.read()
size_l, size_r = len(left), len(right)
big, little = max(size_l, size_r), min(size_l, size_r)
if (big - little) / float(little) > size_within/100.0:
# print "%d %d" % (big, little)
# print "Left: ---\n%s\n-----\n%s" % (left, right)
wrong_size.append("%s (%s,%s)" % (f, size_l, size_r))
if wrong_size:
print("File sizes differ between %s and %s: %s" % (
dir1, dir2, ", ".join(wrong_size)
))
# We'll show the diff iff the files differed enough in size.
show_diff = bool(wrong_size)
if show_diff:
# filecmp only compares in binary mode, but we want text mode. So
# look through the list of different files, and compare them
# ourselves.
text_diff = []
for f in diff_files:
with open(os.path.join(dir1, f), READ_MODE) as fobj:
left = fobj.read()
with open(os.path.join(dir2, f), READ_MODE) as fobj:
right = fobj.read()
if scrubs:
left = self._scrub(left, scrubs)
right = self._scrub(right, scrubs)
if left != right:
text_diff.append(f)
left = left.splitlines()
right = right.splitlines()
print("\n".join(difflib.Differ().compare(left, right)))
assert not text_diff, "Files differ: %s" % text_diff
if not left_extra:
assert not left_only, "Files in %s only: %s" % (dir1, left_only)
if not right_extra:
assert not right_only, "Files in %s only: %s" % (dir2, right_only)
def _scrub(self, strdata, scrubs):
"""Scrub uninteresting data from the payload in `strdata`.
`scrubs` is a list of (find, replace) pairs of regexes that are used on
`strdata`. A string is returned.
"""
for rgx_find, rgx_replace in scrubs:
strdata = re.sub(rgx_find, re.escape(rgx_replace), strdata)
return strdata
def contains(self, filename, *strlist):
"""Check that the file contains all of a list of strings.
An assert will be raised if one of the arguments in `strlist` is
missing in `filename`.
"""
with open(filename, "r") as fobj:
text = fobj.read()
for s in strlist:
assert s in text, "Missing content in %s: %r" % (filename, s)
def contains_any(self, filename, *strlist):
"""Check that the file contains at least one of a list of strings.
An assert will be raised if none of the arguments in `strlist` is in
`filename`.
"""
with open(filename, "r") as fobj:
text = fobj.read()
for s in strlist:
if s in text:
return
assert False, "Missing content in %s: %r [1 of %d]" % (
filename, strlist[0], len(strlist),
)
def doesnt_contain(self, filename, *strlist):
"""Check that the file contains none of a list of strings.
An assert will be raised if any of the strings in strlist appears in
`filename`.
"""
with open(filename, "r") as fobj:
text = fobj.read()
for s in strlist:
assert s not in text, "Forbidden content in %s: %r" % (filename, s)
def clean(self, cleandir):
"""Clean `cleandir` by removing it and all its children completely."""
# rmtree gives mysterious failures on Win7, so retry a "few" times.
# I've seen it take over 100 tries, so, 1000! This is probably the
# most unpleasant hack I've written in a long time...
tries = 1000
while tries: # pragma: part covered
if os.path.exists(cleandir):
try:
shutil.rmtree(cleandir)
except OSError: # pragma: not covered
if tries == 1:
raise
else:
tries -= 1
continue
break
def skip(self, msg=None):
"""Skip the current test."""
raise SkipTest(msg)
def main(): # pragma: not covered
"""Command-line access to test_farm.
Commands:
run testcase ... - Run specific test case(s)
out testcase ... - Run test cases, but don't clean up, leaving output.
clean - Clean all the output for all tests.
"""
op = 'help'
try:
op = sys.argv[1]
except IndexError:
pass
if op == 'run':
# Run the test for real.
for test_case in sys.argv[2:]:
case = FarmTestCase(test_case)
case.run_fully()
elif op == 'out':
# Run the test, but don't clean up, so we can examine the output.
for test_case in sys.argv[2:]:
case = FarmTestCase(test_case, dont_clean=True)
case.run_fully()
elif op == 'clean':
# Run all the tests, but just clean.
for test in test_farm(clean_only=True):
test[0].run_fully()
else:
print(main.__doc__)
# So that we can run just one farm run.py at a time.
if __name__ == '__main__':
main()
|
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of14']
class queue_desc_prop(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = queue_desc_prop.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = queue_desc_prop()
obj.type = reader.read("!H")[0]
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("queue_desc_prop {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class experimenter(queue_desc_prop):
subtypes = {}
type = 65535
def __init__(self, experimenter=None, exp_type=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if exp_type != None:
self.exp_type = exp_type
else:
self.exp_type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.exp_type))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.experimenter = reader.read("!L")[0]
obj.exp_type = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
if self.exp_type != other.exp_type: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.text("exp_type = ");
q.text("%#x" % self.exp_type)
q.breakable()
q.text('}')
queue_desc_prop.subtypes[65535] = experimenter
class max_rate(queue_desc_prop):
type = 2
def __init__(self, rate=None):
if rate != None:
self.rate = rate
else:
self.rate = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append(struct.pack("!H", self.rate))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = max_rate()
_type = reader.read("!H")[0]
assert(_type == 2)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.rate = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.rate != other.rate: return False
return True
def pretty_print(self, q):
q.text("max_rate {")
with q.group():
with q.indent(2):
q.breakable()
q.text("rate = ");
q.text("%#x" % self.rate)
q.breakable()
q.text('}')
queue_desc_prop.subtypes[2] = max_rate
class min_rate(queue_desc_prop):
type = 1
def __init__(self, rate=None):
if rate != None:
self.rate = rate
else:
self.rate = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append(struct.pack("!H", self.rate))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = min_rate()
_type = reader.read("!H")[0]
assert(_type == 1)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.rate = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.rate != other.rate: return False
return True
def pretty_print(self, q):
q.text("min_rate {")
with q.group():
with q.indent(2):
q.breakable()
q.text("rate = ");
q.text("%#x" % self.rate)
q.breakable()
q.text('}')
queue_desc_prop.subtypes[1] = min_rate
|
|
from Config import JConfig
from Config import JConfigEnum
from Config import JConfigInt
from Config import JConfigString
from Config import JConfigHex
from Config import JConfigBool
from Config import JConfigTristate
from Item import JConfigItem
class Dialog:
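    """Base interface for configuration prompt dialogs."""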
def __init__(self):
pass
@staticmethod
def prompt_config(config, pre_def=None):
pass
class CMDDialog(Dialog):
def __init__(self):
Dialog.__init__(self)
@staticmethod
def prompt_config(config, pre_def=None):
assert isinstance(config, JConfig)
if not config.is_visible():
return
config.parse()
for item in config.get_items():
if item.is_visible():
item_type = item.get_type()
                if item_type == 'enum':
                    CMDDialog.prompt_enum(item, pre_def)
                elif item_type == 'int':
                    CMDDialog.prompt_int(item, pre_def)
                elif item_type == 'hex':
                    CMDDialog.prompt_hex(item, pre_def)
                elif item_type == 'bool':
                    CMDDialog.prompt_bool(item, pre_def)
                elif item_type == 'tristate':
                    CMDDialog.prompt_tristate(item, pre_def)
                elif item_type == 'string':
                    CMDDialog.prompt_string(item, pre_def)
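        # Recurse into child configurations so nested config menus are prompted too.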
for child in config.get_childs():
child_dialog = CMDDialog()
child_dialog.prompt_config(child, pre_def)
@staticmethod
def print_help(item):
if not isinstance(item, JConfigItem):
return
for hdlin in item.get_help():
print(hdlin)
@staticmethod
def prompt_enum(item, predef=None):
if not isinstance(item, JConfigEnum):
return
if not item.is_visible():
return
if item.is_forced():
item.set_user_value(item.get_default_value())
return
name = item.get_name()
if (predef is not None) and (name in predef):
estr = predef[name]
idx = item.get_enum().index(estr)
item.set_user_value(idx)
return
print('\nCONFIG_{0}'.format(item.get_name()))
elements = item.get_elements()
for idx, ele in enumerate(elements):
print('{0} {1}'.format(idx, ele))
val = 'h'
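        # Entering 'h' prints the help text and prompts again; an empty answer
        # falls back to the item's default value.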
while val == 'h' or val == '':
val = raw_input('{0} (0 ~ {1}) : '.format(item.get_prompt(), len(elements) - 1))
if val == 'h':
CMDDialog.print_help(item)
elif val == '':
val = item.get_default_value()
                if val != '':
item.set_user_value(val)
else:
try:
item.set_user_value(val)
except ValueError as ve:
val = 'h'
print(ve)
print('selected item is {}\n'.format(item.get_user_value()))
@staticmethod
def prompt_bool(item, predef=None):
if not isinstance(item, JConfigBool):
return
if not item.is_visible():
return
if item.is_forced():
item.set_user_value(item.get_default_value())
return
name = item.get_name()
if (predef is not None) and (name in predef):
item.set_user_value(predef[name])
return
print('\nCONFIG_{0}'.format(item.get_name()))
val = 'h'
while val == 'h' or val == '':
val = raw_input('{0} : '.format(item.get_prompt()))
if val == 'h':
CMDDialog.print_help(item)
elif val == '':
val = item.get_default_value()
                if val != '':
item.set_user_value(val)
else:
try:
item.set_user_value(val)
except ValueError as ve:
val = 'h'
print(ve)
print('{0} is set to {1}'.format('CONFIG_' + item.get_name(), val))
@staticmethod
def prompt_tristate(item, predef=None):
if not isinstance(item, JConfigTristate):
return
if not item.is_visible():
return
if item.is_forced():
item.set_user_value(item.get_default_value())
return
name = item.get_name()
if (predef is not None) and (name in predef):
item.set_user_value(predef[name])
return
print('\nCONFIG_{0}'.format(item.get_name()))
val = 'h'
while val == 'h' or val == '':
val = raw_input('{0} : '.format(item.get_prompt()))
if val == 'h':
CMDDialog.print_help(item)
elif val == '':
val = item.get_default_value()
                if val != '':
item.set_user_value(val)
else:
print('No default value')
CMDDialog.print_help(item)
else:
try:
item.set_user_value(val)
except ValueError as ve:
print(ve)
val = 'h'
print('{0} is set to {1}'.format('CONFIG_' + item.get_name(), val))
@staticmethod
def prompt_string(item, predef=None):
if not isinstance(item, JConfigString):
return
if not item.is_visible():
return
if item.is_forced():
item.set_user_value(item.get_default_value())
return
name = item.get_name()
if (predef is not None) and (name in predef):
item.set_user_value(predef[name])
return
print('\nCONFIG_{0}'.format(item.get_name()))
val = 'h'
while val == 'h' or val == '':
val = raw_input('{0} : '.format(item.get_prompt()))
if val == 'h':
CMDDialog.print_help(item)
elif val == '':
val = item.get_default_value()
                if val != '':
item.set_user_value(val)
else:
print('No default value')
CMDDialog.print_help(item)
else:
try:
item.set_user_value(val)
except ValueError as ve:
print(ve)
val = 'h'
item.set_user_value(val)
        print('{0} is set to {1}'.format('CONFIG_{}'.format(item.get_name()), item.get_user_value()))
@staticmethod
def prompt_int(item, predef=None):
if not isinstance(item, JConfigInt):
return
if not item.is_visible():
return
if item.is_forced():
item.set_user_value(item.get_default_value())
return
name = item.get_name()
if (predef is not None) and (name in predef):
item.set_user_value(predef[name])
return
print('\nCONFIG_{0}'.format(item.get_name()))
val = 'h'
while val == 'h' or val == '':
val = raw_input('{0} : '.format(item.get_prompt()))
if val == 'h':
CMDDialog.print_help(item)
elif val == '':
val = item.get_default_value()
print(val)
                if val != '':
item.set_user_value(val)
else:
print('No default value')
CMDDialog.print_help(item)
else:
try:
item.set_user_value(val)
except ValueError as ve:
print(ve)
val = 'h'
print('entered value is {}\n'.format(item.get_user_value()))
@staticmethod
def prompt_hex(item, predef=None):
if not isinstance(item, JConfigHex):
return
if not item.is_visible():
return
if item.is_forced():
item.set_user_value(item.get_default_value())
return
name = item.get_name()
if (predef is not None) and (name in predef):
item.set_user_value(predef[name])
return
print('\nCONFIG_{0}'.format(item.get_name()))
val = 'h'
while val == 'h' or val == '':
val = raw_input('{0} : '.format(item.get_prompt()))
if val == 'h':
CMDDialog.print_help(item)
elif val == '':
val = item.get_default_value()
                if val != '':
item.set_user_value(val)
else:
print('No default value')
CMDDialog.print_help(item)
else:
try:
item.set_user_value(val)
except ValueError as ve:
print(ve)
val = 'h'
print('entered value is {}\n'.format(item.get_user_value()))
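# Illustrative dispatch sketch (assumption): each prompt_* helper above returns
# immediately unless the item is of its own JConfig* type, so a caller holding a
# flat list of config items can simply try every prompt on every item. The
# 'config_items' list and the 'predef' answers dict are hypothetical placeholders.
#
#     predef = {'SOME_OPTION': 'y'}
#     for cfg_item in config_items:
#         CMDDialog.prompt_bool(cfg_item, predef)
#         CMDDialog.prompt_tristate(cfg_item, predef)
#         CMDDialog.prompt_string(cfg_item, predef)
#         CMDDialog.prompt_int(cfg_item, predef)
#         CMDDialog.prompt_hex(cfg_item, predef)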
|
|
#!/usr/bin/env python
"""
CERN@school - Sorting Clusters
See the README.md for more information.
"""
# Import the code needed to manage files.
import os, inspect, glob, argparse
#...for the logging.
import logging as lg
#...for file manipulation.
from shutil import rmtree, copyfile
# Import the JSON library.
import json
# Import the plotting libraries.
import pylab as plt
#from matplotlib import rc
# Uncomment to use LaTeX for the plot text.
#rc('font',**{'family':'serif','serif':['Computer Modern']})
#rc('text', usetex=True)
#
# The main program.
#
if __name__=="__main__":
print("===============================")
print(" CERN@school - Sort Clusters ")
print("===============================")
# Get the datafile path from the command line
parser = argparse.ArgumentParser()
parser.add_argument("inputPath", help="Path to the input dataset.")
parser.add_argument("typePath", help="Path to the particle type JSON.")
parser.add_argument("outputPath", help="The path for the output files.")
parser.add_argument("-v", "--verbose", help="Increase output verbosity", action="store_true")
args = parser.parse_args()
## The path to the data file.
datapath = args.inputPath
## The path to the particle type JSON.
typepath = args.typePath
#
if not os.path.exists(typepath):
raise IOError("* ERROR! Cluster type JSON file not found at '%s'!" % (typepath))
## The output path.
outputpath = args.outputPath
# Set the logging level to DEBUG.
if args.verbose:
level=lg.DEBUG
else:
level=lg.INFO
# Configure the logging.
lg.basicConfig(filename='log_sort-clusters.log', filemode='w', level=level)
print("*")
print("* Input path : '%s'" % (datapath))
print("* Particle JSON : '%s'" % (typepath))
print("* Output file : '%s'" % (outputpath))
print("*")
## The path to the cluster properties JSON file.
kluster_properties_path = os.path.join(datapath, "klusters.json")
#
if not os.path.exists(kluster_properties_path):
raise IOError("* ERROR! Can't find cluster properties file at '%s'!" % (kluster_properties_path))
#
with open(kluster_properties_path, "r") as kf:
kd = json.load(kf)
## The path to the cluster types specification JSON file.
with open(typepath, "r") as tf:
types = json.load(tf)
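    # Assumed shape of the types JSON (inferred from the keys read in the
    # matching loop below); the type name and numeric windows here are
    # illustrative only:
    #
    #     [ { "SomeType": { "size_min": 5,   "size_max": 100,
    #                       "rad_min":  0.0, "rad_max":  10.0,
    #                       "rho_min":  0.0, "rho_max":   1.0,
    #                       "lin_min":  0.0, "lin_max":   1.0,
    #                       "inr_min":  0.0, "inr_max":   1.0,
    #                       "ttc_min":  0,   "ttc_max": 100000,
    #                       "mxc_min":  0,   "mxc_max":  10000 } } ]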
# Create the sorting directories.
## The path to the sorted cluster directory.
sortedpath = os.path.join(outputpath, "sorted")
#
if os.path.isdir(sortedpath):
rmtree(sortedpath)
lg.info(" * Removing directory '%s'..." % (sortedpath))
os.mkdir(sortedpath)
lg.info(" * Creating directory '%s'..." % (sortedpath))
## Dictionary of the clusters { id:type }.
ks = {}
## Dictionary of the cluster sizes { id:size }.
ksizes = {}
## Dictionary of the cluster radii { id:radius }.
krads = {}
## Dictionary of the cluster densities { id:density }.
kdens = {}
## Dictionary of the cluster linearity { id:linearity }.
klins = {}
## Dictionary of the clusters' fraction of inner pixels { id:innerfrac }.
kinners = {}
## Dictionary of the cluster total counts { id:totalcounts }.
kttcs = {}
## Dictionary of the clusters' max. count value { id:maxcounts }.
kmxcs = {}
## List of the cluster types.
alltypes = ["None", "Edge"]
# Add the types from the input types JSON file.
for t in types:
for typename, vals in t.iteritems():
alltypes.append(typename)
## The number of edge clusters.
n_edge_klusters = 0
# Loop over the klusters.
for k in kd:
# Add to the cluster property dictionaries.
ksizes[k["id"]] = k["size"]
krads[k["id"]] = k["radius_uw"]
kdens[k["id"]] = k["density_uw"]
klins[k["id"]] = k["lin_linearity"]
kinners[k["id"]] = k["innerfrac"]
kttcs[k["id"]] = k["totalcounts"]
kmxcs[k["id"]] = k["maxcounts"]
lg.info(" *")
lg.info(" * Cluster ID: '%s'." % (k["id"]))
lg.info(" *")
lg.info(" *--> Size : % 5d [pixels]" % (k["size"]))
lg.info(" *--> Radius : %8.2f [pixels]" % (k["radius_uw"]))
lg.info(" *--> Density : %8.2f [pixels^-1]" % (k["density_uw"]))
lg.info(" *--> Linearity : %8.2f" % (k["lin_linearity"]))
lg.info(" *")
# Check if the cluster is on the edge of the frame.
if k["xmin"] <= 0.1 or k["xmax"] >= 254.9 or k["ymin"] <= 0.1 or k["ymax"] >= 254.9:
ks[k["id"]] = "Edge"
n_edge_klusters += 1
continue
# Loop over the types and check for a match.
for t in types:
for typename, vals in t.iteritems():
size_min = vals["size_min"]
size_max = vals["size_max"]
rad_min = vals["rad_min"]
rad_max = vals["rad_max"]
rho_min = vals["rho_min"]
rho_max = vals["rho_max"]
lin_min = vals["lin_min"]
lin_max = vals["lin_max"]
inr_min = vals["inr_min"]
inr_max = vals["inr_max"]
ttc_min = vals["ttc_min"]
ttc_max = vals["ttc_max"]
mxc_min = vals["mxc_min"]
mxc_max = vals["mxc_max"]
#
# If it isn't, check if it matches the current type.
if (k["size"] >= size_min) and (k["size"] <= size_max) and \
(k["radius_uw"] >= rad_min) and (k["radius_uw"] <= rad_max) and \
(k["lin_linearity"] >= lin_min) and (k["lin_linearity"] <= lin_max) and \
(k["density_uw"] >= rho_min) and (k["density_uw"] <= rho_max) and \
(k["innerfrac"] >= inr_min) and (k["innerfrac"] <= inr_max) and \
(k["totalcounts"] >= ttc_min) and (k["totalcounts"] <= ttc_max) and \
(k["maxcounts"] >= mxc_min) and (k["maxcounts"] <= mxc_max):
lg.info(" *==> Cluster ID '%s' is of type: '%s'." % (k["id"], typename))
lg.info(" *")
if k["id"] in ks.keys():
print("* Conflicting types: '%s' vs. '%s'." % (typename, ks[k["id"]]))
raise Exception("* ERROR! Cluster already sorted - your types.json contains overlapping definitions.")
# Assign the cluster to the current type.
ks[k["id"]] = typename
# Find un-identified, non-edge klusters.
for k in kd:
if k["id"] not in ks.keys():
ks[k["id"]] = "None"
lg.info(" *")
lg.info(" * SUMMARY:")
lg.info(" *")
for cid, ctype in ks.iteritems():
lg.info(" * %s is '%s'." % (str(cid), str(ctype)))
print("*")
print("* Sorted %d clusters!" % (len(ks)))
## Path to the sorting HTML page.
homepagename = os.path.join(sortedpath, "index.html")
## The index page for the sorted clusters.
pg = ""
pg += "<!DOCTYPE html>\n"
pg += "<html>\n"
pg += " <head>\n"
pg += " <link rel=\"stylesheet\" type=\"text/css\" "
pg += "href=\"assets/css/style.css\">\n"
pg += " </head>\n"
pg += " <body>\n"
pg += " <h1>CERN@school: Cluster Sorting</h1>\n"
pg += " <h2>Dataset summary</h2>\n"
pg += " <p>\n"
pg += " <ul>\n"
pg += " <li>Dataset path = '%s'</li>\n" % (datapath)
pg += " <li>Number of clusters = %d</li>\n" % (len(kd))
pg += " </ul>\n"
pg += " </p>\n"
pg += " <h2>Cluster types</h2>\n"
pg += " <p>\n"
# Make this into a table.
pg += " <table>\n"
pg += " <tr><th>Type</th><th colspan=\"2\">Clusters</th><th>%</th></tr>\n"
# Loop over the cluster types.
for typename in sorted(alltypes):
## The cluster type page name.
kpgname = "%s/%s.html" % (sortedpath, typename)
kpg = ""
kpg += "<!DOCTYPE html>\n"
kpg += "<html>\n"
kpg += " <head>\n"
kpg += " <link rel=\"stylesheet\" type=\"text/css\" "
kpg += "href=\"assets/css/style.css\">\n"
kpg += " </head>\n"
kpg += " <body>\n"
kpg += " <h1>CERN@school: '%s' Clusters</h1>\n" % (typename)
kpg += " <p>Back to the <a href=\"index.html\">cluster types</a> page.</p>\n"
kpg += " <table>\n"
## The number of clusters of this type.
numtype = 0
for kl, ktype in ks.iteritems():
if ktype == typename:
kpg += " <tr>\n"
kpg += " <td>"
kpg += "<a href=\"../clusters/%s.png\" target=\"_blank\">" % (kl)
kpg += "<img src=\"../clusters/%s.png\" style=\"width:256px\"/>" % (kl)
kpg += "<a/></td>\n"
kpg += " <td>ID:<br />Size:<br />Radius:<br />Density:<br />Linearity:<br />Inner frac.:<br />Total counts:<br />Max. counts:</td>\n"
kpg += " <td>%s<br />%d<br />%8.2f<br />%8.2f<br />%8.2f<br />" % (kl,ksizes[kl],krads[kl],kdens[kl],klins[kl])
kpg += "%f<br />%d<br />%d</td>\n" % (kinners[kl], kttcs[kl], kmxcs[kl])
kpg += " </tr>\n"
#
numtype += 1
kpg += " </table>\n"
kpg += " </ul>\n"
kpg += " </p>\n"
kpg += " </body>\n"
kpg += "</html>"
kf = open(kpgname, "w")
kf.write(kpg)
kf.close()
# Write the entry on the sorting homepage table.
pg += " <tr>"
pg += "<td><a href=\"%s.html\">%s</a></td>" % (typename, typename)
pg += "<td style=\"text-align:right\">%d</td>" % (numtype)
pg += "<td>"
if typename != "Edge":
perc = 100.0 * (numtype) / (len(ks) - n_edge_klusters)
for i in range(int(perc)):
pg += "|"
pg += "</td>"
pg += "<td style=\"text-align:right\">% 4.1f</td>" % (perc)
        else:
            pg += "</td><td></td>\n"
pg += "</tr>\n"
pg += " </table>\n"
pg += " </p>\n"
pg += " </body>\n"
pg += "</html>"
## The text file for the HTML page.
f = open(homepagename, "w")
f.write(pg)
f.close()
# Now you can view "index.html" to see your results!
print("*")
print("* Sorting complete.")
print("* View your results by opening '%s' in a browser, e.g." % (homepagename))
print("* firefox %s &" % (homepagename))
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession # type: ignore
import json # type: ignore
import grpc # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import rest_streaming
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
import dataclasses
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.types import compute
from .base import VpnGatewaysTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class VpnGatewaysRestInterceptor:
"""Interceptor for VpnGateways.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the VpnGatewaysRestTransport.
.. code-block:: python
        class MyCustomVpnGatewaysInterceptor(VpnGatewaysRestInterceptor):
            def pre_aggregated_list(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_aggregated_list(self, response):
                logging.log(f"Received response: {response}")
                return response
            def pre_delete(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_delete(self, response):
                logging.log(f"Received response: {response}")
                return response
            def pre_get(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_get(self, response):
                logging.log(f"Received response: {response}")
                return response
            def pre_get_status(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_get_status(self, response):
                logging.log(f"Received response: {response}")
                return response
            def pre_insert(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_insert(self, response):
                logging.log(f"Received response: {response}")
                return response
            def pre_list(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_list(self, response):
                logging.log(f"Received response: {response}")
                return response
            def pre_set_labels(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_set_labels(self, response):
                logging.log(f"Received response: {response}")
                return response
            def pre_test_iam_permissions(self, request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_test_iam_permissions(self, response):
                logging.log(f"Received response: {response}")
                return response
transport = VpnGatewaysRestTransport(interceptor=MyCustomVpnGatewaysInterceptor())
client = VpnGatewaysClient(transport=transport)
"""
def pre_aggregated_list(
self,
request: compute.AggregatedListVpnGatewaysRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.AggregatedListVpnGatewaysRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for aggregated_list
Override in a subclass to manipulate the request or metadata
before they are sent to the VpnGateways server.
"""
return request, metadata
def post_aggregated_list(
self, response: compute.VpnGatewayAggregatedList
) -> compute.VpnGatewayAggregatedList:
"""Post-rpc interceptor for aggregated_list
Override in a subclass to manipulate the response
after it is returned by the VpnGateways server but before
it is returned to user code.
"""
return response
def pre_delete(
self,
request: compute.DeleteVpnGatewayRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.DeleteVpnGatewayRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete
Override in a subclass to manipulate the request or metadata
before they are sent to the VpnGateways server.
"""
return request, metadata
def post_delete(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for delete
Override in a subclass to manipulate the response
after it is returned by the VpnGateways server but before
it is returned to user code.
"""
return response
def pre_get(
self, request: compute.GetVpnGatewayRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[compute.GetVpnGatewayRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get
Override in a subclass to manipulate the request or metadata
before they are sent to the VpnGateways server.
"""
return request, metadata
def post_get(self, response: compute.VpnGateway) -> compute.VpnGateway:
"""Post-rpc interceptor for get
Override in a subclass to manipulate the response
after it is returned by the VpnGateways server but before
it is returned to user code.
"""
return response
def pre_get_status(
self,
request: compute.GetStatusVpnGatewayRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.GetStatusVpnGatewayRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_status
Override in a subclass to manipulate the request or metadata
before they are sent to the VpnGateways server.
"""
return request, metadata
def post_get_status(
self, response: compute.VpnGatewaysGetStatusResponse
) -> compute.VpnGatewaysGetStatusResponse:
"""Post-rpc interceptor for get_status
Override in a subclass to manipulate the response
after it is returned by the VpnGateways server but before
it is returned to user code.
"""
return response
def pre_insert(
self,
request: compute.InsertVpnGatewayRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.InsertVpnGatewayRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for insert
Override in a subclass to manipulate the request or metadata
before they are sent to the VpnGateways server.
"""
return request, metadata
def post_insert(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for insert
Override in a subclass to manipulate the response
after it is returned by the VpnGateways server but before
it is returned to user code.
"""
return response
def pre_list(
self,
request: compute.ListVpnGatewaysRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.ListVpnGatewaysRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list
Override in a subclass to manipulate the request or metadata
before they are sent to the VpnGateways server.
"""
return request, metadata
def post_list(self, response: compute.VpnGatewayList) -> compute.VpnGatewayList:
"""Post-rpc interceptor for list
Override in a subclass to manipulate the response
after it is returned by the VpnGateways server but before
it is returned to user code.
"""
return response
def pre_set_labels(
self,
request: compute.SetLabelsVpnGatewayRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.SetLabelsVpnGatewayRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for set_labels
Override in a subclass to manipulate the request or metadata
before they are sent to the VpnGateways server.
"""
return request, metadata
def post_set_labels(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for set_labels
Override in a subclass to manipulate the response
after it is returned by the VpnGateways server but before
it is returned to user code.
"""
return response
def pre_test_iam_permissions(
self,
request: compute.TestIamPermissionsVpnGatewayRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.TestIamPermissionsVpnGatewayRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for test_iam_permissions
Override in a subclass to manipulate the request or metadata
before they are sent to the VpnGateways server.
"""
return request, metadata
def post_test_iam_permissions(
self, response: compute.TestPermissionsResponse
) -> compute.TestPermissionsResponse:
"""Post-rpc interceptor for test_iam_permissions
Override in a subclass to manipulate the response
after it is returned by the VpnGateways server but before
it is returned to user code.
"""
return response
@dataclasses.dataclass
class VpnGatewaysRestStub:
_session: AuthorizedSession
_host: str
_interceptor: VpnGatewaysRestInterceptor
class VpnGatewaysRestTransport(VpnGatewaysTransport):
"""REST backend transport for VpnGateways.
The VpnGateways API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
_STUBS: Dict[str, VpnGatewaysRestStub] = {}
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[VpnGatewaysRestInterceptor] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or VpnGatewaysRestInterceptor()
self._prep_wrapped_messages(client_info)
class _AggregatedList(VpnGatewaysRestStub):
def __hash__(self):
return hash("AggregatedList")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.AggregatedListVpnGatewaysRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.VpnGatewayAggregatedList:
r"""Call the aggregated list method over HTTP.
Args:
request (~.compute.AggregatedListVpnGatewaysRequest):
The request object. A request message for
VpnGateways.AggregatedList. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.VpnGatewayAggregatedList:
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/aggregated/vpnGateways",
},
]
request, metadata = self._interceptor.pre_aggregated_list(request, metadata)
request_kwargs = compute.AggregatedListVpnGatewaysRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.AggregatedListVpnGatewaysRequest.to_json(
compute.AggregatedListVpnGatewaysRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.VpnGatewayAggregatedList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_aggregated_list(resp)
return resp
class _Delete(VpnGatewaysRestStub):
def __hash__(self):
return hash("Delete")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.DeleteVpnGatewayRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteVpnGatewayRequest):
The request object. A request message for
VpnGateways.Delete. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/compute/v1/projects/{project}/regions/{region}/vpnGateways/{vpn_gateway}",
},
]
request, metadata = self._interceptor.pre_delete(request, metadata)
request_kwargs = compute.DeleteVpnGatewayRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.DeleteVpnGatewayRequest.to_json(
compute.DeleteVpnGatewayRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_delete(resp)
return resp
class _Get(VpnGatewaysRestStub):
def __hash__(self):
return hash("Get")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetVpnGatewayRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.VpnGateway:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetVpnGatewayRequest):
The request object. A request message for
VpnGateways.Get. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.VpnGateway:
Represents a HA VPN gateway. HA VPN
is a high-availability (HA) Cloud VPN
solution that lets you securely connect
your on-premises network to your Google
Cloud Virtual Private Cloud network
through an IPsec VPN connection in a
single region. For more information
about Cloud HA VPN solutions, see Cloud
VPN topologies .
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/vpnGateways/{vpn_gateway}",
},
]
request, metadata = self._interceptor.pre_get(request, metadata)
request_kwargs = compute.GetVpnGatewayRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.GetVpnGatewayRequest.to_json(
compute.GetVpnGatewayRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.VpnGateway.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_get(resp)
return resp
class _GetStatus(VpnGatewaysRestStub):
def __hash__(self):
return hash("GetStatus")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetStatusVpnGatewayRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.VpnGatewaysGetStatusResponse:
r"""Call the get status method over HTTP.
Args:
request (~.compute.GetStatusVpnGatewayRequest):
The request object. A request message for
VpnGateways.GetStatus. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.VpnGatewaysGetStatusResponse:
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/vpnGateways/{vpn_gateway}/getStatus",
},
]
request, metadata = self._interceptor.pre_get_status(request, metadata)
request_kwargs = compute.GetStatusVpnGatewayRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.GetStatusVpnGatewayRequest.to_json(
compute.GetStatusVpnGatewayRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.VpnGatewaysGetStatusResponse.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_get_status(resp)
return resp
class _Insert(VpnGatewaysRestStub):
def __hash__(self):
return hash("Insert")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.InsertVpnGatewayRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertVpnGatewayRequest):
The request object. A request message for
VpnGateways.Insert. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/vpnGateways",
"body": "vpn_gateway_resource",
},
]
request, metadata = self._interceptor.pre_insert(request, metadata)
request_kwargs = compute.InsertVpnGatewayRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.VpnGateway.to_json(
compute.VpnGateway(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.InsertVpnGatewayRequest.to_json(
compute.InsertVpnGatewayRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_insert(resp)
return resp
class _List(VpnGatewaysRestStub):
def __hash__(self):
return hash("List")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListVpnGatewaysRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.VpnGatewayList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListVpnGatewaysRequest):
The request object. A request message for
VpnGateways.List. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.VpnGatewayList:
Contains a list of VpnGateway
resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/vpnGateways",
},
]
request, metadata = self._interceptor.pre_list(request, metadata)
request_kwargs = compute.ListVpnGatewaysRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.ListVpnGatewaysRequest.to_json(
compute.ListVpnGatewaysRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.VpnGatewayList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_list(resp)
return resp
class _SetLabels(VpnGatewaysRestStub):
def __hash__(self):
return hash("SetLabels")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.SetLabelsVpnGatewayRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the set labels method over HTTP.
Args:
request (~.compute.SetLabelsVpnGatewayRequest):
The request object. A request message for
VpnGateways.SetLabels. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/vpnGateways/{resource}/setLabels",
"body": "region_set_labels_request_resource",
},
]
request, metadata = self._interceptor.pre_set_labels(request, metadata)
request_kwargs = compute.SetLabelsVpnGatewayRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.RegionSetLabelsRequest.to_json(
compute.RegionSetLabelsRequest(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.SetLabelsVpnGatewayRequest.to_json(
compute.SetLabelsVpnGatewayRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_set_labels(resp)
return resp
class _TestIamPermissions(VpnGatewaysRestStub):
def __hash__(self):
return hash("TestIamPermissions")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.TestIamPermissionsVpnGatewayRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.TestPermissionsResponse:
r"""Call the test iam permissions method over HTTP.
Args:
request (~.compute.TestIamPermissionsVpnGatewayRequest):
The request object. A request message for
VpnGateways.TestIamPermissions. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.TestPermissionsResponse:
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions",
"body": "test_permissions_request_resource",
},
]
request, metadata = self._interceptor.pre_test_iam_permissions(
request, metadata
)
request_kwargs = compute.TestIamPermissionsVpnGatewayRequest.to_dict(
request
)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.TestPermissionsRequest.to_json(
compute.TestPermissionsRequest(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.TestIamPermissionsVpnGatewayRequest.to_json(
compute.TestIamPermissionsVpnGatewayRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.TestPermissionsResponse.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_test_iam_permissions(resp)
return resp
@property
def aggregated_list(
self,
) -> Callable[
[compute.AggregatedListVpnGatewaysRequest], compute.VpnGatewayAggregatedList
]:
stub = self._STUBS.get("aggregated_list")
if not stub:
stub = self._STUBS["aggregated_list"] = self._AggregatedList(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def delete(self) -> Callable[[compute.DeleteVpnGatewayRequest], compute.Operation]:
stub = self._STUBS.get("delete")
if not stub:
stub = self._STUBS["delete"] = self._Delete(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def get(self) -> Callable[[compute.GetVpnGatewayRequest], compute.VpnGateway]:
stub = self._STUBS.get("get")
if not stub:
stub = self._STUBS["get"] = self._Get(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def get_status(
self,
) -> Callable[
[compute.GetStatusVpnGatewayRequest], compute.VpnGatewaysGetStatusResponse
]:
stub = self._STUBS.get("get_status")
if not stub:
stub = self._STUBS["get_status"] = self._GetStatus(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def insert(self) -> Callable[[compute.InsertVpnGatewayRequest], compute.Operation]:
stub = self._STUBS.get("insert")
if not stub:
stub = self._STUBS["insert"] = self._Insert(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def list(
self,
) -> Callable[[compute.ListVpnGatewaysRequest], compute.VpnGatewayList]:
stub = self._STUBS.get("list")
if not stub:
stub = self._STUBS["list"] = self._List(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def set_labels(
self,
) -> Callable[[compute.SetLabelsVpnGatewayRequest], compute.Operation]:
stub = self._STUBS.get("set_labels")
if not stub:
stub = self._STUBS["set_labels"] = self._SetLabels(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def test_iam_permissions(
self,
) -> Callable[
[compute.TestIamPermissionsVpnGatewayRequest], compute.TestPermissionsResponse
]:
stub = self._STUBS.get("test_iam_permissions")
if not stub:
stub = self._STUBS["test_iam_permissions"] = self._TestIamPermissions(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
def close(self):
self._session.close()
__all__ = ("VpnGatewaysRestTransport",)
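# Illustrative usage sketch (assumption): wiring a custom interceptor into the
# REST transport and handing it to the GAPIC client, following the pattern shown
# in the VpnGatewaysRestInterceptor docstring above. The project and region
# values are placeholders.
#
#     from google.cloud.compute_v1 import VpnGatewaysClient
#
#     class LoggingInterceptor(VpnGatewaysRestInterceptor):
#         def pre_list(self, request, metadata):
#             print(f"listing VPN gateways: {request}")
#             return request, metadata
#
#     transport = VpnGatewaysRestTransport(interceptor=LoggingInterceptor())
#     client = VpnGatewaysClient(transport=transport)
#     for gateway in client.list(project="my-project", region="us-central1"):
#         print(gateway.name)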
|
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib.db import api as db_api
from neutron_lib import exceptions as n_exc
from neutron_lib.exceptions import dvr as dvr_exc
from neutron_lib.objects import exceptions
from neutron_lib.plugins import directory
from neutron_lib.utils import net
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from sqlalchemy import or_
from neutron.common import utils
from neutron.conf.db import dvr_mac_db
from neutron.conf.db import l3_dvr_db
from neutron.db import models_v2
from neutron.extensions import dvr as ext_dvr
from neutron.objects import router
from neutron.plugins.ml2 import models as ml2_models
LOG = logging.getLogger(__name__)
dvr_mac_db.register_db_dvr_mac_opts()
l3_dvr_db.register_db_l3_dvr_opts()
def get_ports_query_by_subnet_and_ip(context, subnet, ip_addresses=None):
query = context.session.query(models_v2.Port)
query = query.join(models_v2.IPAllocation)
query = query.filter(
models_v2.Port.id == models_v2.IPAllocation.port_id,
models_v2.IPAllocation.subnet_id == subnet)
if ip_addresses:
query = query.filter(
models_v2.IPAllocation.ip_address.in_(ip_addresses))
return query
@registry.has_registry_receivers
class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase):
"""Mixin class to add dvr mac address to db_plugin_base_v2."""
@property
def plugin(self):
try:
if self._plugin is not None:
return self._plugin
except AttributeError:
pass
self._plugin = directory.get_plugin()
return self._plugin
@staticmethod
@db_api.retry_if_session_inactive()
def _db_delete_mac_associated_with_agent(context, agent):
host = agent['host']
plugin = directory.get_plugin()
if [a for a in plugin.get_agents(context, filters={'host': [host]})
if a['id'] != agent['id']]:
# there are still agents on this host, don't mess with the mac
# entry until they are all deleted.
return
if not router.DVRMacAddress.delete_objects(context, host=host):
return
# notify remaining agents so they cleanup flows
dvr_macs = plugin.get_dvr_mac_address_list(context)
plugin.notifier.dvr_mac_address_update(context, dvr_macs)
@staticmethod
@registry.receives(resources.AGENT, [events.BEFORE_DELETE])
def _delete_mac_associated_with_agent(resource, event,
trigger, payload=None):
DVRDbMixin._db_delete_mac_associated_with_agent(
payload.context, payload.latest_state)
@db_api.CONTEXT_READER
def _get_dvr_mac_address_by_host(self, context, host):
dvr_obj = router.DVRMacAddress.get_object(context, host=host)
if not dvr_obj:
raise dvr_exc.DVRMacAddressNotFound(host=host)
return self._make_dvr_mac_address_dict(dvr_obj)
@utils.transaction_guard
@db_api.retry_if_session_inactive()
def _create_dvr_mac_address_retry(self, context, host, base_mac):
with db_api.CONTEXT_WRITER.using(context):
mac_address = net.get_random_mac(base_mac)
dvr_mac_binding = router.DVRMacAddress(
context, host=host, mac_address=netaddr.EUI(mac_address))
dvr_mac_binding.create()
LOG.debug("Generated DVR mac for host %(host)s "
"is %(mac_address)s",
{'host': host, 'mac_address': mac_address})
dvr_macs = self.get_dvr_mac_address_list(context)
# TODO(vivek): improve scalability of this fanout by
# sending a single mac address rather than the entire set
self.notifier.dvr_mac_address_update(context, dvr_macs)
return self._make_dvr_mac_address_dict(dvr_mac_binding)
def _create_dvr_mac_address(self, context, host):
"""Create DVR mac address for a given host."""
base_mac = cfg.CONF.dvr_base_mac.split(':')
try:
return self._create_dvr_mac_address_retry(context, host, base_mac)
except exceptions.NeutronDbObjectDuplicateEntry:
LOG.error("MAC generation error after %s attempts",
db_api.MAX_RETRIES)
raise n_exc.HostMacAddressGenerationFailure(host=host)
@db_api.CONTEXT_READER
def get_dvr_mac_address_list(self, context):
return [
dvr_mac.to_dict()
for dvr_mac in router.DVRMacAddress.get_objects(context)
]
def get_dvr_mac_address_by_host(self, context, host):
"""Determine the MAC for the DVR port associated to host."""
if not host:
return
try:
return self._get_dvr_mac_address_by_host(context, host)
except dvr_exc.DVRMacAddressNotFound:
return self._create_dvr_mac_address(context, host)
def _make_dvr_mac_address_dict(self, dvr_mac_entry, fields=None):
return {'host': dvr_mac_entry['host'],
'mac_address': str(dvr_mac_entry['mac_address'])}
@log_helpers.log_method_call
@db_api.retry_if_session_inactive()
def get_ports_on_host_by_subnet(self, context, host, subnet):
"""Returns DVR serviced ports on a given subnet in the input host
This method returns ports that need to be serviced by DVR.
:param context: rpc request context
:param host: host id to match and extract ports of interest
:param subnet: subnet id to match and extract ports of interest
:returns: list -- Ports on the given subnet in the input host
"""
host_dvr_for_dhcp = cfg.CONF.host_dvr_for_dhcp
query = context.session.query(models_v2.Port)
query = query.join(ml2_models.PortBinding)
query = query.join(models_v2.IPAllocation)
query = query.filter(
models_v2.Port.id == ml2_models.PortBinding.port_id,
models_v2.Port.id == models_v2.IPAllocation.port_id,
ml2_models.PortBinding.host == host,
models_v2.IPAllocation.subnet_id == subnet)
owner_filter = or_(
models_v2.Port.device_owner.startswith(
constants.DEVICE_OWNER_COMPUTE_PREFIX),
models_v2.Port.device_owner.in_(
utils.get_other_dvr_serviced_device_owners(host_dvr_for_dhcp)))
ports_query = query.filter(owner_filter)
ports = [
self.plugin._make_port_dict(port, process_extensions=False,
with_fixed_ips=False)
for port in ports_query.all()
]
LOG.debug("Returning list of dvr serviced ports on host %(host)s"
" for subnet %(subnet)s ports %(ports)s",
{'host': host, 'subnet': subnet,
'ports': ports})
return ports
@log_helpers.log_method_call
@db_api.retry_if_session_inactive()
def get_subnet_for_dvr(self, context, subnet, fixed_ips=None):
if fixed_ips:
subnet_data = fixed_ips[0]['subnet_id']
else:
subnet_data = subnet
try:
subnet_info = self.plugin.get_subnet(
context, subnet_data)
except n_exc.SubnetNotFound:
return {}
else:
# retrieve the gateway port on this subnet
if fixed_ips:
ip_address = fixed_ips[0]['ip_address']
else:
ip_address = subnet_info['gateway_ip']
query = get_ports_query_by_subnet_and_ip(
context, subnet, [ip_address])
internal_gateway_ports = query.all()
if not internal_gateway_ports:
LOG.error("Could not retrieve gateway port "
"for subnet %s", subnet_info)
return {}
internal_port = internal_gateway_ports[0]
subnet_info['gateway_mac'] = internal_port['mac_address']
return subnet_info
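# Illustrative sketch (assumption): the module-level query helper above can also
# be used directly by code that already holds a database-enabled context; 'ctx',
# the subnet id and the IP address below are hypothetical placeholders.
#
#     query = get_ports_query_by_subnet_and_ip(ctx, subnet_id, ['10.0.0.1'])
#     gateway_ports = query.all()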
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains classes to wrap Python VTK to make nice molecular plots.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Nov 27, 2011"
import os
import itertools
import math
import subprocess
import time
import numpy as np
try:
import vtk
from vtk import vtkInteractorStyleTrackballCamera
except ImportError:
    # VTK is not present; vtkInteractorStyleTrackballCamera is set to object to avoid errors in the unittests.
vtk = None
vtkInteractorStyleTrackballCamera = object
from monty.serialization import loadfn
from monty.dev import requires
from pymatgen.util.coord import in_coord_list
from pymatgen.core.periodic_table import Specie
from pymatgen.core.structure import Structure
from pymatgen.core.sites import PeriodicSite
module_dir = os.path.dirname(os.path.abspath(__file__))
EL_COLORS = loadfn(os.path.join(module_dir, "ElementColorSchemes.yaml"))
class StructureVis:
"""
Provides Structure object visualization using VTK.
"""
@requires(vtk, "Visualization requires the installation of VTK with "
"Python bindings.")
def __init__(self, element_color_mapping=None, show_unit_cell=True,
show_bonds=False, show_polyhedron=True,
poly_radii_tol_factor=0.5, excluded_bonding_elements=None):
"""
Constructs a Structure Visualization.
Args:
element_color_mapping: Optional color mapping for the elements,
as a dict of {symbol: rgb tuple}. For example, {"Fe": (255,
123,0), ....} If None is specified, a default based on
Jmol"s color scheme is used.
show_unit_cell: Set to False to not show the unit cell
boundaries. Defaults to True.
            show_bonds: Set to True to show bonds. Defaults to False.
            show_polyhedron: Set to True to show polyhedrons. Defaults to
                True.
poly_radii_tol_factor: The polyhedron and bonding code uses the
ionic radii of the elements or species to determine if two
atoms are bonded. This specifies a tolerance scaling factor
such that atoms which are (1 + poly_radii_tol_factor) * sum
of ionic radii apart are still considered as bonded.
excluded_bonding_elements: List of atom types to exclude from
bonding determination. Defaults to an empty list. Useful
when trying to visualize a certain atom type in the
framework (e.g., Li in a Li-ion battery cathode material).
Useful keyboard shortcuts implemented.
h : Show help
A/a : Increase/decrease cell by one unit vector in a-direction
B/b : Increase/decrease cell by one unit vector in b-direction
C/c : Increase/decrease cell by one unit vector in c-direction
# : Toggle showing of polyhedrons
- : Toggle showing of bonds
[ : Decrease poly_radii_tol_factor by 0.05
] : Increase poly_radii_tol_factor by 0.05
r : Reset camera direction
o : Orthogonalize structure
Up/Down : Rotate view along Up direction by 90 clock/anticlockwise
Left/right : Rotate view along camera direction by 90
clock/anticlockwise
"""
# create a rendering window and renderer
self.ren = vtk.vtkRenderer()
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.AddRenderer(self.ren)
self.ren.SetBackground(1, 1, 1)
self.title = "Structure Visualizer"
# create a renderwindowinteractor
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.ren_win)
self.mapper_map = {}
self.structure = None
if element_color_mapping:
self.el_color_mapping = element_color_mapping
else:
self.el_color_mapping = EL_COLORS["VESTA"]
self.show_unit_cell = show_unit_cell
self.show_bonds = show_bonds
self.show_polyhedron = show_polyhedron
self.poly_radii_tol_factor = poly_radii_tol_factor
self.excluded_bonding_elements = excluded_bonding_elements if \
excluded_bonding_elements else []
self.show_help = True
self.supercell = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
self.redraw()
style = StructureInteractorStyle(self)
self.iren.SetInteractorStyle(style)
self.ren.parent = self
def rotate_view(self, axis_ind=0, angle=0):
"""
Rotate the camera view.
Args:
axis_ind: Index of axis to rotate. Defaults to 0, i.e., a-axis.
angle: Angle to rotate by. Defaults to 0.
"""
camera = self.ren.GetActiveCamera()
if axis_ind == 0:
camera.Roll(angle)
elif axis_ind == 1:
camera.Azimuth(angle)
else:
camera.Pitch(angle)
self.ren_win.Render()
def write_image(self, filename="image.png", magnification=1,
image_format="png"):
"""
Save render window to an image.
        Args:
            filename: Filename to save to. Defaults to image.png.
            magnification: Magnification factor. Use it to render
                high-resolution images.
            image_format: Choose between jpeg and png. Defaults to png.
"""
render_large = vtk.vtkRenderLargeImage()
render_large.SetInput(self.ren)
if image_format == "jpeg":
writer = vtk.vtkJPEGWriter()
writer.SetQuality(80)
else:
writer = vtk.vtkPNGWriter()
render_large.SetMagnification(magnification)
writer.SetFileName(filename)
writer.SetInputConnection(render_large.GetOutputPort())
self.ren_win.Render()
writer.Write()
del render_large
def redraw(self, reset_camera=False):
"""
Redraw the render window.
Args:
reset_camera: Set to True to reset the camera to a
pre-determined default for each structure. Defaults to False.
"""
self.ren.RemoveAllViewProps()
self.picker = None
self.add_picker_fixed()
self.helptxt_mapper = vtk.vtkTextMapper()
tprops = self.helptxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 0)
if self.structure is not None:
self.set_structure(self.structure, reset_camera)
self.ren_win.Render()
def orthongonalize_structure(self):
if self.structure is not None:
self.set_structure(self.structure.copy(sanitize=True))
self.ren_win.Render()
def display_help(self):
"""
Display the help for various keyboard shortcuts.
"""
helptxt = ["h : Toggle help",
"A/a, B/b or C/c : Increase/decrease cell by one a,"
" b or c unit vector", "# : Toggle showing of polyhedrons",
"-: Toggle showing of bonds", "r : Reset camera direction",
"[/]: Decrease or increase poly_radii_tol_factor "
"by 0.05. Value = " + str(self.poly_radii_tol_factor),
"Up/Down: Rotate view along Up direction by 90 "
"clockwise/anticlockwise",
"Left/right: Rotate view along camera direction by "
"90 clockwise/anticlockwise", "s: Save view to image.png",
"o: Orthogonalize structure"]
self.helptxt_mapper.SetInput("\n".join(helptxt))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
def set_structure(self, structure, reset_camera=True, to_unit_cell=True):
"""
Add a structure to the visualizer.
Args:
structure: structure to visualize
reset_camera: Set to True to reset the camera to a default
determined based on the structure.
            to_unit_cell: Whether or not to fold sites back into the unit cell.
"""
self.ren.RemoveAllViewProps()
has_lattice = hasattr(structure, "lattice")
if has_lattice:
s = Structure.from_sites(structure, to_unit_cell=to_unit_cell)
s.make_supercell(self.supercell, to_unit_cell=to_unit_cell)
else:
s = structure
inc_coords = []
for site in s:
self.add_site(site)
inc_coords.append(site.coords)
count = 0
labels = ["a", "b", "c"]
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
if has_lattice:
matrix = s.lattice.matrix
if self.show_unit_cell and has_lattice:
#matrix = s.lattice.matrix
self.add_text([0, 0, 0], "o")
for vec in matrix:
self.add_line((0, 0, 0), vec, colors[count])
self.add_text(vec, labels[count], colors[count])
count += 1
for (vec1, vec2) in itertools.permutations(matrix, 2):
self.add_line(vec1, vec1 + vec2)
for (vec1, vec2, vec3) in itertools.permutations(matrix, 3):
self.add_line(vec1 + vec2, vec1 + vec2 + vec3)
if self.show_bonds or self.show_polyhedron:
elements = sorted(s.composition.elements, key=lambda a: a.X)
anion = elements[-1]
def contains_anion(site):
for sp in site.species.keys():
if sp.symbol == anion.symbol:
return True
return False
anion_radius = anion.average_ionic_radius
for site in s:
exclude = False
max_radius = 0
color = np.array([0, 0, 0])
for sp, occu in site.species.items():
if sp.symbol in self.excluded_bonding_elements \
or sp == anion:
exclude = True
break
max_radius = max(max_radius, sp.average_ionic_radius)
color = color + \
occu * np.array(self.el_color_mapping.get(sp.symbol,
[0, 0, 0]))
if not exclude:
max_radius = (1 + self.poly_radii_tol_factor) * \
(max_radius + anion_radius)
nn = structure.get_neighbors(site, float(max_radius))
nn_sites = []
for nnsite, dist in nn:
if contains_anion(nnsite):
nn_sites.append(nnsite)
if not in_coord_list(inc_coords, nnsite.coords):
self.add_site(nnsite)
if self.show_bonds:
self.add_bonds(nn_sites, site)
if self.show_polyhedron:
color = [i / 255 for i in color]
self.add_polyhedron(nn_sites, site, color)
if self.show_help:
self.helptxt_actor = vtk.vtkActor2D()
self.helptxt_actor.VisibilityOn()
self.helptxt_actor.SetMapper(self.helptxt_mapper)
self.ren.AddActor(self.helptxt_actor)
self.display_help()
camera = self.ren.GetActiveCamera()
if reset_camera:
if has_lattice:
#Adjust the camera for best viewing
lengths = s.lattice.abc
pos = (matrix[1] + matrix[2]) * 0.5 + \
matrix[0] * max(lengths) / lengths[0] * 3.5
camera.SetPosition(pos)
camera.SetViewUp(matrix[2])
camera.SetFocalPoint((matrix[0] + matrix[1] + matrix[2]) * 0.5)
else:
origin = s.center_of_mass
max_site = max(
s, key=lambda site: site.distance_from_point(origin))
camera.SetPosition(origin + 5 * (max_site.coords - origin))
camera.SetFocalPoint(s.center_of_mass)
self.structure = structure
self.title = s.composition.formula
def zoom(self, factor):
"""
Zoom the camera view by a factor.
"""
camera = self.ren.GetActiveCamera()
camera.Zoom(factor)
self.ren_win.Render()
def show(self):
"""
Display the visualizer.
"""
self.iren.Initialize()
self.ren_win.SetSize(800, 800)
self.ren_win.SetWindowName(self.title)
self.ren_win.Render()
self.iren.Start()
def add_site(self, site):
"""
        Add a site to the render window. The site is displayed as a sphere, the
        color of which is determined based on the element. Partially occupied
        sites are displayed as sphere wedges, one per species, with any
        unoccupied fraction shown in white.
Args:
site: Site to add.
"""
start_angle = 0
radius = 0
total_occu = 0
for specie, occu in site.species.items():
radius += occu * (specie.ionic_radius
if isinstance(specie, Specie)
and specie.ionic_radius
else specie.average_ionic_radius)
total_occu += occu
vis_radius = 0.2 + 0.002 * radius
for specie, occu in site.species.items():
if not specie:
color = (1, 1, 1)
elif specie.symbol in self.el_color_mapping:
color = [i / 255 for i in self.el_color_mapping[specie.symbol]]
mapper = self.add_partial_sphere(site.coords, vis_radius, color,
start_angle, start_angle + 360 * occu)
self.mapper_map[mapper] = [site]
start_angle += 360 * occu
if total_occu < 1:
mapper = self.add_partial_sphere(site.coords, vis_radius, (1,1,1),
start_angle, start_angle + 360 * (1 - total_occu))
self.mapper_map[mapper] = [site]
def add_partial_sphere(self, coords, radius, color, start=0, end=360,
opacity=1.0):
sphere = vtk.vtkSphereSource()
sphere.SetCenter(coords)
sphere.SetRadius(radius)
sphere.SetThetaResolution(18)
sphere.SetPhiResolution(18)
sphere.SetStartTheta(start)
sphere.SetEndTheta(end)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(sphere.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetOpacity(opacity)
self.ren.AddActor(actor)
return mapper
def add_text(self, coords, text, color=(0, 0, 0)):
"""
Add text at a coordinate.
Args:
coords: Coordinates to add text at.
text: Text to place.
color: Color for text as RGB. Defaults to black.
"""
source = vtk.vtkVectorText()
source.SetText(text)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor(color)
follower.SetPosition(coords)
follower.SetScale(0.5)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
def add_line(self, start, end, color=(0.5, 0.5, 0.5), width=1):
"""
Adds a line.
Args:
start: Starting coordinates for line.
end: Ending coordinates for line.
color: Color for text as RGB. Defaults to grey.
width: Width of line. Defaults to 1.
"""
source = vtk.vtkLineSource()
source.SetPoint1(start)
source.SetPoint2(end)
vertexIDs = vtk.vtkStringArray()
vertexIDs.SetNumberOfComponents(1)
vertexIDs.SetName("VertexIDs")
# Set the vertex labels
vertexIDs.InsertNextValue("a")
vertexIDs.InsertNextValue("b")
source.GetOutput().GetPointData().AddArray(vertexIDs)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color)
actor.GetProperty().SetLineWidth(width)
self.ren.AddActor(actor)
def add_polyhedron(self, neighbors, center, color, opacity=1.0,
draw_edges=False, edges_color=[0.0, 0.0, 0.0],
edges_linewidth=2):
"""
Adds a polyhedron.
Args:
neighbors: Neighbors of the polyhedron (the vertices).
center: The atom in the center of the polyhedron.
color: Color for text as RGB.
opacity: Opacity of the polyhedron
            draw_edges: If set to True, a line will be drawn along each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges
"""
points = vtk.vtkPoints()
conv = vtk.vtkConvexPointSet()
for i in range(len(neighbors)):
x, y, z = neighbors[i].coords
points.InsertPoint(i, x, y, z)
conv.GetPointIds().InsertId(i, i)
grid = vtk.vtkUnstructuredGrid()
grid.Allocate(1, 1)
grid.InsertNextCell(conv.GetCellType(), conv.GetPointIds())
grid.SetPoints(points)
dsm = vtk.vtkDataSetMapper()
polysites = [center]
polysites.extend(neighbors)
self.mapper_map[dsm] = polysites
if vtk.VTK_MAJOR_VERSION <= 5:
dsm.SetInputConnection(grid.GetProducerPort())
else:
dsm.SetInputData(grid)
ac = vtk.vtkActor()
#ac.SetMapper(mapHull)
ac.SetMapper(dsm)
ac.GetProperty().SetOpacity(opacity)
if color == 'element':
# If partial occupations are involved, the color of the specie with
# the highest occupation is used
myoccu = 0.0
for specie, occu in center.species.items():
if occu > myoccu:
myspecie = specie
myoccu = occu
color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
ac.GetProperty().SetColor(color)
else:
ac.GetProperty().SetColor(color)
if draw_edges:
ac.GetProperty().SetEdgeColor(edges_color)
ac.GetProperty().SetLineWidth(edges_linewidth)
ac.GetProperty().EdgeVisibilityOn()
self.ren.AddActor(ac)
def add_triangle(self, neighbors, color, center=None, opacity=0.4,
draw_edges=False, edges_color=[0.0, 0.0, 0.0],
edges_linewidth=2):
"""
Adds a triangular surface between three atoms.
Args:
            neighbors: Atoms (sites) between which a triangle will be drawn.
color: Color for triangle as RGB.
center: The "central atom" of the triangle
opacity: opacity of the triangle
            draw_edges: If set to True, a line will be drawn along each edge
edges_color: Color of the line for the edges
edges_linewidth: Width of the line drawn for the edges
"""
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
for ii in range(3):
points.InsertNextPoint(neighbors[ii].x, neighbors[ii].y,
neighbors[ii].z)
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
# polydata object
trianglePolyData = vtk.vtkPolyData()
        trianglePolyData.SetPoints(points)
        trianglePolyData.SetPolys(triangles)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
if color == 'element':
if center is None:
raise ValueError(
'Color should be chosen according to the central atom, '
'and central atom is not provided')
# If partial occupations are involved, the color of the specie with
# the highest occupation is used
myoccu = 0.0
for specie, occu in center.species.items():
if occu > myoccu:
myspecie = specie
myoccu = occu
color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
ac.GetProperty().SetColor(color)
else:
ac.GetProperty().SetColor(color)
if draw_edges:
ac.GetProperty().SetEdgeColor(edges_color)
ac.GetProperty().SetLineWidth(edges_linewidth)
ac.GetProperty().EdgeVisibilityOn()
self.ren.AddActor(ac)
def add_faces(self, faces, color, opacity=0.35):
for face in faces:
if len(face) == 3:
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
for ii in range(3):
points.InsertNextPoint(face[ii][0], face[ii][1], face[ii][2])
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(trianglePolyData.GetProducerPort())
else:
mapper.SetInputData(trianglePolyData)
# mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
elif False and len(face) == 4:
points = vtk.vtkPoints()
for ii in range(4):
points.InsertNextPoint(face[ii][0], face[ii][1], face[ii][2])
line1 = vtk.vtkLine()
line1.GetPointIds().SetId(0, 0)
line1.GetPointIds().SetId(1, 2)
line2 = vtk.vtkLine()
line2.GetPointIds().SetId(0, 3)
line2.GetPointIds().SetId(1, 1)
lines = vtk.vtkCellArray()
lines.InsertNextCell(line1)
lines.InsertNextCell(line2)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines)
ruledSurfaceFilter = vtk.vtkRuledSurfaceFilter()
ruledSurfaceFilter.SetInput(polydata)
ruledSurfaceFilter.SetResolution(15, 15)
ruledSurfaceFilter.SetRuledModeToResample()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(ruledSurfaceFilter.GetOutput())
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
elif len(face) > 3:
                center = np.zeros(3, dtype=float)
                for site in face:
                    center += site
                center /= float(len(face))
for ii in range(len(face)):
points = vtk.vtkPoints()
triangle = vtk.vtkTriangle()
points.InsertNextPoint(face[ii][0], face[ii][1], face[ii][2])
ii2 = np.mod(ii+1, len(face))
points.InsertNextPoint(face[ii2][0], face[ii2][1], face[ii2][2])
points.InsertNextPoint(center[0], center[1], center[2])
for ii in range(3):
triangle.GetPointIds().SetId(ii, ii)
triangles = vtk.vtkCellArray()
triangles.InsertNextCell(triangle)
trianglePolyData = vtk.vtkPolyData()
trianglePolyData.SetPoints(points)
trianglePolyData.SetPolys(triangles)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(trianglePolyData.GetProducerPort())
else:
mapper.SetInputData(trianglePolyData)
# mapper.SetInput(trianglePolyData)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetOpacity(opacity)
ac.GetProperty().SetColor(color)
self.ren.AddActor(ac)
else:
raise ValueError("Number of points for a face should be >= 3")
def add_edges(self, edges, type='line', linewidth=2, color=[0.0, 0.0, 0.0]):
points = vtk.vtkPoints()
lines = vtk.vtkCellArray()
for iedge, edge in enumerate(edges):
points.InsertPoint(2*iedge, edge[0])
points.InsertPoint(2*iedge + 1, edge[1])
lines.InsertNextCell(2)
lines.InsertCellPoint(2*iedge)
lines.InsertCellPoint(2*iedge + 1)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
polydata.SetLines(lines)
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInputConnection(polydata.GetProducerPort())
else:
mapper.SetInputData(polydata)
# mapper.SetInput(polydata)
ac = vtk.vtkActor()
ac.SetMapper(mapper)
ac.GetProperty().SetColor(color)
ac.GetProperty().SetLineWidth(linewidth)
self.ren.AddActor(ac)
def add_bonds(self, neighbors, center, color=None, opacity=None,
radius=0.1):
"""
Adds bonds for a site.
Args:
neighbors: Neighbors of the site.
center: The site in the center for all bonds.
color: Color of the tubes representing the bonds
opacity: Opacity of the tubes representing the bonds
            radius: Radius of the tubes representing the bonds
"""
points = vtk.vtkPoints()
points.InsertPoint(0, center.x, center.y, center.z)
n = len(neighbors)
lines = vtk.vtkCellArray()
for i in range(n):
points.InsertPoint(i + 1, neighbors[i].coords)
lines.InsertNextCell(2)
lines.InsertCellPoint(0)
lines.InsertCellPoint(i + 1)
pd = vtk.vtkPolyData()
pd.SetPoints(points)
pd.SetLines(lines)
tube = vtk.vtkTubeFilter()
if vtk.VTK_MAJOR_VERSION <= 5:
tube.SetInputConnection(pd.GetProducerPort())
else:
tube.SetInputData(pd)
tube.SetRadius(radius)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tube.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
if opacity is not None:
actor.GetProperty().SetOpacity(opacity)
if color is not None:
actor.GetProperty().SetColor(color)
self.ren.AddActor(actor)
def add_picker_fixed(self):
# Create a cell picker.
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
def annotate_pick(obj, event):
if picker.GetCellId() < 0 and not self.show_help:
self.helptxt_actor.VisibilityOff()
else:
mapper = picker.GetMapper()
if mapper in self.mapper_map:
output = []
for site in self.mapper_map[mapper]:
row = ["{} - ".format(site.species_string),
", ".join(["{:.3f}".format(c)
for c in site.frac_coords]),
"[" + ", ".join(["{:.3f}".format(c)
for c in site.coords]) +
"]"]
output.append("".join(row))
self.helptxt_mapper.SetInput("\n".join(output))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
self.show_help = False
self.picker = picker
picker.AddObserver("EndPickEvent", annotate_pick)
self.iren.SetPicker(picker)
def add_picker(self):
# Create a cell picker.
picker = vtk.vtkCellPicker()
# Create a Python function to create the text for the text mapper used
# to display the results of picking.
source = vtk.vtkVectorText()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor((0, 0, 0))
follower.SetScale(0.2)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
follower.VisibilityOff()
def annotate_pick(obj, event):
if picker.GetCellId() < 0:
follower.VisibilityOff()
else:
pick_pos = picker.GetPickPosition()
mapper = picker.GetMapper()
if mapper in self.mapper_map:
site = self.mapper_map[mapper]
output = [site.species_string, "Frac. coords: " +
" ".join(["{:.4f}".format(c)
for c in
site.frac_coords])]
source.SetText("\n".join(output))
follower.SetPosition(pick_pos)
follower.VisibilityOn()
picker.AddObserver("EndPickEvent", annotate_pick)
self.picker = picker
self.iren.SetPicker(picker)
class StructureInteractorStyle(vtkInteractorStyleTrackballCamera):
"""
A custom interactor style for visualizing structures.
"""
def __init__(self, parent):
self.parent = parent
self.AddObserver("LeftButtonPressEvent", self.leftButtonPressEvent)
self.AddObserver("MouseMoveEvent", self.mouseMoveEvent)
self.AddObserver("LeftButtonReleaseEvent", self.leftButtonReleaseEvent)
self.AddObserver("KeyPressEvent", self.keyPressEvent)
def leftButtonPressEvent(self, obj, event):
self.mouse_motion = 0
self.OnLeftButtonDown()
return
def mouseMoveEvent(self, obj, event):
self.mouse_motion = 1
self.OnMouseMove()
return
def leftButtonReleaseEvent(self, obj, event):
ren = obj.GetCurrentRenderer()
iren = ren.GetRenderWindow().GetInteractor()
if self.mouse_motion == 0:
pos = iren.GetEventPosition()
iren.GetPicker().Pick(pos[0], pos[1], 0, ren)
self.OnLeftButtonUp()
return
def keyPressEvent(self, obj, event):
parent = obj.GetCurrentRenderer().parent
sym = parent.iren.GetKeySym()
if sym in "ABCabc":
if sym == "A":
parent.supercell[0][0] += 1
elif sym == "B":
parent.supercell[1][1] += 1
elif sym == "C":
parent.supercell[2][2] += 1
elif sym == "a":
parent.supercell[0][0] = max(parent.supercell[0][0] - 1, 1)
elif sym == "b":
parent.supercell[1][1] = max(parent.supercell[1][1] - 1, 1)
elif sym == "c":
parent.supercell[2][2] = max(parent.supercell[2][2] - 1, 1)
parent.redraw()
elif sym == "numbersign":
parent.show_polyhedron = not parent.show_polyhedron
parent.redraw()
elif sym == "minus":
parent.show_bonds = not parent.show_bonds
parent.redraw()
elif sym == "bracketleft":
parent.poly_radii_tol_factor -= 0.05 \
if parent.poly_radii_tol_factor > 0 else 0
parent.redraw()
elif sym == "bracketright":
parent.poly_radii_tol_factor += 0.05
parent.redraw()
elif sym == "h":
parent.show_help = not parent.show_help
parent.redraw()
elif sym == "r":
parent.redraw(True)
elif sym == "s":
parent.write_image("image.png")
elif sym == "Up":
parent.rotate_view(1, 90)
elif sym == "Down":
parent.rotate_view(1, -90)
elif sym == "Left":
parent.rotate_view(0, -90)
elif sym == "Right":
parent.rotate_view(0, 90)
elif sym == "o":
parent.orthongonalize_structure()
parent.redraw()
self.OnKeyPress()
def make_movie(structures, output_filename="movie.mp4", zoom=1.0, fps=20,
bitrate="10000k", quality=1, **kwargs):
"""
Generate a movie from a sequence of structures using vtk and ffmpeg.
Args:
structures ([Structure]): sequence of structures
output_filename (str): filename for structure output. defaults to
movie.mp4
zoom (float): A zoom to be applied to the visualizer. Defaults to 1.0.
fps (int): Frames per second for the movie. Defaults to 20.
        bitrate (str): Video bitrate. Defaults to "10000k" (fairly high
quality).
quality (int): A quality scale. Defaults to 1.
\\*\\*kwargs: Any kwargs supported by StructureVis to modify the images
generated.
"""
vis = StructureVis(**kwargs)
vis.show_help = False
vis.redraw()
vis.zoom(zoom)
sigfig = int(math.floor(math.log10(len(structures))) + 1)
filename = "image{0:0" + str(sigfig) + "d}.png"
for i, s in enumerate(structures):
vis.set_structure(s)
vis.write_image(filename.format(i), 3)
filename = "image%0" + str(sigfig) + "d.png"
args = ["ffmpeg", "-y", "-i", filename,
"-q:v", str(quality), "-r", str(fps), "-b:v", str(bitrate),
output_filename]
subprocess.Popen(args)
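# A hedged usage sketch for make_movie (assumes ffmpeg is available on PATH;
# the name `trajectory` is illustrative and stands for any list of pymatgen
# Structure objects, e.g. frames from an MD run):
#
#     make_movie(trajectory, output_filename="md.mp4", zoom=1.2, fps=10)
#
# Zero-padded image*.png frames are written to the current working directory
# before ffmpeg stitches them into the movie.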
class MultiStructuresVis(StructureVis):
DEFAULT_ANIMATED_MOVIE_OPTIONS = {'time_between_frames': 0.1,
'looping_type': 'restart',
'number_of_loops': 1,
'time_between_loops': 1.0}
def __init__(self, element_color_mapping=None, show_unit_cell=True,
show_bonds=False, show_polyhedron=False,
poly_radii_tol_factor=0.5, excluded_bonding_elements=None,
animated_movie_options=DEFAULT_ANIMATED_MOVIE_OPTIONS):
super().__init__(element_color_mapping=element_color_mapping,
show_unit_cell=show_unit_cell,
show_bonds=show_bonds, show_polyhedron=show_polyhedron,
poly_radii_tol_factor=poly_radii_tol_factor,
excluded_bonding_elements=excluded_bonding_elements)
self.warningtxt_actor = vtk.vtkActor2D()
self.infotxt_actor = vtk.vtkActor2D()
self.structures = None
style = MultiStructuresInteractorStyle(self)
self.iren.SetInteractorStyle(style)
self.istruct = 0
self.current_structure = None
self.set_animated_movie_options(animated_movie_options=animated_movie_options)
def set_structures(self, structures, tags=None):
self.structures = structures
self.istruct = 0
self.current_structure = self.structures[self.istruct]
self.tags = tags if tags is not None else []
self.all_radii = []
self.all_vis_radii = []
for struct in self.structures:
struct_radii = []
struct_vis_radii = []
for site in struct:
radius = 0
for specie, occu in site.species.items():
radius += occu * (specie.ionic_radius
if isinstance(specie, Specie)
and specie.ionic_radius
else specie.average_ionic_radius)
vis_radius = 0.2 + 0.002 * radius
struct_radii.append(radius)
struct_vis_radii.append(vis_radius)
self.all_radii.append(struct_radii)
self.all_vis_radii.append(struct_vis_radii)
self.set_structure(self.current_structure, reset_camera=True, to_unit_cell=False)
def set_structure(self, structure, reset_camera=True, to_unit_cell=False):
super().set_structure(structure=structure, reset_camera=reset_camera,
to_unit_cell=to_unit_cell)
self.apply_tags()
def apply_tags(self):
tags = {}
for tag in self.tags:
istruct = tag.get('istruct', 'all')
if istruct != 'all':
if istruct != self.istruct:
continue
site_index = tag['site_index']
color = tag.get('color', [0.5, 0.5, 0.5])
opacity = tag.get('opacity', 0.5)
if site_index == 'unit_cell_all':
struct_radii = self.all_vis_radii[self.istruct]
for isite, site in enumerate(self.current_structure):
vis_radius = 1.5 * tag.get('radius', struct_radii[isite])
tags[(isite, (0, 0, 0))] = {'radius': vis_radius,
'color': color,
'opacity': opacity}
continue
cell_index = tag['cell_index']
if 'radius' in tag:
vis_radius = tag['radius']
elif 'radius_factor' in tag:
vis_radius = tag['radius_factor'] * self.all_vis_radii[self.istruct][site_index]
else:
vis_radius = 1.5 * self.all_vis_radii[self.istruct][site_index]
tags[(site_index, cell_index)] = {'radius': vis_radius,
'color': color,
'opacity': opacity}
for site_and_cell_index, tag_style in tags.items():
isite, cell_index = site_and_cell_index
site = self.current_structure[isite]
if cell_index == (0, 0, 0):
coords = site.coords
else:
fcoords = site.frac_coords + np.array(cell_index)
site_image = PeriodicSite(site.species, fcoords,
self.current_structure.lattice, to_unit_cell=False,
coords_are_cartesian=False,
properties=site.properties)
self.add_site(site_image)
coords = site_image.coords
vis_radius = tag_style['radius']
color = tag_style['color']
opacity = tag_style['opacity']
self.add_partial_sphere(coords=coords, radius=vis_radius,
color=color, start=0, end=360,
opacity=opacity)
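    # Tag format sketch, inferred from apply_tags above ('site_index' is
    # required, 'cell_index' is required unless site_index is
    # 'unit_cell_all', and the remaining keys are optional; the values shown
    # are purely illustrative):
    #
    #     tags = [{'istruct': 0,              # or 'all'
    #              'site_index': 3,           # or 'unit_cell_all'
    #              'cell_index': (0, 0, 0),
    #              'color': [1.0, 0.0, 0.0],
    #              'opacity': 0.5,
    #              'radius_factor': 2.0}]     # or an absolute 'radius'
    #     vis.set_structures(structures, tags=tags)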
def set_animated_movie_options(self, animated_movie_options=None):
if animated_movie_options is None:
self.animated_movie_options = self.DEFAULT_ANIMATED_MOVIE_OPTIONS.copy()
else:
self.animated_movie_options = self.DEFAULT_ANIMATED_MOVIE_OPTIONS.copy()
for key in animated_movie_options:
if key not in self.DEFAULT_ANIMATED_MOVIE_OPTIONS.keys():
raise ValueError('Wrong option for animated movie')
self.animated_movie_options.update(animated_movie_options)
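    # Example of overriding a subset of the defaults (a sketch; unknown keys
    # raise ValueError, as enforced above):
    #
    #     vis.set_animated_movie_options({'time_between_frames': 0.05,
    #                                     'looping_type': 'palindrome',
    #                                     'number_of_loops': 2})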
def display_help(self):
"""
Display the help for various keyboard shortcuts.
"""
helptxt = ["h : Toggle help",
"A/a, B/b or C/c : Increase/decrease cell by one a,"
" b or c unit vector", "# : Toggle showing of polyhedrons",
"-: Toggle showing of bonds", "r : Reset camera direction",
"[/]: Decrease or increase poly_radii_tol_factor "
"by 0.05. Value = " + str(self.poly_radii_tol_factor),
"Up/Down: Rotate view along Up direction by 90 "
"clockwise/anticlockwise",
"Left/right: Rotate view along camera direction by "
"90 clockwise/anticlockwise", "s: Save view to image.png",
"o: Orthogonalize structure",
"n: Move to next structure",
"p: Move to previous structure",
"m: Animated movie of the structures"]
self.helptxt_mapper.SetInput("\n".join(helptxt))
self.helptxt_actor.SetPosition(10, 10)
self.helptxt_actor.VisibilityOn()
def display_warning(self, warning):
self.warningtxt_mapper = vtk.vtkTextMapper()
tprops = self.warningtxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(1, 0, 0)
tprops.BoldOn()
tprops.SetJustificationToRight()
self.warningtxt = "WARNING : {}".format(warning)
self.warningtxt_actor = vtk.vtkActor2D()
self.warningtxt_actor.VisibilityOn()
self.warningtxt_actor.SetMapper(self.warningtxt_mapper)
self.ren.AddActor(self.warningtxt_actor)
self.warningtxt_mapper.SetInput(self.warningtxt)
winsize = self.ren_win.GetSize()
self.warningtxt_actor.SetPosition(winsize[0]-10, 10)
self.warningtxt_actor.VisibilityOn()
def erase_warning(self):
self.warningtxt_actor.VisibilityOff()
def display_info(self, info):
self.infotxt_mapper = vtk.vtkTextMapper()
tprops = self.infotxt_mapper.GetTextProperty()
tprops.SetFontSize(14)
tprops.SetFontFamilyToTimes()
tprops.SetColor(0, 0, 1)
tprops.BoldOn()
tprops.SetVerticalJustificationToTop()
self.infotxt = "INFO : {}".format(info)
self.infotxt_actor = vtk.vtkActor2D()
self.infotxt_actor.VisibilityOn()
self.infotxt_actor.SetMapper(self.infotxt_mapper)
self.ren.AddActor(self.infotxt_actor)
self.infotxt_mapper.SetInput(self.infotxt)
winsize = self.ren_win.GetSize()
self.infotxt_actor.SetPosition(10, winsize[1]-10)
self.infotxt_actor.VisibilityOn()
def erase_info(self):
self.infotxt_actor.VisibilityOff()
class MultiStructuresInteractorStyle(StructureInteractorStyle):
def __init__(self, parent):
StructureInteractorStyle.__init__(self, parent=parent)
def keyPressEvent(self, obj, event):
parent = obj.GetCurrentRenderer().parent
sym = parent.iren.GetKeySym()
if sym == "n":
if parent.istruct == len(parent.structures) - 1:
parent.display_warning('LAST STRUCTURE')
parent.ren_win.Render()
else:
parent.istruct += 1
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
elif sym == "p":
if parent.istruct == 0:
parent.display_warning('FIRST STRUCTURE')
parent.ren_win.Render()
else:
parent.istruct -= 1
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
elif sym == "m":
parent.istruct = 0
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.erase_warning()
parent.ren_win.Render()
nloops = parent.animated_movie_options['number_of_loops']
tstep = parent.animated_movie_options['time_between_frames']
tloops = parent.animated_movie_options['time_between_loops']
if parent.animated_movie_options['looping_type'] == 'restart':
loop_istructs = range(len(parent.structures))
elif parent.animated_movie_options['looping_type'] == 'palindrome':
                    loop_istructs = list(range(len(parent.structures))) + \
                        list(range(len(parent.structures) - 2, -1, -1))
else:
raise ValueError('"looping_type" should be "restart" or "palindrome"')
for iloop in range(nloops):
for istruct in loop_istructs:
time.sleep(tstep)
parent.istruct = istruct
parent.current_structure = parent.structures[parent.istruct]
parent.set_structure(parent.current_structure, reset_camera=False, to_unit_cell=False)
parent.display_info('Animated movie : structure {:d}/{:d} '
'(loop {:d}/{:d})'.format(istruct+1, len(parent.structures),
iloop+1, nloops))
parent.ren_win.Render()
time.sleep(tloops)
parent.erase_info()
parent.display_info('Ended animated movie ...')
parent.ren_win.Render()
StructureInteractorStyle.keyPressEvent(self, obj, event)
|
|
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo_config import cfg
from oslo_vmware import exceptions as vexc
from oslo_vmware import vim_util as vutil
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests.unit import matchers
from nova.tests.unit import utils
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vm_util
CONF = cfg.CONF
class VMwareVifTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVifTestCase, self).setUp()
self.flags(vlan_interface='vmnet0', group='vmware')
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True)
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
self.session = fake.FakeSession()
self.cluster = None
def tearDown(self):
super(VMwareVifTestCase, self).tearDown()
def test_ensure_vlan_bridge(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(None)
network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
self.cluster).AndReturn('vmnet0')
network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
self.cluster).AndReturn(True)
network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
self.cluster)
network_util.get_network_with_the_name(self.session, 'fa0', None)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)
    # FlatDHCP network mode without vlan - network doesn't exist on the host
def test_ensure_vlan_bridge_without_vlan(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(None)
network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
self.cluster).AndReturn('vmnet0')
network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
self.cluster).AndReturn(True)
network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
self.cluster)
network_util.get_network_with_the_name(self.session, 'fa0', None)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
    # FlatDHCP network mode without vlan - network exists on the host.
    # Get vswitch and check vlan interface should not be called.
def test_ensure_vlan_bridge_with_network(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
vm_network = {'name': 'VM Network', 'type': 'Network'}
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(vm_network)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
# Flat network mode with DVS
def test_ensure_vlan_bridge_with_existing_dvs(self):
network_ref = {'dvpg': 'dvportgroup-2062',
'type': 'DistributedVirtualPortgroup'}
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(network_ref)
self.mox.ReplayAll()
ref = vif.ensure_vlan_bridge(self.session,
self.vif,
create_vlan=False)
self.assertThat(ref, matchers.DictMatches(network_ref))
def test_get_network_ref_neutron(self):
self.mox.StubOutWithMock(vif, 'get_neutron_network')
vif.get_neutron_network(self.session, 'fa0', self.cluster, self.vif)
self.mox.ReplayAll()
vif.get_network_ref(self.session, self.cluster, self.vif, True)
def test_get_network_ref_flat_dhcp(self):
self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
create_vlan=False)
self.mox.ReplayAll()
vif.get_network_ref(self.session, self.cluster, self.vif, False)
def test_get_network_ref_bridge(self):
self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
create_vlan=True)
self.mox.ReplayAll()
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True,
should_create_vlan=True)
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
vif.get_network_ref(self.session, self.cluster, self.vif, False)
def test_get_network_ref_bridge_from_opaque(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id',
'opaqueNetworkName': 'name',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertEqual('bridge_id', network_ref['network-id'])
def test_get_network_ref_multiple_bridges_from_opaque(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
'opaqueNetworkName': 'name1',
'opaqueNetworkType': 'OpaqueNetwork'},
{'opaqueNetworkId': 'bridge_id2',
'opaqueNetworkName': 'name2',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id2')
self.assertEqual('bridge_id2', network_ref['network-id'])
def test_get_network_ref_integration(self):
opaque_networks = [{'opaqueNetworkId': 'integration_bridge',
'opaqueNetworkName': 'name',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertEqual('integration_bridge', network_ref['network-id'])
def test_get_network_ref_bridge_none(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
'opaqueNetworkName': 'name1',
'opaqueNetworkType': 'OpaqueNetwork'},
{'opaqueNetworkId': 'bridge_id2',
'opaqueNetworkName': 'name2',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertIsNone(network_ref)
def test_get_network_ref_integration_multiple(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
'opaqueNetworkName': 'name1',
'opaqueNetworkType': 'OpaqueNetwork'},
{'opaqueNetworkId': 'integration_bridge',
'opaqueNetworkName': 'name2',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertIsNone(network_ref)
def test_get_neutron_network(self):
self.mox.StubOutWithMock(vm_util, 'get_host_ref')
self.mox.StubOutWithMock(self.session, '_call_method')
self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
vm_util.get_host_ref(self.session,
self.cluster).AndReturn('fake-host')
opaque = fake.DataObject()
opaque.HostOpaqueNetworkInfo = ['fake-network-info']
self.session._call_method(vutil, "get_object_property",
'fake-host', 'config.network.opaqueNetwork').AndReturn(opaque)
vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
CONF.vmware.integration_bridge,
self.vif['network']['id']).AndReturn('fake-network-ref')
self.mox.ReplayAll()
network_ref = vif.get_neutron_network(self.session,
self.vif['network']['id'],
self.cluster,
self.vif)
self.assertEqual(network_ref, 'fake-network-ref')
def test_get_neutron_network_opaque_network_not_found(self):
self.mox.StubOutWithMock(vm_util, 'get_host_ref')
self.mox.StubOutWithMock(self.session, '_call_method')
self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
vm_util.get_host_ref(self.session,
self.cluster).AndReturn('fake-host')
opaque = fake.DataObject()
opaque.HostOpaqueNetworkInfo = ['fake-network-info']
self.session._call_method(vutil, "get_object_property",
'fake-host', 'config.network.opaqueNetwork').AndReturn(opaque)
vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
CONF.vmware.integration_bridge,
self.vif['network']['id']).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.NetworkNotFoundForBridge,
vif.get_neutron_network, self.session,
self.vif['network']['id'], self.cluster, self.vif)
def test_get_neutron_network_bridge_network_not_found(self):
self.mox.StubOutWithMock(vm_util, 'get_host_ref')
self.mox.StubOutWithMock(self.session, '_call_method')
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
vm_util.get_host_ref(self.session,
self.cluster).AndReturn('fake-host')
opaque = fake.DataObject()
opaque.HostOpaqueNetworkInfo = ['fake-network-info']
self.session._call_method(vutil, "get_object_property",
'fake-host', 'config.network.opaqueNetwork').AndReturn(None)
network_util.get_network_with_the_name(self.session, 0,
self.cluster).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.NetworkNotFoundForBridge,
vif.get_neutron_network, self.session,
self.vif['network']['id'], self.cluster, self.vif)
def test_create_port_group_already_exists(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'AddPortGroup':
raise vexc.AlreadyExistsException()
with contextlib.nested(
mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
mock.patch.object(vm_util, 'get_host_ref'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_add_vswitch, _get_host, _call_method):
network_util.create_port_group(self.session, 'pg_name',
'vswitch_name', vlan_id=0,
cluster=None)
def test_create_port_group_exception(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'AddPortGroup':
raise vexc.VMwareDriverException()
with contextlib.nested(
mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
mock.patch.object(vm_util, 'get_host_ref'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_add_vswitch, _get_host, _call_method):
self.assertRaises(vexc.VMwareDriverException,
network_util.create_port_group,
self.session, 'pg_name',
'vswitch_name', vlan_id=0,
cluster=None)
def test_get_neutron_network_invalid_property(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'get_object_property':
raise vexc.InvalidPropertyException()
with contextlib.nested(
mock.patch.object(vm_util, 'get_host_ref'),
mock.patch.object(self.session, '_call_method',
fake_call_method),
mock.patch.object(network_util, 'get_network_with_the_name')
) as (_get_host, _call_method, _get_name):
vif.get_neutron_network(self.session, 'network_name',
'cluster', self.vif)
def test_get_vif_info_none(self):
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'is_neutron', 'fake_model', None)
self.assertEqual([], vif_info)
def test_get_vif_info_empty_list(self):
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'is_neutron', 'fake_model', [])
self.assertEqual([], vif_info)
@mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
def test_get_vif_info(self, mock_get_network_ref):
network_info = utils.get_test_network_info()
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'is_neutron', 'fake_model', network_info)
expected = [{'iface_id': 'vif-xxx-yyy-zzz',
'mac_address': 'fake',
'network_name': 'fake',
'network_ref': 'fake_ref',
'vif_model': 'fake_model'}]
self.assertEqual(expected, vif_info)
|
|
import contextlib
import itertools
from sqlalchemy import bindparam
from sqlalchemy import event
from sqlalchemy import exc as sa_exc
from sqlalchemy import func
from sqlalchemy import literal_column
from sqlalchemy import testing
from sqlalchemy.ext import baked
from sqlalchemy.orm import aliased
from sqlalchemy.orm import backref
from sqlalchemy.orm import defaultload
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import Load
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm.query import Query
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not_
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertsql import CompiledSQL
from test.orm import _fixtures
class BakedTest(_fixtures.FixtureTest):
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
def setup(self):
self.bakery = baked.bakery()
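        # The general baked-query pattern exercised throughout these tests
        # (a sketch, not part of the fixture; `session` is any ORM Session):
        #
        #     bq = self.bakery(lambda s: s.query(User))             # base step
        #     bq += lambda q: q.filter(User.id == bindparam("id"))  # add step
        #     result = bq(session).params(id=7).all()               # execute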
class StateChangeTest(BakedTest):
@classmethod
def setup_mappers(cls):
User = cls.classes.User
mapper(User, cls.tables.users)
def _assert_cache_key(self, key, elements):
eq_(key, tuple(elem.__code__ for elem in elements))
def test_initial_key(self):
User = self.classes.User
session = Session()
def l1():
return session.query(User)
q1 = self.bakery(l1)
self._assert_cache_key(q1._cache_key, [l1])
eq_(q1.steps, [l1])
def test_inplace_add(self):
User = self.classes.User
session = Session()
def l1():
return session.query(User)
def l2(q):
return q.filter(User.name == bindparam("name"))
q1 = self.bakery(l1)
self._assert_cache_key(q1._cache_key, [l1])
eq_(q1.steps, [l1])
q2 = q1.add_criteria(l2)
is_(q2, q1)
self._assert_cache_key(q1._cache_key, [l1, l2])
eq_(q1.steps, [l1, l2])
def test_inplace_add_operator(self):
User = self.classes.User
session = Session()
def l1():
return session.query(User)
def l2(q):
return q.filter(User.name == bindparam("name"))
q1 = self.bakery(l1)
self._assert_cache_key(q1._cache_key, [l1])
q1 += l2
self._assert_cache_key(q1._cache_key, [l1, l2])
def test_chained_add(self):
User = self.classes.User
session = Session()
def l1():
return session.query(User)
def l2(q):
return q.filter(User.name == bindparam("name"))
q1 = self.bakery(l1)
q2 = q1.with_criteria(l2)
is_not_(q2, q1)
self._assert_cache_key(q1._cache_key, [l1])
self._assert_cache_key(q2._cache_key, [l1, l2])
def test_chained_add_operator(self):
User = self.classes.User
session = Session()
def l1():
return session.query(User)
def l2(q):
return q.filter(User.name == bindparam("name"))
q1 = self.bakery(l1)
q2 = q1 + l2
is_not_(q2, q1)
self._assert_cache_key(q1._cache_key, [l1])
self._assert_cache_key(q2._cache_key, [l1, l2])
class LikeQueryTest(BakedTest):
@classmethod
def setup_mappers(cls):
User = cls.classes.User
mapper(User, cls.tables.users)
def test_first_no_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == "asdf")
eq_(bq(Session()).first(), None)
def test_first_multiple_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User.id))
bq += lambda q: q.filter(User.name.like("%ed%")).order_by(User.id)
eq_(bq(Session()).first(), (8,))
def test_one_or_none_no_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == "asdf")
eq_(bq(Session()).one_or_none(), None)
def test_one_or_none_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == "ed")
u1 = bq(Session()).one_or_none()
eq_(u1.name, "ed")
def test_one_or_none_multiple_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name.like("%ed%"))
assert_raises_message(
orm_exc.MultipleResultsFound,
"Multiple rows were found for one_or_none()",
bq(Session()).one_or_none,
)
def test_one_no_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == "asdf")
assert_raises_message(
orm_exc.NoResultFound,
"No row was found for one()",
bq(Session()).one,
)
def test_one_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == "ed")
u1 = bq(Session()).one()
eq_(u1.name, "ed")
def test_one_multiple_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name.like("%ed%"))
assert_raises_message(
orm_exc.MultipleResultsFound,
"Multiple rows were found for one()",
bq(Session()).one,
)
def test_get(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
sess = Session()
def go():
u1 = bq(sess).get(7)
eq_(u1.name, "jack")
self.assert_sql_count(testing.db, go, 1)
u1 = sess.query(User).get(7) # noqa
def go():
u2 = bq(sess).get(7)
eq_(u2.name, "jack")
self.assert_sql_count(testing.db, go, 0)
def go():
u2 = bq(sess).get(8)
eq_(u2.name, "ed")
self.assert_sql_count(testing.db, go, 1)
def test_scalar(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User.id))
sess = Session()
bq += lambda q: q.filter(User.id == 7)
eq_(bq(sess).scalar(), 7)
def test_count(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
sess = Session()
eq_(bq(sess).count(), 4)
bq += lambda q: q.filter(User.id.in_([8, 9]))
eq_(bq(sess).count(), 2)
# original query still works
eq_(
set([(u.id, u.name) for u in bq(sess).all()]),
set([(8, "ed"), (9, "fred")]),
)
def test_count_with_bindparams(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
sess = Session()
eq_(bq(sess).count(), 4)
bq += lambda q: q.filter(User.name == bindparam("uname"))
# calling with *args
eq_(bq(sess).params(uname="fred").count(), 1)
# with multiple params, the **kwargs will be used
bq += lambda q: q.filter(User.id == bindparam("anid"))
eq_(bq(sess).params(uname="fred", anid=9).count(), 1)
eq_(
# wrong id, so 0 results:
bq(sess).params(uname="fred", anid=8).count(),
0,
)
def test_get_pk_w_null(self):
"""test the re-implementation of logic to do get with IS NULL."""
class AddressUser(object):
pass
mapper(
AddressUser,
self.tables.users.outerjoin(self.tables.addresses),
properties={
"id": self.tables.users.c.id,
"address_id": self.tables.addresses.c.id,
},
)
bq = self.bakery(lambda s: s.query(AddressUser))
sess = Session()
def go():
u1 = bq(sess).get((10, None))
eq_(u1.name, "chuck")
self.assert_sql_count(testing.db, go, 1)
u1 = sess.query(AddressUser).get((10, None)) # noqa
def go():
u2 = bq(sess).get((10, None))
eq_(u2.name, "chuck")
self.assert_sql_count(testing.db, go, 0)
def test_get_includes_getclause(self):
# test issue #3597
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
for i in range(5):
sess = Session()
u1 = bq(sess).get(7)
eq_(u1.name, "jack")
sess.close()
eq_(len(bq._bakery), 2)
# simulate race where mapper._get_clause
# may be generated more than once
from sqlalchemy import inspect
del inspect(User).__dict__["_get_clause"]
for i in range(5):
sess = Session()
u1 = bq(sess).get(7)
eq_(u1.name, "jack")
sess.close()
eq_(len(bq._bakery), 4)
class ResultPostCriteriaTest(BakedTest):
@classmethod
def setup_mappers(cls):
User = cls.classes.User
Address = cls.classes.Address
Order = cls.classes.Order
mapper(
User,
cls.tables.users,
properties={
"addresses": relationship(
Address, order_by=cls.tables.addresses.c.id
),
"orders": relationship(Order, order_by=cls.tables.orders.c.id),
},
)
mapper(Address, cls.tables.addresses)
mapper(Order, cls.tables.orders)
@contextlib.contextmanager
def _fixture(self):
from sqlalchemy import event
User = self.classes.User
with testing.db.connect() as conn:
@event.listens_for(conn, "before_execute")
def before_execute(conn, clauseelement, multiparams, params):
assert "yes" in conn._execution_options
bq = self.bakery(lambda s: s.query(User.id).order_by(User.id))
sess = Session(conn)
yield sess, bq
def test_first(self):
with self._fixture() as (sess, bq):
result = bq(sess).with_post_criteria(
lambda q: q.execution_options(yes=True)
)
eq_(result.first(), (7,))
def test_iter(self):
with self._fixture() as (sess, bq):
result = bq(sess).with_post_criteria(
lambda q: q.execution_options(yes=True)
)
eq_(list(result)[0], (7,))
def test_spoiled(self):
with self._fixture() as (sess, bq):
result = bq.spoil()(sess).with_post_criteria(
lambda q: q.execution_options(yes=True)
)
eq_(list(result)[0], (7,))
def test_get(self):
User = self.classes.User
with self._fixture() as (sess, bq):
bq = self.bakery(lambda s: s.query(User))
result = bq(sess).with_post_criteria(
lambda q: q.execution_options(yes=True)
)
eq_(result.get(7), User(id=7))
class ResultTest(BakedTest):
__backend__ = True
@classmethod
def setup_mappers(cls):
User = cls.classes.User
Address = cls.classes.Address
Order = cls.classes.Order
mapper(
User,
cls.tables.users,
properties={
"addresses": relationship(
Address, order_by=cls.tables.addresses.c.id
),
"orders": relationship(Order, order_by=cls.tables.orders.c.id),
},
)
mapper(Address, cls.tables.addresses)
mapper(Order, cls.tables.orders)
def test_cachekeys_on_constructor(self):
User = self.classes.User
queue = [7, 8]
def fn(s):
return s.query(User.id).filter_by(id=queue.pop(0))
bq1 = self.bakery(fn, 7)
bq2 = self.bakery(fn, 8)
for i in range(3):
session = Session(autocommit=True)
eq_(bq1(session).all(), [(7,)])
eq_(bq2(session).all(), [(8,)])
def test_no_steps(self):
User = self.classes.User
bq = self.bakery(
lambda s: s.query(User.id, User.name).order_by(User.id)
)
for i in range(3):
session = Session(autocommit=True)
eq_(
bq(session).all(),
[(7, "jack"), (8, "ed"), (9, "fred"), (10, "chuck")],
)
def test_different_limits(self):
User = self.classes.User
bq = self.bakery(
lambda s: s.query(User.id, User.name).order_by(User.id)
)
bq += lambda q: q.limit(bindparam("limit")).offset(bindparam("offset"))
session = Session(autocommit=True)
for i in range(4):
for limit, offset, exp in [
(2, 1, [(8, "ed"), (9, "fred")]),
(3, 0, [(7, "jack"), (8, "ed"), (9, "fred")]),
(1, 2, [(9, "fred")]),
]:
eq_(bq(session).params(limit=limit, offset=offset).all(), exp)
def test_disable_on_session(self):
User = self.classes.User
canary = mock.Mock()
def fn1(s):
canary.fn1()
return s.query(User.id, User.name).order_by(User.id)
def fn2(q):
canary.fn2()
return q.filter(User.id == bindparam("id"))
def fn3(q):
canary.fn3()
return q
for x in range(3):
bq = self.bakery(fn1)
bq += fn2
sess = Session(autocommit=True, enable_baked_queries=False)
eq_(bq.add_criteria(fn3)(sess).params(id=7).all(), [(7, "jack")])
eq_(
canary.mock_calls,
[
mock.call.fn1(),
mock.call.fn2(),
mock.call.fn3(),
mock.call.fn1(),
mock.call.fn2(),
mock.call.fn3(),
mock.call.fn1(),
mock.call.fn2(),
mock.call.fn3(),
],
)
def test_spoiled_full_w_params(self):
User = self.classes.User
canary = mock.Mock()
def fn1(s):
canary.fn1()
return s.query(User.id, User.name).order_by(User.id)
def fn2(q):
canary.fn2()
return q.filter(User.id == bindparam("id"))
def fn3(q):
canary.fn3()
return q
for x in range(3):
bq = self.bakery(fn1)
bq += fn2
sess = Session(autocommit=True)
eq_(
bq.spoil(full=True).add_criteria(fn3)(sess).params(id=7).all(),
[(7, "jack")],
)
eq_(
canary.mock_calls,
[
mock.call.fn1(),
mock.call.fn2(),
mock.call.fn3(),
mock.call.fn1(),
mock.call.fn2(),
mock.call.fn3(),
mock.call.fn1(),
mock.call.fn2(),
mock.call.fn3(),
],
)
def test_spoiled_half_w_params(self):
User = self.classes.User
canary = mock.Mock()
def fn1(s):
canary.fn1()
return s.query(User.id, User.name).order_by(User.id)
def fn2(q):
canary.fn2()
return q.filter(User.id == bindparam("id"))
def fn3(q):
canary.fn3()
return q
bq = self.bakery(fn1)
bq += fn2
for x in range(3):
bq = self.bakery(fn1)
bq += fn2
sess = Session(autocommit=True)
eq_(
bq.spoil().add_criteria(fn3)(sess).params(id=7).all(),
[(7, "jack")],
)
eq_(
canary.mock_calls,
[
mock.call.fn1(),
mock.call.fn2(),
mock.call.fn3(),
mock.call.fn3(),
mock.call.fn3(),
],
)
def test_w_new_entities(self):
"""Test that the query can have its entities modified in
an arbitrary callable, and that this new entity list is preserved
when the query is invoked.
"""
User = self.classes.User
bq = self.bakery(lambda s: s.query(User.id, User.name))
bq += lambda q: q.from_self().with_entities(func.count(User.id))
for i in range(3):
session = Session(autocommit=True)
eq_(bq(session).all(), [(4,)])
def test_conditional_step(self):
"""Test a large series of conditionals and assert that
results remain correct between all of them within a series
of loops.
"""
User = self.classes.User
base_bq = self.bakery(lambda s: s.query(User.id, User.name))
base_bq += lambda q: q.order_by(User.id)
for i in range(4):
for cond1, cond2, cond3, cond4 in itertools.product(
*[(False, True) for j in range(4)]
):
bq = base_bq._clone()
if cond1:
bq += lambda q: q.filter(User.name != "jack")
if cond2:
bq += lambda q: q.join(User.addresses)
else:
bq += lambda q: q.outerjoin(User.addresses)
elif cond3:
bq += lambda q: q.filter(User.name.like("%ed%"))
else:
bq += lambda q: q.filter(User.name == "jack")
if cond4:
bq += lambda q: q.from_self().with_entities(
func.count(User.id)
)
sess = Session(autocommit=True)
result = bq(sess).all()
if cond4:
if cond1:
if cond2:
eq_(result, [(4,)])
else:
eq_(result, [(5,)])
elif cond3:
eq_(result, [(2,)])
else:
eq_(result, [(1,)])
else:
if cond1:
if cond2:
eq_(
result,
[(8, "ed"), (8, "ed"), (8, "ed"), (9, "fred")],
)
else:
eq_(
result,
[
(8, "ed"),
(8, "ed"),
(8, "ed"),
(9, "fred"),
(10, "chuck"),
],
)
elif cond3:
eq_(result, [(8, "ed"), (9, "fred")])
else:
eq_(result, [(7, "jack")])
sess.close()
def test_conditional_step_oneline(self):
User = self.classes.User
base_bq = self.bakery(lambda s: s.query(User.id, User.name))
base_bq += lambda q: q.order_by(User.id)
for i in range(4):
for cond1 in (False, True):
bq = base_bq._clone()
# we were using (filename, firstlineno) as cache key,
# which fails for this kind of thing!
bq += (
(lambda q: q.filter(User.name != "jack"))
if cond1
else (lambda q: q.filter(User.name == "jack"))
) # noqa
sess = Session(autocommit=True)
result = bq(sess).all()
if cond1:
eq_(result, [(8, u"ed"), (9, u"fred"), (10, u"chuck")])
else:
eq_(result, [(7, "jack")])
sess.close()
def test_to_query_query(self):
User = self.classes.User
Address = self.classes.Address
sub_bq = self.bakery(lambda s: s.query(User.name))
sub_bq += (
lambda q: q.filter(User.id == Address.user_id)
.filter(User.name == "ed")
.correlate(Address)
)
main_bq = self.bakery(lambda s: s.query(Address.id))
main_bq += lambda q: q.filter(sub_bq.to_query(q).exists())
main_bq += lambda q: q.order_by(Address.id)
sess = Session()
result = main_bq(sess).all()
eq_(result, [(2,), (3,), (4,)])
def test_to_query_session(self):
User = self.classes.User
Address = self.classes.Address
sub_bq = self.bakery(lambda s: s.query(User.name))
sub_bq += lambda q: q.filter(User.id == Address.user_id).correlate(
Address
)
main_bq = self.bakery(
lambda s: s.query(Address.id, sub_bq.to_query(s).as_scalar())
)
main_bq += lambda q: q.filter(sub_bq.to_query(q).as_scalar() == "ed")
main_bq += lambda q: q.order_by(Address.id)
sess = Session()
result = main_bq(sess).all()
eq_(result, [(2, "ed"), (3, "ed"), (4, "ed")])
def test_to_query_args(self):
User = self.classes.User
sub_bq = self.bakery(lambda s: s.query(User.name))
q = Query([], None)
assert_raises_message(
sa_exc.ArgumentError,
"Given Query needs to be associated with a Session",
sub_bq.to_query,
q,
)
assert_raises_message(
TypeError,
"Query or Session object expected, got .*'int'.*",
sub_bq.to_query,
5,
)
def test_subquery_eagerloading(self):
User = self.classes.User
Address = self.classes.Address
Order = self.classes.Order
# Override the default bakery for one with a smaller size. This used to
# trigger a bug when unbaking subqueries.
self.bakery = baked.bakery(size=3)
base_bq = self.bakery(lambda s: s.query(User))
base_bq += lambda q: q.options(
subqueryload(User.addresses), subqueryload(User.orders)
)
base_bq += lambda q: q.order_by(User.id)
assert_result = [
User(
id=7,
addresses=[Address(id=1, email_address="jack@bean.com")],
orders=[Order(id=1), Order(id=3), Order(id=5)],
),
User(
id=8,
addresses=[
Address(id=2, email_address="ed@wood.com"),
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
],
),
User(
id=9,
addresses=[Address(id=5)],
orders=[Order(id=2), Order(id=4)],
),
User(id=10, addresses=[]),
]
for i in range(4):
for cond1, cond2 in itertools.product(
*[(False, True) for j in range(2)]
):
bq = base_bq._clone()
sess = Session()
if cond1:
bq += lambda q: q.filter(User.name == "jack")
else:
bq += lambda q: q.filter(User.name.like("%ed%"))
if cond2:
ct = func.count(Address.id).label("count")
subq = (
sess.query(ct, Address.user_id)
.group_by(Address.user_id)
.having(ct > 2)
.subquery()
)
bq += lambda q: q.join(subq)
if cond2:
if cond1:
def go():
result = bq(sess).all()
eq_([], result)
self.assert_sql_count(testing.db, go, 1)
else:
def go():
result = bq(sess).all()
eq_(assert_result[1:2], result)
self.assert_sql_count(testing.db, go, 3)
else:
if cond1:
def go():
result = bq(sess).all()
eq_(assert_result[0:1], result)
self.assert_sql_count(testing.db, go, 3)
else:
def go():
result = bq(sess).all()
eq_(assert_result[1:3], result)
self.assert_sql_count(testing.db, go, 3)
sess.close()
def test_subqueryload_post_context(self):
User = self.classes.User
Address = self.classes.Address
assert_result = [
User(
id=7, addresses=[Address(id=1, email_address="jack@bean.com")]
)
]
self.bakery = baked.bakery(size=3)
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.options(subqueryload(User.addresses))
bq += lambda q: q.order_by(User.id)
bq += lambda q: q.filter(User.name == bindparam("name"))
sess = Session()
def set_params(q):
return q.params(name="jack")
# test that the changes we make using with_post_criteria()
# are also applied to the subqueryload query.
def go():
result = bq(sess).with_post_criteria(set_params).all()
eq_(assert_result, result)
self.assert_sql_count(testing.db, go, 2)
@testing.fixture()
def before_compile_nobake_fixture(self):
@event.listens_for(Query, "before_compile", retval=True)
def _modify_query(query):
query = query.enable_assertions(False)
return query
yield
event.remove(Query, "before_compile", _modify_query)
def test_subqueryload_post_context_w_cancelling_event(
self, before_compile_nobake_fixture
):
User = self.classes.User
Address = self.classes.Address
assert_result = [
User(
id=7, addresses=[Address(id=1, email_address="jack@bean.com")]
)
]
self.bakery = baked.bakery(size=3)
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.options(subqueryload(User.addresses))
bq += lambda q: q.order_by(User.id)
bq += lambda q: q.filter(User.name == bindparam("name"))
sess = Session()
def set_params(q):
return q.params(name="jack")
# test that the changes we make using with_post_criteria()
# are also applied to the subqueryload query.
def go():
result = bq(sess).with_post_criteria(set_params).all()
eq_(assert_result, result)
self.assert_sql_count(testing.db, go, 2)
class LazyLoaderTest(testing.AssertsCompiledSQL, BakedTest):
run_setup_mappers = "each"
@testing.fixture
def modify_query_fixture(self):
def set_event(bake_ok):
event.listen(
Query,
"before_compile",
_modify_query,
retval=True,
bake_ok=bake_ok,
)
return m1
m1 = mock.Mock()
def _modify_query(query):
m1(query.column_descriptions[0]["entity"])
query = query.enable_assertions(False).filter(
literal_column("1") == 1
)
return query
yield set_event
event.remove(Query, "before_compile", _modify_query)
def _o2m_fixture(self, lazy="select", **kw):
User = self.classes.User
Address = self.classes.Address
mapper(
User,
self.tables.users,
properties={
"addresses": relationship(
Address,
order_by=self.tables.addresses.c.id,
lazy=lazy,
**kw
)
},
)
mapper(Address, self.tables.addresses)
return User, Address
def _o2m_twolevel_fixture(self, lazy="select", **kw):
User = self.classes.User
Address = self.classes.Address
Dingaling = self.classes.Dingaling
mapper(
User,
self.tables.users,
properties={
"addresses": relationship(
Address,
order_by=self.tables.addresses.c.id,
lazy=lazy,
**kw
)
},
)
mapper(
Address,
self.tables.addresses,
properties={"dingalings": relationship(Dingaling, lazy=lazy)},
)
mapper(Dingaling, self.tables.dingalings)
return User, Address, Dingaling
def _m2o_fixture(self):
User = self.classes.User
Address = self.classes.Address
mapper(User, self.tables.users)
mapper(
Address,
self.tables.addresses,
properties={"user": relationship(User)},
)
return User, Address
def test_no_cache_for_event(self, modify_query_fixture):
m1 = modify_query_fixture(False)
User, Address = self._o2m_fixture()
sess = Session()
u1 = sess.query(User).filter(User.id == 7).first()
u1.addresses
eq_(m1.mock_calls, [mock.call(User), mock.call(Address)])
sess.expire(u1, ["addresses"])
u1.addresses
eq_(
m1.mock_calls,
[mock.call(User), mock.call(Address), mock.call(Address)],
)
def test_cache_ok_for_event(self, modify_query_fixture):
m1 = modify_query_fixture(True)
User, Address = self._o2m_fixture()
sess = Session()
u1 = sess.query(User).filter(User.id == 7).first()
u1.addresses
eq_(m1.mock_calls, [mock.call(User), mock.call(Address)])
sess.expire(u1, ["addresses"])
u1.addresses
eq_(m1.mock_calls, [mock.call(User), mock.call(Address)])
def test_unsafe_unbound_option_cancels_bake(self):
User, Address, Dingaling = self._o2m_twolevel_fixture(lazy="joined")
class SubDingaling(Dingaling):
pass
mapper(SubDingaling, None, inherits=Dingaling)
lru = Address.dingalings.property._lazy_strategy._bakery(
lambda q: None
)._bakery
l1 = len(lru)
for i in range(5):
sess = Session()
u1 = (
sess.query(User)
.options(
defaultload(User.addresses).lazyload(
Address.dingalings.of_type(aliased(SubDingaling))
)
)
.first()
)
for ad in u1.addresses:
ad.dingalings
l2 = len(lru)
eq_(l1, 0)
eq_(l2, 0)
def test_unsafe_bound_option_cancels_bake(self):
User, Address, Dingaling = self._o2m_twolevel_fixture(lazy="joined")
class SubDingaling(Dingaling):
pass
mapper(SubDingaling, None, inherits=Dingaling)
lru = Address.dingalings.property._lazy_strategy._bakery(
lambda q: None
)._bakery
l1 = len(lru)
for i in range(5):
sess = Session()
u1 = (
sess.query(User)
.options(
Load(User)
.defaultload(User.addresses)
.lazyload(
Address.dingalings.of_type(aliased(SubDingaling))
)
)
.first()
)
for ad in u1.addresses:
ad.dingalings
l2 = len(lru)
eq_(l1, 0)
eq_(l2, 0)
def test_safe_unbound_option_allows_bake(self):
User, Address, Dingaling = self._o2m_twolevel_fixture(lazy="joined")
lru = Address.dingalings.property._lazy_strategy._bakery(
lambda q: None
)._bakery
l1 = len(lru)
for i in range(5):
sess = Session()
u1 = (
sess.query(User)
.options(
defaultload(User.addresses).lazyload(Address.dingalings)
)
.first()
)
for ad in u1.addresses:
ad.dingalings
l2 = len(lru)
eq_(l1, 0)
eq_(l2, 2)
def test_safe_bound_option_allows_bake(self):
User, Address, Dingaling = self._o2m_twolevel_fixture(lazy="joined")
lru = Address.dingalings.property._lazy_strategy._bakery(
lambda q: None
)._bakery
l1 = len(lru)
for i in range(5):
sess = Session()
u1 = (
sess.query(User)
.options(
Load(User)
.defaultload(User.addresses)
.lazyload(Address.dingalings)
)
.first()
)
for ad in u1.addresses:
ad.dingalings
l2 = len(lru)
eq_(l1, 0)
eq_(l2, 2)
def test_baked_lazy_loading_relationship_flag_true(self):
self._test_baked_lazy_loading_relationship_flag(True)
def test_baked_lazy_loading_relationship_flag_false(self):
self._test_baked_lazy_loading_relationship_flag(False)
def _test_baked_lazy_loading_relationship_flag(self, flag):
User, Address = self._o2m_fixture(bake_queries=flag)
sess = Session()
u1 = sess.query(User).first()
from sqlalchemy.orm import Query
canary = mock.Mock()
# I would think Mock can do this but apparently
# it cannot (wrap / autospec don't work together)
real_compile_context = Query._compile_context
def _my_compile_context(*arg, **kw):
if arg[0].column_descriptions[0]["entity"] is Address:
canary()
return real_compile_context(*arg, **kw)
with mock.patch.object(Query, "_compile_context", _my_compile_context):
u1.addresses
sess.expire(u1)
u1.addresses
if flag:
eq_(canary.call_count, 1)
else:
eq_(canary.call_count, 2)
def test_baked_lazy_loading_option_o2m(self):
User, Address = self._o2m_fixture()
self._test_baked_lazy_loading(set_option=True)
def test_baked_lazy_loading_mapped_o2m(self):
User, Address = self._o2m_fixture(lazy="baked_select")
self._test_baked_lazy_loading(set_option=False)
def _test_baked_lazy_loading(self, set_option):
User, Address = self.classes.User, self.classes.Address
base_bq = self.bakery(lambda s: s.query(User))
if set_option:
base_bq += lambda q: q.options(lazyload(User.addresses))
base_bq += lambda q: q.order_by(User.id)
assert_result = self.static.user_address_result
for i in range(4):
for cond1, cond2 in itertools.product(
*[(False, True) for j in range(2)]
):
bq = base_bq._clone()
sess = Session()
if cond1:
bq += lambda q: q.filter(User.name == "jack")
else:
bq += lambda q: q.filter(User.name.like("%ed%"))
if cond2:
ct = func.count(Address.id).label("count")
subq = (
sess.query(ct, Address.user_id)
.group_by(Address.user_id)
.having(ct > 2)
.subquery()
)
bq += lambda q: q.join(subq)
if cond2:
if cond1:
def go():
result = bq(sess).all()
eq_([], result)
self.assert_sql_count(testing.db, go, 1)
else:
def go():
result = bq(sess).all()
eq_(assert_result[1:2], result)
self.assert_sql_count(testing.db, go, 2)
else:
if cond1:
def go():
result = bq(sess).all()
eq_(assert_result[0:1], result)
self.assert_sql_count(testing.db, go, 2)
else:
def go():
result = bq(sess).all()
eq_(assert_result[1:3], result)
self.assert_sql_count(testing.db, go, 3)
sess.close()
def test_baked_lazy_loading_m2o(self):
User, Address = self._m2o_fixture()
base_bq = self.bakery(lambda s: s.query(Address))
base_bq += lambda q: q.options(lazyload(Address.user))
base_bq += lambda q: q.order_by(Address.id)
assert_result = self.static.address_user_result
for i in range(4):
for cond1 in (False, True):
bq = base_bq._clone()
sess = Session()
if cond1:
bq += lambda q: q.filter(
Address.email_address == "jack@bean.com"
)
else:
bq += lambda q: q.filter(
Address.email_address.like("ed@%")
)
if cond1:
def go():
result = bq(sess).all()
eq_(assert_result[0:1], result)
self.assert_sql_count(testing.db, go, 2)
else:
def go():
result = bq(sess).all()
eq_(assert_result[1:4], result)
self.assert_sql_count(testing.db, go, 2)
sess.close()
def test_useget_cancels_eager(self):
"""test that a one to many lazyload cancels the unnecessary
eager many-to-one join on the other side."""
User = self.classes.User
Address = self.classes.Address
mapper(User, self.tables.users)
mapper(
Address,
self.tables.addresses,
properties={
"user": relationship(
User,
lazy="joined",
backref=backref("addresses", lazy="baked_select"),
)
},
)
sess = Session()
u1 = sess.query(User).filter(User.id == 8).one()
def go():
eq_(u1.addresses[0].user, u1)
self.assert_sql_execution(
testing.db,
go,
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE :param_1 = "
"addresses.user_id",
{"param_1": 8},
),
)
def test_useget_cancels_eager_propagated_present(self):
"""test that a one to many lazyload cancels the unnecessary
eager many-to-one join on the other side, even when a propagated
option is present."""
User = self.classes.User
Address = self.classes.Address
mapper(User, self.tables.users)
mapper(
Address,
self.tables.addresses,
properties={
"user": relationship(
User,
lazy="joined",
backref=backref("addresses", lazy="baked_select"),
)
},
)
from sqlalchemy.orm.interfaces import MapperOption
class MyBogusOption(MapperOption):
propagate_to_loaders = True
sess = Session()
u1 = (
sess.query(User)
.options(MyBogusOption())
.filter(User.id == 8)
.one()
)
def go():
eq_(u1.addresses[0].user, u1)
self.assert_sql_execution(
testing.db,
go,
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE :param_1 = "
"addresses.user_id",
{"param_1": 8},
),
)
def test_simple_lazy_clause_no_race_on_generate(self):
User, Address = self._o2m_fixture()
(
expr1,
paramdict1,
) = User.addresses.property._lazy_strategy._simple_lazy_clause
# delete the attr, as though a concurrent thread is also generating it
del User.addresses.property._lazy_strategy._simple_lazy_clause
(
expr2,
paramdict2,
) = User.addresses.property._lazy_strategy._simple_lazy_clause
eq_(paramdict1, paramdict2)
# additional tests:
# 1. m2m w lazyload
# 2. o2m lazyload where m2o backrefs have an eager load, test
# that eager load is canceled out
# 3. uselist = False, uselist=False assertion
# assert that the integration style illustrated in the dogpile.cache
# example works w/ baked
class CustomIntegrationTest(testing.AssertsCompiledSQL, BakedTest):
run_setup_mappers = "each"
def _o2m_fixture(self, lazy="select", **kw):
User = self.classes.User
Address = self.classes.Address
mapper(
User,
self.tables.users,
properties={
"addresses": relationship(
Address,
order_by=self.tables.addresses.c.id,
lazy=lazy,
**kw
)
},
)
mapper(Address, self.tables.addresses)
return User, Address
def _query_fixture(self):
from sqlalchemy.orm.query import Query, _generative
class CachingQuery(Query):
cache = {}
@_generative()
def set_cache_key(self, key):
self._cache_key = key
def __iter__(self):
super_ = super(CachingQuery, self)
if hasattr(self, "_cache_key"):
return self.get_value(
createfunc=lambda: list(super_.__iter__())
)
else:
return super_.__iter__()
def _execute_and_instances(self, context):
super_ = super(CachingQuery, self)
if context.query is not self and hasattr(self, "_cache_key"):
return self.get_value(
createfunc=lambda: list(
super_._execute_and_instances(context)
)
)
else:
return super_._execute_and_instances(context)
def get_value(self, createfunc):
if self._cache_key in self.cache:
return iter(self.cache[self._cache_key])
else:
self.cache[self._cache_key] = retval = createfunc()
return iter(retval)
return Session(query_cls=CachingQuery)
def _option_fixture(self):
from sqlalchemy.orm.interfaces import MapperOption
class RelationshipCache(MapperOption):
propagate_to_loaders = True
def process_query_conditionally(self, query):
if query._current_path:
query._cache_key = "user7_addresses"
def _generate_cache_key(self, path):
return None
return RelationshipCache()
def test_non_baked(self):
User, Address = self._o2m_fixture()
sess = self._query_fixture()
q = sess._query_cls
eq_(q.cache, {})
q = sess.query(User).filter(User.id == 7).set_cache_key("user7")
eq_(q.all(), [User(id=7, addresses=[Address(id=1)])])
eq_(q.cache, {"user7": [User(id=7, addresses=[Address(id=1)])]})
eq_(q.all(), [User(id=7, addresses=[Address(id=1)])])
def test_use_w_baked(self):
User, Address = self._o2m_fixture()
sess = self._query_fixture()
q = sess._query_cls
eq_(q.cache, {})
base_bq = self.bakery(lambda s: s.query(User))
base_bq += lambda q: q.filter(User.id == 7)
base_bq += lambda q: q.set_cache_key("user7")
eq_(base_bq(sess).all(), [User(id=7, addresses=[Address(id=1)])])
eq_(q.cache, {"user7": [User(id=7, addresses=[Address(id=1)])]})
eq_(base_bq(sess).all(), [User(id=7, addresses=[Address(id=1)])])
def test_plain_w_baked_lazyload(self):
User, Address = self._o2m_fixture()
opt = self._option_fixture()
sess = self._query_fixture()
q = sess._query_cls
eq_(q.cache, {})
q = sess.query(User).filter(User.id == 7).options(opt)
u = q.first()
eq_(u.addresses, [Address(id=1)])
eq_(q.cache, {"user7_addresses": [Address(id=1)]})
sess.close()
# ensure caching logic works after query has been baked
q.cache.clear()
u = q.first()
eq_(u.addresses, [Address(id=1)])
eq_(q.cache, {"user7_addresses": [Address(id=1)]})
|
|
import json
import logging
import webapp2
from datetime import datetime
from google.appengine.ext import ndb
from controllers.api.api_base_controller import ApiBaseController
from database.event_query import EventListQuery
from helpers.award_helper import AwardHelper
from helpers.event_insights_helper import EventInsightsHelper
from helpers.model_to_dict import ModelToDict
from models.event import Event
class ApiEventController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_controller_{}" # (event_key)
CACHE_VERSION = 6
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventController, self).__init__(*args, **kw)
self.event_key = self.request.route_kwargs["event_key"]
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
@property
def _validators(self):
return [("event_id_validator", self.event_key)]
def _set_event(self, event_key):
self.event = Event.get_by_id(event_key)
if self.event is None:
self._errors = json.dumps({"404": "%s event not found" % self.event_key})
self.abort(404)
def _track_call(self, event_key):
self._track_call_defer('event', event_key)
def _render(self, event_key):
self._set_event(event_key)
event_dict = ModelToDict.eventConverter(self.event)
return json.dumps(event_dict, ensure_ascii=True)
class ApiEventTeamsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_teams_controller_{}" # (event_key)
CACHE_VERSION = 3
CACHE_HEADER_LENGTH = 60 * 60 * 24
def __init__(self, *args, **kw):
super(ApiEventTeamsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/teams', event_key)
def _render(self, event_key):
self._set_event(event_key)
teams = filter(None, self.event.teams)
team_dicts = [ModelToDict.teamConverter(team) for team in teams]
return json.dumps(team_dicts, ensure_ascii=True)
class ApiEventMatchesController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_matches_controller_{}" # (event_key)
CACHE_VERSION = 3
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventMatchesController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/matches', event_key)
def _render(self, event_key):
self._set_event(event_key)
matches = self.event.matches
match_dicts = [ModelToDict.matchConverter(match) for match in matches]
return json.dumps(match_dicts, ensure_ascii=True)
class ApiEventStatsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_stats_controller_{}" # (event_key)
CACHE_VERSION = 5
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventStatsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/stats', event_key)
def _render(self, event_key):
self._set_event(event_key)
stats = {}
matchstats = self.event.matchstats
if matchstats:
for stat in ['oprs', 'dprs', 'ccwms']:
if stat in matchstats:
stats[stat] = matchstats[stat]
year_specific = EventInsightsHelper.calculate_event_insights(self.event.matches, self.event.year)
if year_specific:
stats['year_specific'] = year_specific
return json.dumps(stats)
class ApiEventRankingsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_rankings_controller_{}" # (event_key)
CACHE_VERSION = 1
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventRankingsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/rankings', event_key)
def _render(self, event_key):
self._set_event(event_key)
ranks = json.dumps(self.event.rankings)
if ranks == 'null':
return '[]'
else:
return ranks
class ApiEventAwardsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_awards_controller_{}" # (event_key)
CACHE_VERSION = 4
CACHE_HEADER_LENGTH = 60 * 60
def __init__(self, *args, **kw):
super(ApiEventAwardsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/awards', event_key)
def _render(self, event_key):
self._set_event(event_key)
award_dicts = [ModelToDict.awardConverter(award) for award in AwardHelper.organizeAwards(self.event.awards)]
return json.dumps(award_dicts, ensure_ascii=True)
class ApiEventDistrictPointsController(ApiEventController):
CACHE_KEY_FORMAT = "apiv2_event_district_points_controller_{}" # (event_key)
CACHE_VERSION = 1
CACHE_HEADER_LENGTH = 61
def __init__(self, *args, **kw):
super(ApiEventDistrictPointsController, self).__init__(*args, **kw)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.event_key)
def _track_call(self, event_key):
self._track_call_defer('event/district_points', event_key)
def _render(self, event_key):
self._set_event(event_key)
points = self.event.district_points
return json.dumps(points, ensure_ascii=True)
class ApiEventListController(ApiBaseController):
CACHE_KEY_FORMAT = "apiv2_event_list_controller_{}" # (year)
CACHE_VERSION = 3
CACHE_HEADER_LENGTH = 60 * 60 * 24
def __init__(self, *args, **kw):
super(ApiEventListController, self).__init__(*args, **kw)
self.year = int(self.request.route_kwargs.get("year") or datetime.now().year)
self._partial_cache_key = self.CACHE_KEY_FORMAT.format(self.year)
@property
def _validators(self):
return []
def _track_call(self, *args, **kw):
self._track_call_defer('event/list', self.year)
def _render(self, year=None):
if self.year < 1992 or self.year > datetime.now().year + 1:
self._errors = json.dumps({"404": "No events found for %s" % self.year})
self.abort(404)
events = EventListQuery(self.year).fetch()
event_list = [ModelToDict.eventConverter(event) for event in events]
return json.dumps(event_list, ensure_ascii=True)
|
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import sys
import gevent.monkey
gevent.monkey.patch_all()
import logging
import tempfile
from pprint import pformat
import coverage
import fixtures
import testtools
from testtools import content
from flexmock import flexmock
from webtest import TestApp
import contextlib
from vnc_api.vnc_api import *
import cfgm_common.vnc_cpu_info
import cfgm_common.ifmap.client as ifmap_client
import kombu
import discoveryclient.client as disc_client
import cfgm_common.zkclient
from cfgm_common.uve.vnc_api.ttypes import VncApiConfigLog
from cfgm_common import imid
from cfgm_common.utils import cgitb_hook
from test_utils import *
import bottle
bottle.catchall=False
import inspect
import novaclient
import novaclient.client
import gevent.wsgi
import uuid
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
# end lineno
# import from package for non-api server test or directly from file
sys.path.insert(0, '../../../../build/production/api-lib/vnc_api')
sys.path.insert(0, '../../../../distro/openstack/')
sys.path.append('../../../../build/production/config/api-server/vnc_cfg_api_server')
sys.path.append("../config/api-server/vnc_cfg_api_server")
sys.path.insert(0, '../../../../build/production/discovery/discovery')
try:
import vnc_cfg_api_server
if not hasattr(vnc_cfg_api_server, 'main'):
from vnc_cfg_api_server import vnc_cfg_api_server
except ImportError:
vnc_cfg_api_server = 'vnc_cfg_api_server could not be imported'
try:
import to_bgp
except ImportError:
try:
from schema_transformer import to_bgp
except ImportError:
to_bgp = 'to_bgp could not be imported'
try:
import svc_monitor
if not hasattr(svc_monitor, 'main'):
from svc_monitor import svc_monitor
except ImportError:
svc_monitor = 'svc_monitor could not be imported'
try:
import device_manager
if not hasattr(device_manager, 'main'):
from device_manager import device_manager
except ImportError:
device_manager = 'device_manager could not be imported'
try:
from discovery import disc_server
if not hasattr(disc_server, 'main'):
from disc_server import disc_server
except ImportError:
disc_server = 'disc_server could not be imported'
def generate_conf_file_contents(conf_sections):
cfg_parser = ConfigParser.RawConfigParser()
for (section, var, val) in conf_sections:
try:
cfg_parser.add_section(section)
except ConfigParser.DuplicateSectionError:
pass
if not var:
continue
if val == '':
cfg_parser.set(section, var, 'empty')
else:
cfg_parser.set(section, var, val)
return cfg_parser
# end generate_conf_file_contents
def generate_logconf_file_contents():
cfg_parser = ConfigParser.RawConfigParser()
cfg_parser.add_section('formatters')
cfg_parser.add_section('formatter_simple')
cfg_parser.set('formatters', 'keys', 'simple')
cfg_parser.set('formatter_simple', 'format', '%(name)s:%(levelname)s: %(message)s')
cfg_parser.add_section('handlers')
cfg_parser.add_section('handler_console')
cfg_parser.add_section('handler_api_server_file')
cfg_parser.set('handlers', 'keys', 'console,api_server_file')
cfg_parser.set('handler_console', 'class', 'StreamHandler')
cfg_parser.set('handler_console', 'level', 'WARN')
cfg_parser.set('handler_console', 'args', '[]')
cfg_parser.set('handler_console', 'formatter', 'simple')
cfg_parser.set('handler_api_server_file', 'class', 'FileHandler')
cfg_parser.set('handler_api_server_file', 'level', 'INFO')
cfg_parser.set('handler_api_server_file', 'formatter', 'simple')
cfg_parser.set('handler_api_server_file', 'args', "('api_server.log',)")
cfg_parser.add_section('loggers')
cfg_parser.add_section('logger_root')
cfg_parser.add_section('logger_FakeWSGIHandler')
cfg_parser.set('loggers', 'keys', 'root,FakeWSGIHandler')
cfg_parser.set('logger_root', 'level', 'WARN')
cfg_parser.set('logger_root', 'handlers', 'console')
cfg_parser.set('logger_FakeWSGIHandler', 'level', 'INFO')
cfg_parser.set('logger_FakeWSGIHandler', 'qualname', 'FakeWSGIHandler')
cfg_parser.set('logger_FakeWSGIHandler', 'handlers', 'api_server_file')
return cfg_parser
# end generate_logconf_file_contents
def launch_disc_server(test_id, listen_ip, listen_port, http_server_port, conf_sections):
args_str = ""
args_str = args_str + "--listen_ip_addr %s " % (listen_ip)
args_str = args_str + "--listen_port %s " % (listen_port)
args_str = args_str + "--http_server_port %s " % (http_server_port)
args_str = args_str + "--cassandra_server_list 0.0.0.0:9160 "
args_str = args_str + "--ttl_min 30 "
args_str = args_str + "--ttl_max 60 "
args_str = args_str + "--log_local "
args_str = args_str + "--log_file discovery_server_%s.log " % test_id
import cgitb
cgitb.enable(format='text')
with tempfile.NamedTemporaryFile() as conf, tempfile.NamedTemporaryFile() as logconf:
cfg_parser = generate_conf_file_contents(conf_sections)
cfg_parser.write(conf)
conf.flush()
cfg_parser = generate_logconf_file_contents()
cfg_parser.write(logconf)
logconf.flush()
args_str = args_str + "--conf_file %s " %(conf.name)
disc_server.main(args_str)
#end launch_disc_server
def launch_api_server(test_id, listen_ip, listen_port, http_server_port,
admin_port, conf_sections):
args_str = ""
args_str = args_str + "--listen_ip_addr %s " % (listen_ip)
args_str = args_str + "--listen_port %s " % (listen_port)
args_str = args_str + "--http_server_port %s " % (http_server_port)
args_str = args_str + "--admin_port %s " % (admin_port)
args_str = args_str + "--cassandra_server_list 0.0.0.0:9160 "
args_str = args_str + "--log_local "
args_str = args_str + "--log_file api_server_%s.log " %(test_id)
import cgitb
cgitb.enable(format='text')
with tempfile.NamedTemporaryFile() as conf, tempfile.NamedTemporaryFile() as logconf:
cfg_parser = generate_conf_file_contents(conf_sections)
cfg_parser.write(conf)
conf.flush()
cfg_parser = generate_logconf_file_contents()
cfg_parser.write(logconf)
logconf.flush()
args_str = args_str + "--conf_file %s " %(conf.name)
args_str = args_str + "--logging_conf %s " %(logconf.name)
vnc_cfg_api_server.main(args_str)
#end launch_api_server
def launch_svc_monitor(test_id, api_server_ip, api_server_port):
args_str = ""
args_str = args_str + "--api_server_ip %s " % (api_server_ip)
args_str = args_str + "--api_server_port %s " % (api_server_port)
args_str = args_str + "--http_server_port %s " % (get_free_port())
args_str = args_str + "--ifmap_username api-server "
args_str = args_str + "--ifmap_password api-server "
args_str = args_str + "--cassandra_server_list 0.0.0.0:9160 "
args_str = args_str + "--log_local "
args_str = args_str + "--log_file svc_monitor_%s.log " %(test_id)
svc_monitor.main(args_str)
# end launch_svc_monitor
def kill_svc_monitor(glet):
glet.kill()
svc_monitor.SvcMonitor.reset()
def kill_schema_transformer(glet):
glet.kill()
to_bgp.transformer.reset()
def kill_disc_server(glet):
glet.kill()
def launch_schema_transformer(test_id, api_server_ip, api_server_port):
args_str = ""
args_str = args_str + "--api_server_ip %s " % (api_server_ip)
args_str = args_str + "--api_server_port %s " % (api_server_port)
args_str = args_str + "--http_server_port %s " % (get_free_port())
args_str = args_str + "--cassandra_server_list 0.0.0.0:9160 "
args_str = args_str + "--log_local "
args_str = args_str + "--log_file schema_transformer_%s.log " %(test_id)
args_str = args_str + "--trace_file schema_transformer_%s.err " %(test_id)
to_bgp.main(args_str)
# end launch_schema_transformer
def launch_device_manager(test_id, api_server_ip, api_server_port):
args_str = ""
args_str = args_str + "--api_server_ip %s " % (api_server_ip)
args_str = args_str + "--api_server_port %s " % (api_server_port)
args_str = args_str + "--http_server_port %s " % (get_free_port())
args_str = args_str + "--cassandra_server_list 0.0.0.0:9160 "
args_str = args_str + "--log_local "
args_str = args_str + "--log_file device_manager_%s.log " %(test_id)
device_manager.main(args_str)
# end launch_device_manager
@contextlib.contextmanager
def flexmocks(mocks):
orig_values = {}
try:
for cls, method_name, val in mocks:
kwargs = {method_name: val}
# save orig cls.method_name
orig_values[(cls, method_name)] = getattr(cls, method_name)
flexmock(cls, **kwargs)
yield
finally:
for (cls, method_name), method in orig_values.items():
setattr(cls, method_name, method)
# end flexmocks
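# Illustrative usage sketch (not part of the original file): stub a method for
# the duration of a block; `SomeClient` and `connect` are hypothetical names,
# and the third tuple element is the value the stubbed method should return.
#
#   with flexmocks([(SomeClient, 'connect', None)]):
#       SomeClient().connect()   # stubbed, returns None
#   # original SomeClient.connect is restored on exit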
def setup_extra_flexmock(mocks):
for (cls, method_name, val) in mocks:
kwargs = {method_name: val}
flexmock(cls, **kwargs)
# end setup_extra_flexmock
def setup_mocks(mod_attr_val_list):
# use setattr instead of flexmock because flexmocks are torn down
# after every test in stopTest, whereas these mocks are needed across
# all tests in the class (see the usage sketch after teardown_mocks below)
orig_mod_attr_val_list = []
for mod, attr, val in mod_attr_val_list:
orig_mod_attr_val_list.append(
(mod, attr, getattr(mod, attr)))
setattr(mod, attr, val)
return orig_mod_attr_val_list
#end setup_mocks
def teardown_mocks(mod_attr_val_list):
for mod, attr, val in mod_attr_val_list:
setattr(mod, attr, val)
# end teardown_mocks
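# Illustrative usage sketch (not part of the original file): how setup_mocks /
# teardown_mocks are typically paired at class scope so the stubs survive across
# all tests in the class; `some_module` and `fetch` are hypothetical names.
#
#   class MyTests(unittest.TestCase):
#       @classmethod
#       def setUpClass(cls):
#           cls._orig_mocks = setup_mocks(
#               [(some_module, 'fetch', lambda *a, **kw: None)])
#       @classmethod
#       def tearDownClass(cls):
#           teardown_mocks(cls._orig_mocks)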
@contextlib.contextmanager
def patch(target_obj, target_method_name, patched):
orig_method = getattr(target_obj, target_method_name)
def patched_wrapper(*args, **kwargs):
return patched(orig_method, *args, **kwargs)
setattr(target_obj, target_method_name, patched_wrapper)
try:
yield
finally:
setattr(target_obj, target_method_name, orig_method)
#end patch
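# Illustrative usage sketch (not part of the original file): the patched callable
# receives the original as its first argument; `some_module.some_function` is a
# hypothetical target.
#
#   def traced(orig_func, *args, **kwargs):
#       print('patched call')
#       return orig_func(*args, **kwargs)
#
#   with patch(some_module, 'some_function', traced):
#       some_module.some_function()   # goes through traced()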
@contextlib.contextmanager
def patch_imports(imports):
# save original, patch and restore
orig_modules = {}
mocked_modules = []
try:
for import_str, fake in imports:
cur_module = None
for mod_part in import_str.split('.'):
if not cur_module:
cur_module = mod_part
else:
cur_module += "." + mod_part
if cur_module in sys.modules:
orig_modules[cur_module] = sys.modules[cur_module]
else:
mocked_modules.append(cur_module)
sys.modules[cur_module] = fake
yield
finally:
for mod_name, mod in orig_modules.items():
sys.modules[mod_name] = mod
for mod_name in mocked_modules:
del sys.modules[mod_name]
#end patch_import
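# Illustrative usage sketch (not part of the original file): temporarily replace
# a module in sys.modules before importing code that depends on it;
# `fake_requests` and `module_under_test` are hypothetical names.
#
#   with patch_imports([('requests', fake_requests)]):
#       import module_under_test   # sees fake_requests as `requests`
#   # previously-imported modules are restored, newly-mocked ones removed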
cov_handle = None
class TestCase(testtools.TestCase, fixtures.TestWithFixtures):
_HTTP_HEADERS = {
'Content-type': 'application/json; charset="UTF-8"',
}
mocks = [
(cfgm_common.vnc_cpu_info.CpuInfo, '__init__',stub),
(novaclient.client, 'Client',FakeNovaClient.initialize),
(ifmap_client.client, '__init__', FakeIfmapClient.initialize),
(ifmap_client.client, 'call', FakeIfmapClient.call),
(ifmap_client.client, 'call_async_result', FakeIfmapClient.call_async_result),
(pycassa.system_manager.Connection, '__init__',stub),
(pycassa.system_manager.SystemManager, '__new__',FakeSystemManager),
(pycassa.ConnectionPool, '__init__',stub),
(pycassa.ColumnFamily, '__new__',FakeCF),
(pycassa.util, 'convert_uuid_to_time',Fake_uuid_to_time),
(disc_client.DiscoveryClient, '__init__',stub),
(disc_client.DiscoveryClient, 'publish_obj',stub),
(disc_client.DiscoveryClient, 'publish',stub),
(disc_client.DiscoveryClient, 'subscribe',stub),
(disc_client.DiscoveryClient, 'syslog',stub),
(kazoo.client.KazooClient, '__new__',FakeKazooClient),
(kazoo.handlers.gevent.SequentialGeventHandler, '__init__',stub),
(kombu.Connection, '__new__',FakeKombu.Connection),
(kombu.Exchange, '__new__',FakeKombu.Exchange),
(kombu.Queue, '__new__',FakeKombu.Queue),
(kombu.Consumer, '__new__',FakeKombu.Consumer),
(kombu.Producer, '__new__',FakeKombu.Producer),
(VncApiConfigLog, '__new__',FakeApiConfigLog),
#(VncApiStatsLog, '__new__',FakeVncApiStatsLog)
]
def __init__(self, *args, **kwargs):
self._logger = logging.getLogger(__name__)
self._assert_till_max_tries = 30
self._config_knobs = [
('DEFAULTS', '', ''),
]
super(TestCase, self).__init__(*args, **kwargs)
self.addOnException(self._add_detailed_traceback)
# list of sockets allocated from system for test case
self._allocated_sockets = []
def _add_detailed_traceback(self, exc_info):
import cgitb
cgitb.enable(format='text')
from cStringIO import StringIO
tmp_file = StringIO()
cgitb_hook(format="text", file=tmp_file, info=exc_info)
tb_str = tmp_file.getvalue()
tmp_file.close()
self.addDetail('detailed-traceback', content.text_content(tb_str))
def _add_detail(self, detail_str):
frame = inspect.stack()[1]
self.addDetail('%s:%s ' %(frame[1],frame[2]), content.text_content(detail_str))
def _add_request_detail(self, op, url, headers=None, query_params=None,
body=None):
request_str = ' URL: ' + pformat(url) + \
' OPER: ' + pformat(op) + \
' Headers: ' + pformat(headers) + \
' Query Params: ' + pformat(query_params) + \
' Body: ' + pformat(body)
self._add_detail('Requesting: ' + request_str)
def _http_get(self, uri, query_params=None):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('GET', url, headers=self._HTTP_HEADERS,
query_params=query_params)
response = self._api_server_session.get(url, headers=self._HTTP_HEADERS,
params=query_params)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_get
def _http_post(self, uri, body):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('POST', url, headers=self._HTTP_HEADERS, body=body)
response = self._api_server_session.post(url, data=body,
headers=self._HTTP_HEADERS)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_post
def _http_delete(self, uri, body):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('DELETE', url, headers=self._HTTP_HEADERS, body=body)
response = self._api_server_session.delete(url, data=body,
headers=self._HTTP_HEADERS)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_delete
def _http_put(self, uri, body):
url = "http://%s:%s%s" % (self._api_server_ip, self._api_server_port, uri)
self._add_request_detail('PUT', url, headers=self._HTTP_HEADERS, body=body)
response = self._api_server_session.put(url, data=body,
headers=self._HTTP_HEADERS)
self._add_detail('Received Response: ' +
pformat(response.status_code) +
pformat(response.text))
return (response.status_code, response.text)
#end _http_put
def _create_test_objects(self, count=1):
ret_objs = []
for i in range(count):
obj_name = self.id() + '-vn-' + str(i)
obj = VirtualNetwork(obj_name)
self._add_detail('creating-object ' + obj_name)
self._vnc_lib.virtual_network_create(obj)
ret_objs.append(obj)
return ret_objs
def _create_test_object(self):
return self._create_test_objects()[0]
def _delete_test_object(self, obj):
self._vnc_lib.virtual_network_delete(id=obj.uuid)
def ifmap_has_ident(self, obj=None, id=None):
if obj:
_type = obj.get_type()
_fq_name = obj.get_fq_name()
if id:
_type = self._vnc_lib.id_to_fq_name_type(id)
_fq_name = self._vnc_lib.id_to_fq_name(id)
ifmap_id = imid.get_ifmap_id_from_fq_name(_type, _fq_name)
if ifmap_id in FakeIfmapClient._graph:
return True
return False
def ifmap_doesnt_have_ident(self, obj=None, id=None):
return not self.ifmap_has_ident(obj, id)
def assertTill(self, expr_or_cb, *cb_args, **cb_kwargs):
tries = 0
while True:
if callable(expr_or_cb):
ret = expr_or_cb(*cb_args, **cb_kwargs)
else:
ret = eval(expr_or_cb)
if ret:
break
tries = tries + 1
if tries >= self._assert_till_max_tries:
raise Exception('Max retries')
self._logger.warn('Retrying at ' + str(inspect.stack()[1]))
gevent.sleep(2)
def setUp(self, extra_mocks=None, extra_config_knobs=None):
super(TestCase, self).setUp()
global cov_handle
if not cov_handle:
cov_handle = coverage.coverage(source=['./'], omit=['.venv/*'])
#cov_handle.start()
cfgm_common.zkclient.LOG_DIR = './'
gevent.wsgi.WSGIServer.handler_class = FakeWSGIHandler
setup_mocks(self.mocks + (extra_mocks or []))
if extra_config_knobs:
self._config_knobs.extend(extra_config_knobs)
self._api_server_ip = socket.gethostbyname(socket.gethostname())
self._api_server_port = get_free_port(self._allocated_sockets)
http_server_port = get_free_port(self._allocated_sockets)
self._api_admin_port = get_free_port(self._allocated_sockets)
self._api_svr_greenlet = gevent.spawn(launch_api_server,
self.id(),
self._api_server_ip, self._api_server_port,
http_server_port, self._api_admin_port,
self._config_knobs)
block_till_port_listened(self._api_server_ip, self._api_server_port)
extra_env = {'HTTP_HOST':'%s%s' %(self._api_server_ip,
self._api_server_port)}
self._api_svr_app = TestApp(bottle.app(), extra_environ=extra_env)
self._vnc_lib = VncApi('u', 'p', api_server_host=self._api_server_ip,
api_server_port=self._api_server_port)
FakeNovaClient.vnc_lib = self._vnc_lib
self._api_server_session = requests.Session()
adapter = requests.adapters.HTTPAdapter()
self._api_server_session.mount("http://", adapter)
self._api_server_session.mount("https://", adapter)
self._api_server = vnc_cfg_api_server.server
self._api_server._sandesh.set_logging_level(level="SYS_DEBUG")
self.addCleanup(self.cleanUp)
# end setUp
def cleanUp(self):
self._api_svr_greenlet.kill()
self._api_server._db_conn._msgbus.shutdown()
FakeKombu.reset()
FakeIfmapClient.reset()
CassandraCFs.reset()
FakeKazooClient.reset()
FakeExtensionManager.reset()
for sock in self._allocated_sockets:
sock.close()
#cov_handle.stop()
#cov_handle.report(file=open('covreport.txt', 'w'))
# end cleanUp
def get_obj_imid(self, obj):
return 'contrail:%s:%s' %(obj._type, obj.get_fq_name_str())
# end get_obj_imid
def create_virtual_network(self, vn_name, vn_subnet):
vn_obj = VirtualNetwork(name=vn_name)
ipam_fq_name = [
'default-domain', 'default-project', 'default-network-ipam']
ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
subnets = [vn_subnet] if isinstance(vn_subnet, basestring) else vn_subnet
subnet_infos = []
for subnet in subnets:
cidr = subnet.split('/')
pfx = cidr[0]
pfx_len = int(cidr[1])
subnet_infos.append(IpamSubnetType(subnet=SubnetType(pfx, pfx_len)))
subnet_data = VnSubnetsType(subnet_infos)
vn_obj.add_network_ipam(ipam_obj, subnet_data)
self._vnc_lib.virtual_network_create(vn_obj)
vn_obj.clear_pending_updates()
return vn_obj
# end create_virtual_network
def _create_service(self, vn_list, si_name, auto_policy, **kwargs):
sa_set = None
if kwargs.get('service_virtualization_type') == 'physical-device':
pr = PhysicalRouter(si_name)
self._vnc_lib.physical_router_create(pr)
sa_set = ServiceApplianceSet('sa_set-'+si_name)
self._vnc_lib.service_appliance_set_create(sa_set)
sa = ServiceAppliance('sa-'+si_name, parent_obj=sa_set)
for if_type, _ in vn_list:
attr = ServiceApplianceInterfaceType(interface_type=if_type)
pi = PhysicalInterface('pi-'+si_name+if_type, parent_obj=pr)
self._vnc_lib.physical_interface_create(pi)
sa.add_physical_interface(pi, attr)
self._vnc_lib.service_appliance_create(sa)
sti = [ServiceTemplateInterfaceType(k) for k, _ in vn_list]
st_prop = ServiceTemplateType(
flavor='medium',
image_name='junk',
ordered_interfaces=True,
interface_type=sti, **kwargs)
service_template = ServiceTemplate(
name=si_name + 'template',
service_template_properties=st_prop)
if sa_set:
service_template.add_service_appliance_set(sa_set)
self._vnc_lib.service_template_create(service_template)
scale_out = ServiceScaleOutType()
if kwargs.get('service_mode') == 'in-network':
if_list = [ServiceInstanceInterfaceType(virtual_network=vn)
for _, vn in vn_list]
si_props = ServiceInstanceType(auto_policy=auto_policy,
interface_list=if_list,
scale_out=scale_out)
else:
if_list = [ServiceInstanceInterfaceType(),
ServiceInstanceInterfaceType()]
si_props = ServiceInstanceType(interface_list=if_list,
scale_out=scale_out)
service_instance = ServiceInstance(
name=si_name, service_instance_properties=si_props)
service_instance.add_service_template(service_template)
self._vnc_lib.service_instance_create(service_instance)
if kwargs.get('version') == 2:
proj = Project()
pt = PortTuple('pt-'+si_name, parent_obj=service_instance)
self._vnc_lib.port_tuple_create(pt)
for if_type, vn_name in vn_list:
port = VirtualMachineInterface(si_name+if_type, parent_obj=proj)
vmi_props = VirtualMachineInterfacePropertiesType(
service_interface_type=if_type)
vn_obj = self._vnc_lib.virtual_network_read(fq_name_str=vn_name)
port.set_virtual_machine_interface_properties(vmi_props)
port.add_virtual_network(vn_obj)
port.add_port_tuple(pt)
self._vnc_lib.virtual_machine_interface_create(port)
return service_instance.get_fq_name_str()
def create_network_policy(self, vn1, vn2, service_list=None, mirror_service=None,
auto_policy=False, **kwargs):
vn1_name = vn1 if isinstance(vn1, basestring) else vn1.get_fq_name_str()
vn2_name = vn2 if isinstance(vn2, basestring) else vn2.get_fq_name_str()
addr1 = AddressType(virtual_network=vn1_name)
addr2 = AddressType(virtual_network=vn2_name)
port = PortType(-1, 0)
service_name_list = []
si_list = service_list or []
if service_list:
for service in si_list:
service_name_list.append(self._create_service(
[('left', vn1_name), ('right', vn2_name)], service,
auto_policy, **kwargs))
if mirror_service:
mirror_si = self._create_service(
[('left', vn1_name), ('right', vn2_name)], mirror_service, False,
service_mode='transparent', service_type='analyzer')
action_list = ActionListType()
if mirror_service:
mirror = MirrorActionType(analyzer_name=mirror_si)
action_list.mirror_to=mirror
if service_name_list:
action_list.apply_service=service_name_list
else:
action_list.simple_action='pass'
prule = PolicyRuleType(direction="<>", protocol="any",
src_addresses=[addr1], dst_addresses=[addr2],
src_ports=[port], dst_ports=[port],
action_list=action_list)
pentry = PolicyEntriesType([prule])
np = NetworkPolicy(str(uuid.uuid4()), network_policy_entries=pentry)
if auto_policy:
return np
self._vnc_lib.network_policy_create(np)
return np
# end create_network_policy
# end TestCase
|
|
from __future__ import division, print_function, absolute_import
import numpy
import pandas
from sklearn.utils import column_or_1d
from hep_ml.commonutils import check_sample_weight, compute_cut_for_efficiency, compute_knn_indices_of_signal
from hep_ml.metrics_utils import compute_bin_weights, compute_bin_efficiencies, weighted_deviation, \
compute_group_efficiencies_by_indices, theil, prepare_distribution, _ks_2samp_fast, compute_cdf, \
_cvm_2samp_fast
__author__ = 'Alex Rogozhnikov'
def compute_sde_on_bins(y_pred, mask, bin_indices, target_efficiencies, power=2., sample_weight=None):
# ignoring events from other classes
sample_weight = check_sample_weight(y_pred, sample_weight=sample_weight)
y_pred = y_pred[mask]
bin_indices = bin_indices[mask]
sample_weight = sample_weight[mask]
bin_weights = compute_bin_weights(bin_indices=bin_indices, sample_weight=sample_weight)
cuts = compute_cut_for_efficiency(target_efficiencies, mask=numpy.ones(len(y_pred), dtype=bool),
y_pred=y_pred, sample_weight=sample_weight)
result = 0.
for cut in cuts:
bin_efficiencies = compute_bin_efficiencies(y_pred, bin_indices=bin_indices,
cut=cut, sample_weight=sample_weight)
result += weighted_deviation(bin_efficiencies, weights=bin_weights, power=power)
return (result / len(cuts)) ** (1. / power)
def compute_theil_on_bins(y_pred, mask, bin_indices, target_efficiencies, sample_weight):
y_pred = column_or_1d(y_pred)
sample_weight = check_sample_weight(y_pred, sample_weight=sample_weight)
# ignoring events from other classes
y_pred = y_pred[mask]
bin_indices = bin_indices[mask]
sample_weight = sample_weight[mask]
bin_weights = compute_bin_weights(bin_indices=bin_indices, sample_weight=sample_weight)
cuts = compute_cut_for_efficiency(target_efficiencies, mask=numpy.ones(len(y_pred), dtype=bool),
y_pred=y_pred, sample_weight=sample_weight)
result = 0.
for cut in cuts:
bin_efficiencies = compute_bin_efficiencies(y_pred, bin_indices=bin_indices,
cut=cut, sample_weight=sample_weight)
result += theil(bin_efficiencies, weights=bin_weights)
return result / len(cuts)
def compute_sde_on_groups(y_pred, mask, groups_indices, target_efficiencies, sample_weight=None, power=2.):
y_pred = column_or_1d(y_pred)
sample_weight = check_sample_weight(y_pred, sample_weight=sample_weight)
group_weights = compute_group_weights_by_indices(groups_indices, sample_weight=sample_weight)
divided_weight = compute_divided_weight_by_indices(groups_indices, sample_weight=sample_weight * mask)
cuts = compute_cut_for_efficiency(target_efficiencies, mask=mask, y_pred=y_pred, sample_weight=sample_weight)
sde = 0.
for cut in cuts:
group_efficiencies = compute_group_efficiencies_by_indices(y_pred, groups_indices=groups_indices,
cut=cut, divided_weight=divided_weight)
# print('FROM SDE function', cut, group_efficiencies)
sde += weighted_deviation(group_efficiencies, weights=group_weights, power=power)
return (sde / len(cuts)) ** (1. / power)
def compute_theil_on_groups(y_pred, mask, groups_indices, target_efficiencies, sample_weight):
y_pred = column_or_1d(y_pred)
sample_weight = check_sample_weight(y_pred, sample_weight=sample_weight)
groups_weights = compute_group_weights_by_indices(groups_indices, sample_weight=sample_weight)
divided_weight = compute_divided_weight_by_indices(groups_indices, sample_weight=sample_weight * mask)
cuts = compute_cut_for_efficiency(target_efficiencies, mask=mask,
y_pred=y_pred, sample_weight=sample_weight)
result = 0.
for cut in cuts:
groups_efficiencies = compute_group_efficiencies_by_indices(y_pred, groups_indices=groups_indices,
cut=cut, divided_weight=divided_weight)
result += theil(groups_efficiencies, groups_weights)
return result / len(cuts)
def bin_based_ks(y_pred, mask, sample_weight, bin_indices):
"""Kolmogorov-Smirnov flatness on bins"""
assert len(y_pred) == len(sample_weight) == len(bin_indices) == len(mask)
y_pred = y_pred[mask]
sample_weight = sample_weight[mask]
bin_indices = bin_indices[mask]
bin_weights = compute_bin_weights(bin_indices=bin_indices, sample_weight=sample_weight)
prepared_data, prepared_weight, prep_F = prepare_distribution(y_pred, weights=sample_weight)
result = 0.
for bin, bin_weight in enumerate(bin_weights):
if bin_weight <= 0:
continue
local_distribution = y_pred[bin_indices == bin]
local_weights = sample_weight[bin_indices == bin]
result += bin_weight * \
_ks_2samp_fast(prepared_data, local_distribution, prepared_weight, local_weights, prep_F)
return result
def groups_based_ks(y_pred, mask, sample_weight, groups_indices):
"""Kolmogorov-Smirnov flatness on groups """
assert len(y_pred) == len(sample_weight) == len(mask)
group_weights = compute_group_weights_by_indices(groups_indices, sample_weight=sample_weight)
prepared_data, prepared_weight, prep_F = prepare_distribution(y_pred[mask], weights=sample_weight[mask])
result = 0.
for group_weight, group_indices in zip(group_weights, groups_indices):
local_distribution = y_pred[group_indices]
local_weights = sample_weight[group_indices]
result += group_weight * \
_ks_2samp_fast(prepared_data, local_distribution, prepared_weight, local_weights, prep_F)
return result
def cvm_2samp(data1, data2, weights1=None, weights2=None, power=2.):
"""Computes Cramer-von Mises similarity on 2 samples,
CvM = \int |F_2 - F_1|^p dF_1
This implementation sorts the arrays each time,
so inside loops it will be slow"""
weights1 = check_sample_weight(data1, sample_weight=weights1)
weights2 = check_sample_weight(data2, sample_weight=weights2)
weights1 /= numpy.sum(weights1)
weights2 /= numpy.sum(weights2)
data = numpy.unique(numpy.concatenate([data1, data2]))
bins = numpy.append(data, data[-1] + 1)
weights1_new = numpy.histogram(data1, bins=bins, weights=weights1)[0]
weights2_new = numpy.histogram(data2, bins=bins, weights=weights2)[0]
F1 = compute_cdf(weights1_new)
F2 = compute_cdf(weights2_new)
return numpy.average(numpy.abs(F1 - F2) ** power, weights=weights1_new)
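# Worked example (added sketch, not in the original module): since the statistic
# averages |F1 - F2|**power over the first sample's distribution, two identical
# samples give exactly zero.
#
#   >>> cvm_2samp(numpy.array([1., 2., 3.]), numpy.array([1., 2., 3.]))   # -> 0.0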
def bin_based_cvm(y_pred, sample_weight, bin_indices):
"""Cramer-von Mises similarity, quite slow meanwhile"""
assert len(y_pred) == len(sample_weight) == len(bin_indices)
bin_weights = compute_bin_weights(bin_indices=bin_indices, sample_weight=sample_weight)
result = 0.
global_data, global_weight, global_F = prepare_distribution(y_pred, weights=sample_weight)
for bin, bin_weight in enumerate(bin_weights):
if bin_weight <= 0:
continue
bin_mask = bin_indices == bin
local_distribution = y_pred[bin_mask]
local_weights = sample_weight[bin_mask]
result += bin_weight * _cvm_2samp_fast(global_data, local_distribution,
global_weight, local_weights, global_F)
return result
def group_based_cvm(y_pred, mask, sample_weight, groups_indices):
y_pred = column_or_1d(y_pred)
sample_weight = check_sample_weight(y_pred, sample_weight=sample_weight)
group_weights = compute_group_weights_by_indices(groups_indices, sample_weight=sample_weight)
result = 0.
global_data, global_weight, global_F = prepare_distribution(y_pred[mask], weights=sample_weight[mask])
for group, group_weight in zip(groups_indices, group_weights):
local_distribution = y_pred[group]
local_weights = sample_weight[group]
result += group_weight * _cvm_2samp_fast(global_data, local_distribution,
global_weight, local_weights, global_F)
return result
# endregion
# region Uniformity metrics (old version, reference code for comparison)
"""
Comments on the old interface:
Mask is needed to select the events of the needed class;
for instance, if we want to compute the uniformity on signal predictions,
mask should be True on signal events and False on the others.
y_pred is usually the predicted probability of an event belonging to the needed class.
So, if I want to compute efficiency on signal, I use:
mask = y == 1
y_pred = clf.predict_proba(X)[:, 1]
If we want to do it for background:
mask = y == 0
y_pred = clf.predict_proba(X)[:, 0]
"""
def sde(y, proba, X, uniform_features, sample_weight=None, label=1, knn=30):
""" The most simple way to compute SDE, this is however very slow
if you need to recompute SDE many times
:param y: real classes of events, shape = [n_samples]
:param proba: predicted probabilities, shape = [n_samples, n_classes]
:param X: pandas.DataFrame with uniform features
:param uniform_features: features, along which uniformity is desired, list of strings
:param sample_weight: weights of events, shape = [n_samples]
:param label: class, for which uniformity is measured (usually, 0 is bck, 1 is signal)
:param knn: number of nearest neighbours used in knn
Example of usage:
proba = classifier.predict_proba(testX)
sde(testY, proba=proba, X=testX, uniform_features=['mass'])
"""
assert len(y) == len(proba) == len(X), 'Different lengths'
X = pandas.DataFrame(X)
mask = y == label
groups = compute_knn_indices_of_signal(X[uniform_features], is_signal=mask, n_neighbours=knn)
groups = groups[mask, :]
return compute_sde_on_groups(proba[:, label], mask=mask, groups_indices=groups,
target_efficiencies=[0.5, 0.6, 0.7, 0.8, 0.9], sample_weight=sample_weight)
def theil_flatness(y, proba, X, uniform_features, sample_weight=None, label=1, knn=30):
"""This is ready-to-use function, and it is quite slow to use many times"""
mask = y == label
groups_indices = compute_knn_indices_of_signal(X[uniform_features], is_signal=mask, n_neighbours=knn)[mask, :]
return compute_theil_on_groups(proba[:, label], mask=mask, groups_indices=groups_indices,
target_efficiencies=[0.5, 0.6, 0.7, 0.8, 0.9], sample_weight=sample_weight)
def cvm_flatness(y, proba, X, uniform_features, sample_weight=None, label=1, knn=30):
""" The most simple way to compute Cramer-von Mises flatness, this is however very slow
if you need to compute it many times
:param y: real classes of events, shape = [n_samples]
:param proba: predicted probabilities, shape = [n_samples, n_classes]
:param X: pandas.DataFrame with uniform features (i.e. test dataset)
:param uniform_features: features, along which uniformity is desired, list of strings
:param sample_weight: weights of events, shape = [n_samples]
:param label: class, for which uniformity is measured (usually, 0 is bck, 1 is signal)
:param knn: number of nearest neighbours used in knn
Example of usage:
proba = classifier.predict_proba(testX)
cvm_flatness(testY, proba=proba, X=testX, uniform_features=['mass'])
"""
assert len(y) == len(proba) == len(X), 'Different lengths'
X = pandas.DataFrame(X)
signal_mask = y == label
groups_indices = compute_knn_indices_of_signal(X[uniform_features], is_signal=signal_mask, n_neighbours=knn)
groups_indices = groups_indices[signal_mask, :]
return group_based_cvm(proba[:, label], mask=signal_mask, groups_indices=groups_indices,
sample_weight=sample_weight)
# endregion
def compute_group_weights_by_indices(group_indices, sample_weight):
"""
Group weight = sum of divided weights of indices inside that group.
"""
divided_weight = compute_divided_weight_by_indices(group_indices, sample_weight=sample_weight)
result = numpy.zeros(len(group_indices))
for i, group in enumerate(group_indices):
result[i] = numpy.sum(divided_weight[group])
return result / numpy.sum(result)
def compute_divided_weight_by_indices(group_indices, sample_weight):
"""Divided weight takes into account that different events
are met different number of times """
indices = numpy.concatenate(group_indices)
occurences = numpy.bincount(indices, minlength=len(sample_weight))
return sample_weight / numpy.maximum(occurences, 1)
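# Small worked example (illustrative, not part of the original module):
# with two overlapping groups and unit weights, event 1 belongs to both
# groups, so its weight is divided between them.
#
#     >>> compute_divided_weight_by_indices([numpy.array([0, 1]), numpy.array([1, 2])],
#     ...                                   sample_weight=numpy.ones(3))
#     array([1. , 0.5, 1. ])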
|
|
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Documentation on PRESUBMIT.py can be found at:
# http://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
import os
import subprocess
import sys
# List of directories to not apply presubmit project checks, relative
# to the NaCl top directory
EXCLUDE_PROJECT_CHECKS_DIRS = [
# The following contain test data (including automatically generated),
# and do not follow our conventions.
'src/trusted/validator_ragel/testdata/32',
'src/trusted/validator_ragel/testdata/64',
'src/trusted/validator_x86/testdata/32',
'src/trusted/validator_x86/testdata/64',
'src/trusted/validator/x86/decoder/generator/testdata/32',
'src/trusted/validator/x86/decoder/generator/testdata/64',
  # The following directories contain automatically generated source,
# which may not follow our conventions.
'src/trusted/validator_x86/gen',
'src/trusted/validator/x86/decoder/gen',
'src/trusted/validator/x86/decoder/generator/gen',
'src/trusted/validator/x86/ncval_seg_sfi/gen',
'src/trusted/validator_arm/gen',
'src/trusted/validator_ragel/gen',
]
NACL_TOP_DIR = os.getcwd()
while not os.path.isfile(os.path.join(NACL_TOP_DIR, 'PRESUBMIT.py')):
NACL_TOP_DIR = os.path.dirname(NACL_TOP_DIR)
assert len(NACL_TOP_DIR) >= 3, "Could not find NaClTopDir"
def CheckGitBranch():
p = subprocess.Popen("git branch -vv", shell=True,
stdout=subprocess.PIPE)
output, _ = p.communicate()
lines = output.split('\n')
for line in lines:
# output format for checked-out branch should be
# * branchname hash [TrackedBranchName ...
    toks = line.split()
    # Skip blank lines and branches other than the checked-out one.
    if not toks or '*' not in toks[0]:
      continue
if not ('origin/master' in toks[3] or
'origin/refs/heads/master' in toks[3]):
warning = 'Warning: your current branch:\n' + line
warning += '\nis not tracking origin/master. git cl push may silently '
warning += 'fail to push your change. To fix this, do\n'
warning += 'git branch -u origin/master'
return warning
return None
print 'Warning: presubmit check could not determine local git branch'
return None
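# Illustrative note (not from the original file): for the checked-out branch,
# `git branch -vv` prints a line of the form
#   * my-branch 1a2b3c4 [origin/master: ahead 1] Commit subject
# so toks[0] is '*', toks[1] the branch name, toks[2] the hash, and toks[3]
# the tracked branch in brackets, which is what CheckGitBranch inspects.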
def _CommonChecks(input_api, output_api):
"""Checks for both upload and commit."""
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, project_name='Native Client',
excluded_paths=tuple(EXCLUDE_PROJECT_CHECKS_DIRS)))
branch_warning = CheckGitBranch()
if branch_warning:
results.append(output_api.PresubmitPromptWarning(branch_warning))
return results
def IsFileInDirectories(f, dirs):
  """ Returns True if f is inside one of the given directories."""
  for d in dirs:
    # f is inside d iff d is a prefix of f (compare by value, not identity).
    if d == os.path.commonprefix([f, d]):
      return True
  return False
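# Illustrative example (not from the original file), assuming a hypothetical
# checkout rooted at /nacl:
#   IsFileInDirectories('/nacl/src/trusted/validator_arm/gen/foo.cc',
#                       ['/nacl/src/trusted/validator_arm/gen/'])   # -> True
#   IsFileInDirectories('/nacl/src/trusted/validator_arm/model.cc',
#                       ['/nacl/src/trusted/validator_arm/gen/'])   # -> False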
def CheckChangeOnUpload(input_api, output_api):
"""Verifies all changes in all files.
Args:
input_api: the limited set of input modules allowed in presubmit.
output_api: the limited set of output modules allowed in presubmit.
"""
report = []
report.extend(_CommonChecks(input_api, output_api))
# The commit queue assumes PRESUBMIT.py is standalone.
# TODO(bradnelson): Migrate code_hygiene to a common location so that
# it can be used by the commit queue.
old_sys_path = list(sys.path)
try:
sys.path.append(os.path.join(NACL_TOP_DIR, 'tools'))
sys.path.append(os.path.join(NACL_TOP_DIR, 'build'))
import code_hygiene
finally:
sys.path = old_sys_path
del old_sys_path
affected_files = input_api.AffectedFiles(include_deletes=False)
exclude_dirs = [ NACL_TOP_DIR + '/' + x + '/'
for x in EXCLUDE_PROJECT_CHECKS_DIRS ]
for filename in affected_files:
filename = filename.AbsoluteLocalPath()
if not IsFileInDirectories(filename, exclude_dirs):
errors, warnings = code_hygiene.CheckFile(filename, False)
for e in errors:
report.append(output_api.PresubmitError(e, items=errors[e]))
for w in warnings:
report.append(output_api.PresubmitPromptWarning(w, items=warnings[w]))
return report
def CheckChangeOnCommit(input_api, output_api):
"""Verifies all changes in all files and verifies that the
tree is open and can accept a commit.
Args:
input_api: the limited set of input modules allowed in presubmit.
output_api: the limited set of output modules allowed in presubmit.
"""
report = []
report.extend(CheckChangeOnUpload(input_api, output_api))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://nativeclient-status.appspot.com/current?format=json'))
return report
# Note that this list is duplicated in the Commit Queue. If you
# change this list, you should also update the CQ's list here:
# https://chrome-internal.googlesource.com/infra/infra_internal/+/master/commit_queue/projects.py
# (see https://crbug.com/399059).
DEFAULT_TRYBOTS = [
'nacl-precise32_newlib_dbg',
'nacl-precise32_newlib_opt',
'nacl-precise32_glibc_opt',
'nacl-precise64_newlib_dbg',
'nacl-precise64_newlib_opt',
'nacl-precise64_glibc_opt',
'nacl-mac10.6_newlib_opt',
'nacl-mac10.6_glibc_opt',
'nacl-mac10.6_64_newlib_dbg',
'nacl-mac10.6_64_glibc_opt',
'nacl-mac10.7_newlib_opt',
'nacl-mac10.7_glibc_opt',
'nacl-mac10.7_64_newlib_dbg',
'nacl-mac10.7_64_glibc_opt',
'nacl-mac10.8_32_newlib_dbg',
'nacl-mac10.8_32_glibc_opt',
'nacl-mac10.8_64_newlib_dbg',
'nacl-mac10.8_64_glibc_opt',
'nacl-win32_newlib_opt',
'nacl-win32_glibc_opt',
'nacl-win64_newlib_dbg',
'nacl-win64_newlib_opt',
'nacl-win64_glibc_opt',
'nacl-win8-64_newlib_dbg',
'nacl-win8-64_newlib_opt',
'nacl-arm_opt_panda',
# arm-nacl-gcc bots
'nacl-win7_64_arm_newlib_opt',
'nacl-mac10.7_arm_newlib_opt',
'nacl-precise64_arm_newlib_opt',
# Clang bots
'nacl-precise_64-newlib-dbg-clang',
'nacl-mac10.6-newlib-dbg-clang',
# pnacl scons bots
'nacl-precise_64-newlib-arm_qemu-pnacl',
'nacl-precise_64-newlib-x86_32-pnacl',
'nacl-precise_64-newlib-x86_64-pnacl',
'nacl-mac10.8_newlib_opt_pnacl',
'nacl-win7_64_newlib_opt_pnacl',
# pnacl spec2k bots
'nacl-arm_perf_panda',
'nacl-precise_64-newlib-x86_32-pnacl-spec',
'nacl-precise_64-newlib-x86_64-pnacl-spec',
# android
'nacl-precise64-newlib-opt-android',
]
PNACL_TOOLCHAIN_TRYBOTS = [
'nacl-toolchain-linux-pnacl-x86_64',
'nacl-toolchain-linux-pnacl-x86_32',
'nacl-toolchain-mac-pnacl-x86_32',
'nacl-toolchain-win7-pnacl-x86_64',
]
TOOLCHAIN_BUILD_TRYBOTS = [
'nacl-toolchain-precise64-newlib-arm',
'nacl-toolchain-mac-newlib-arm',
] + PNACL_TOOLCHAIN_TRYBOTS
def GetPreferredTryMasters(_, change):
has_pnacl = False
has_toolchain_build = False
has_others = False
for file in change.AffectedFiles(include_dirs=True):
if IsFileInDirectories(file.AbsoluteLocalPath(),
[os.path.join(NACL_TOP_DIR, 'build'),
os.path.join(NACL_TOP_DIR, 'buildbot'),
os.path.join(NACL_TOP_DIR, 'pynacl')]):
# Buildbot and infrastructure changes should trigger all the try bots.
has_pnacl = True
has_toolchain_build = True
has_others = True
break
elif IsFileInDirectories(file.AbsoluteLocalPath(),
[os.path.join(NACL_TOP_DIR, 'pnacl')]):
has_pnacl = True
elif IsFileInDirectories(file.AbsoluteLocalPath(),
[os.path.join(NACL_TOP_DIR, 'toolchain_build')]):
has_toolchain_build = True
else:
has_others = True
trybots = []
if has_pnacl:
trybots += PNACL_TOOLCHAIN_TRYBOTS
if has_toolchain_build:
trybots += TOOLCHAIN_BUILD_TRYBOTS
if has_others:
trybots += DEFAULT_TRYBOTS
return {
'tryserver.nacl': { t: set(['defaulttests']) for t in set(trybots) },
}
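# Illustrative summary (not from the original file) of GetPreferredTryMasters
# above:
#   - files under pnacl/                        -> PNACL_TOOLCHAIN_TRYBOTS
#   - files under toolchain_build/              -> TOOLCHAIN_BUILD_TRYBOTS
#   - any other file                            -> DEFAULT_TRYBOTS
#   - files under build/, buildbot/ or pynacl/  -> all of the above
# The union of the selected bots is returned under the 'tryserver.nacl' master.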
|
|
from ..errors import InvalidVersion
from ..utils import check_resource, minimum_version
from ..utils import version_lt
from .. import utils
class NetworkApiMixin(object):
def networks(self, names=None, ids=None, filters=None):
"""
        List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by
ids (:py:class:`list`): List of ids to filter by
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- ``label=[<key>]`` or ``label=[<key>=<value>]``.
- ``type=["custom"|"builtin"]`` Filters networks by type.
Returns:
            (list of dict): List of network objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if filters is None:
filters = {}
if names:
filters['name'] = names
if ids:
filters['id'] = ids
params = {'filters': utils.convert_filters(filters)}
url = self._url("/networks")
res = self._get(url, params=params)
return self._result(res, json=True)
def create_network(self, name, driver=None, options=None, ipam=None,
check_duplicate=None, internal=False, labels=None,
enable_ipv6=False, attachable=None, scope=None,
ingress=None):
"""
        Create a network. Similar to the ``docker network create`` command.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
            check_duplicate (bool): Request daemon to check for networks with
                the same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
scope (str): Specify the network's scope (``local``, ``global`` or
``swarm``)
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
Returns:
(dict): The created network reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
>>> ipam_pool = docker.types.IPAMPool(
subnet='192.168.52.0/24',
gateway='192.168.52.254'
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
>>> docker_client.create_network("network1", driver="bridge",
ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
data = {
'Name': name,
'Driver': driver,
'Options': options,
'IPAM': ipam,
'CheckDuplicate': check_duplicate,
}
if labels is not None:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'network labels were introduced in API 1.23'
)
if not isinstance(labels, dict):
raise TypeError('labels must be a dictionary')
data["Labels"] = labels
if enable_ipv6:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'enable_ipv6 was introduced in API 1.23'
)
data['EnableIPv6'] = True
if internal:
if version_lt(self._version, '1.22'):
raise InvalidVersion('Internal networks are not '
'supported in API version < 1.22')
data['Internal'] = True
if attachable is not None:
if version_lt(self._version, '1.24'):
raise InvalidVersion(
'attachable is not supported in API version < 1.24'
)
data['Attachable'] = attachable
if ingress is not None:
if version_lt(self._version, '1.29'):
raise InvalidVersion(
'ingress is not supported in API version < 1.29'
)
data['Ingress'] = ingress
if scope is not None:
if version_lt(self._version, '1.30'):
raise InvalidVersion(
'scope is not supported in API version < 1.30'
)
data['Scope'] = scope
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True)
@minimum_version('1.25')
def prune_networks(self, filters=None):
"""
Delete unused networks
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted network names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/networks/prune')
return self._result(self._post(url, params=params), True)
@check_resource('net_id')
def remove_network(self, net_id):
"""
Remove a network. Similar to the ``docker network rm`` command.
Args:
net_id (str): The network's id
"""
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res)
@check_resource('net_id')
def inspect_network(self, net_id, verbose=None, scope=None):
"""
Get detailed information about a network.
Args:
net_id (str): ID of network
verbose (bool): Show the service details across the cluster in
swarm mode.
scope (str): Filter the network by scope (``swarm``, ``global``
or ``local``).
"""
params = {}
if verbose is not None:
if version_lt(self._version, '1.28'):
raise InvalidVersion('verbose was introduced in API 1.28')
params['verbose'] = verbose
if scope is not None:
if version_lt(self._version, '1.31'):
raise InvalidVersion('scope was introduced in API 1.31')
params['scope'] = scope
url = self._url("/networks/{0}", net_id)
res = self._get(url, params=params)
return self._result(res, json=True)
@check_resource('container')
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
link_local_ips=None):
"""
Connect a container to a network.
Args:
container (str): container-id/name to be connected to the network
net_id (str): network id
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
links (:py:class:`list`): A list of links for this endpoint.
Containers declared in this list will be linked to this
container. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local
(IPv4/IPv6) addresses.
"""
data = {
"Container": container,
"EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address,
ipv6_address=ipv6_address, link_local_ips=link_local_ips
),
}
url = self._url("/networks/{0}/connect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
@check_resource('container')
def disconnect_container_from_network(self, container, net_id,
force=False):
"""
Disconnect a container from a network.
Args:
container (str): container ID or name to be disconnected from the
network
net_id (str): network ID
force (bool): Force the container to disconnect from a network.
Default: ``False``
"""
data = {"Container": container}
if force:
if version_lt(self._version, '1.22'):
raise InvalidVersion(
'Forced disconnect was introduced in API 1.22'
)
data['Force'] = force
url = self._url("/networks/{0}/disconnect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
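# Illustrative usage sketch (not part of the original module); assumes an
# APIClient instance named `client` and an existing container 'my-container':
#
#     net = client.create_network('backend', driver='bridge')
#     client.connect_container_to_network('my-container', net['Id'], aliases=['db'])
#     client.inspect_network(net['Id'])
#     client.disconnect_container_from_network('my-container', net['Id'])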
|
|
data = (
'mya', # 0x00
'myag', # 0x01
'myagg', # 0x02
'myags', # 0x03
'myan', # 0x04
'myanj', # 0x05
'myanh', # 0x06
'myad', # 0x07
'myal', # 0x08
'myalg', # 0x09
'myalm', # 0x0a
'myalb', # 0x0b
'myals', # 0x0c
'myalt', # 0x0d
'myalp', # 0x0e
'myalh', # 0x0f
'myam', # 0x10
'myab', # 0x11
'myabs', # 0x12
'myas', # 0x13
'myass', # 0x14
'myang', # 0x15
'myaj', # 0x16
'myac', # 0x17
'myak', # 0x18
'myat', # 0x19
'myap', # 0x1a
'myah', # 0x1b
'myae', # 0x1c
'myaeg', # 0x1d
'myaegg', # 0x1e
'myaegs', # 0x1f
'myaen', # 0x20
'myaenj', # 0x21
'myaenh', # 0x22
'myaed', # 0x23
'myael', # 0x24
'myaelg', # 0x25
'myaelm', # 0x26
'myaelb', # 0x27
'myaels', # 0x28
'myaelt', # 0x29
'myaelp', # 0x2a
'myaelh', # 0x2b
'myaem', # 0x2c
'myaeb', # 0x2d
'myaebs', # 0x2e
'myaes', # 0x2f
'myaess', # 0x30
'myaeng', # 0x31
'myaej', # 0x32
'myaec', # 0x33
'myaek', # 0x34
'myaet', # 0x35
'myaep', # 0x36
'myaeh', # 0x37
'meo', # 0x38
'meog', # 0x39
'meogg', # 0x3a
'meogs', # 0x3b
'meon', # 0x3c
'meonj', # 0x3d
'meonh', # 0x3e
'meod', # 0x3f
'meol', # 0x40
'meolg', # 0x41
'meolm', # 0x42
'meolb', # 0x43
'meols', # 0x44
'meolt', # 0x45
'meolp', # 0x46
'meolh', # 0x47
'meom', # 0x48
'meob', # 0x49
'meobs', # 0x4a
'meos', # 0x4b
'meoss', # 0x4c
'meong', # 0x4d
'meoj', # 0x4e
'meoc', # 0x4f
'meok', # 0x50
'meot', # 0x51
'meop', # 0x52
'meoh', # 0x53
'me', # 0x54
'meg', # 0x55
'megg', # 0x56
'megs', # 0x57
'men', # 0x58
'menj', # 0x59
'menh', # 0x5a
'med', # 0x5b
'mel', # 0x5c
'melg', # 0x5d
'melm', # 0x5e
'melb', # 0x5f
'mels', # 0x60
'melt', # 0x61
'melp', # 0x62
'melh', # 0x63
'mem', # 0x64
'meb', # 0x65
'mebs', # 0x66
'mes', # 0x67
'mess', # 0x68
'meng', # 0x69
'mej', # 0x6a
'mec', # 0x6b
'mek', # 0x6c
'met', # 0x6d
'mep', # 0x6e
'meh', # 0x6f
'myeo', # 0x70
'myeog', # 0x71
'myeogg', # 0x72
'myeogs', # 0x73
'myeon', # 0x74
'myeonj', # 0x75
'myeonh', # 0x76
'myeod', # 0x77
'myeol', # 0x78
'myeolg', # 0x79
'myeolm', # 0x7a
'myeolb', # 0x7b
'myeols', # 0x7c
'myeolt', # 0x7d
'myeolp', # 0x7e
'myeolh', # 0x7f
'myeom', # 0x80
'myeob', # 0x81
'myeobs', # 0x82
'myeos', # 0x83
'myeoss', # 0x84
'myeong', # 0x85
'myeoj', # 0x86
'myeoc', # 0x87
'myeok', # 0x88
'myeot', # 0x89
'myeop', # 0x8a
'myeoh', # 0x8b
'mye', # 0x8c
'myeg', # 0x8d
'myegg', # 0x8e
'myegs', # 0x8f
'myen', # 0x90
'myenj', # 0x91
'myenh', # 0x92
'myed', # 0x93
'myel', # 0x94
'myelg', # 0x95
'myelm', # 0x96
'myelb', # 0x97
'myels', # 0x98
'myelt', # 0x99
'myelp', # 0x9a
'myelh', # 0x9b
'myem', # 0x9c
'myeb', # 0x9d
'myebs', # 0x9e
'myes', # 0x9f
'myess', # 0xa0
'myeng', # 0xa1
'myej', # 0xa2
'myec', # 0xa3
'myek', # 0xa4
'myet', # 0xa5
'myep', # 0xa6
'myeh', # 0xa7
'mo', # 0xa8
'mog', # 0xa9
'mogg', # 0xaa
'mogs', # 0xab
'mon', # 0xac
'monj', # 0xad
'monh', # 0xae
'mod', # 0xaf
'mol', # 0xb0
'molg', # 0xb1
'molm', # 0xb2
'molb', # 0xb3
'mols', # 0xb4
'molt', # 0xb5
'molp', # 0xb6
'molh', # 0xb7
'mom', # 0xb8
'mob', # 0xb9
'mobs', # 0xba
'mos', # 0xbb
'moss', # 0xbc
'mong', # 0xbd
'moj', # 0xbe
'moc', # 0xbf
'mok', # 0xc0
'mot', # 0xc1
'mop', # 0xc2
'moh', # 0xc3
'mwa', # 0xc4
'mwag', # 0xc5
'mwagg', # 0xc6
'mwags', # 0xc7
'mwan', # 0xc8
'mwanj', # 0xc9
'mwanh', # 0xca
'mwad', # 0xcb
'mwal', # 0xcc
'mwalg', # 0xcd
'mwalm', # 0xce
'mwalb', # 0xcf
'mwals', # 0xd0
'mwalt', # 0xd1
'mwalp', # 0xd2
'mwalh', # 0xd3
'mwam', # 0xd4
'mwab', # 0xd5
'mwabs', # 0xd6
'mwas', # 0xd7
'mwass', # 0xd8
'mwang', # 0xd9
'mwaj', # 0xda
'mwac', # 0xdb
'mwak', # 0xdc
'mwat', # 0xdd
'mwap', # 0xde
'mwah', # 0xdf
'mwae', # 0xe0
'mwaeg', # 0xe1
'mwaegg', # 0xe2
'mwaegs', # 0xe3
'mwaen', # 0xe4
'mwaenj', # 0xe5
'mwaenh', # 0xe6
'mwaed', # 0xe7
'mwael', # 0xe8
'mwaelg', # 0xe9
'mwaelm', # 0xea
'mwaelb', # 0xeb
'mwaels', # 0xec
'mwaelt', # 0xed
'mwaelp', # 0xee
'mwaelh', # 0xef
'mwaem', # 0xf0
'mwaeb', # 0xf1
'mwaebs', # 0xf2
'mwaes', # 0xf3
'mwaess', # 0xf4
'mwaeng', # 0xf5
'mwaej', # 0xf6
'mwaec', # 0xf7
'mwaek', # 0xf8
'mwaet', # 0xf9
'mwaep', # 0xfa
'mwaeh', # 0xfb
'moe', # 0xfc
'moeg', # 0xfd
'moegg', # 0xfe
'moegs', # 0xff
)
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes for calculating the ewald sum of a structure.
"""
from math import pi, sqrt, log
from datetime import datetime
from copy import deepcopy, copy
from warnings import warn
import bisect
import numpy as np
from scipy.special import erfc, comb
import scipy.constants as constants
__author__ = "Shyue Ping Ong, William Davidson Richard"
__copyright__ = "Copyright 2011, The Materials Project"
__credits__ = "Christopher Fischer"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Aug 1 2012"
class EwaldSummation:
"""
Calculates the electrostatic energy of a periodic array of charges using
the Ewald technique.
Ref:
Ewald summation techniques in perspective: a survey
Abdulnour Y. Toukmaji and John A. Board Jr.
DOI: 10.1016/0010-4655(96)00016-1
URL: http://www.ee.duke.edu/~ayt/ewaldpaper/ewaldpaper.html
This matrix can be used to do fast calculations of ewald sums after species
removal.
E = E_recip + E_real + E_point
Atomic units used in the code, then converted to eV.
"""
# Converts unit of q*q/r into eV
CONV_FACT = 1e10 * constants.e / (4 * pi * constants.epsilon_0)
def __init__(self, structure, real_space_cut=None, recip_space_cut=None,
eta=None, acc_factor=12.0, w=1 / sqrt(2), compute_forces=False):
"""
Initializes and calculates the Ewald sum. Default convergence
parameters have been specified, but you can override them if you wish.
Args:
structure (Structure): Input structure that must have proper
Specie on all sites, i.e. Element with oxidation state. Use
Structure.add_oxidation_state... for example.
real_space_cut (float): Real space cutoff radius dictating how
many terms are used in the real space sum. Defaults to None,
which means determine automagically using the formula given
in gulp 3.1 documentation.
recip_space_cut (float): Reciprocal space cutoff radius.
Defaults to None, which means determine automagically using
the formula given in gulp 3.1 documentation.
eta (float): The screening parameter. Defaults to None, which means
determine automatically.
acc_factor (float): No. of significant figures each sum is
converged to.
            w (float): Weight parameter representing the relative
                computational expense of calculating a term in real versus
                reciprocal space. The default of ~0.7 reproduces results
similar to GULP 4.2. This has little effect on the total
energy, but may influence speed of computation in large
systems. Note that this parameter is used only when the
cutoffs are set to None.
compute_forces (bool): Whether to compute forces. False by
default since it is usually not needed.
"""
self._s = structure
self._charged = abs(structure.charge) > 1e-8
self._vol = structure.volume
self._compute_forces = compute_forces
self._acc_factor = acc_factor
# set screening length
self._eta = eta if eta \
else (len(structure) * w / (self._vol ** 2)) ** (1 / 3) * pi
self._sqrt_eta = sqrt(self._eta)
# acc factor used to automatically determine the optimal real and
# reciprocal space cutoff radii
self._accf = sqrt(log(10 ** acc_factor))
self._rmax = real_space_cut if real_space_cut \
else self._accf / self._sqrt_eta
self._gmax = recip_space_cut if recip_space_cut \
else 2 * self._sqrt_eta * self._accf
# The next few lines pre-compute certain quantities and store them.
# Ewald summation is rather expensive, and these shortcuts are
# necessary to obtain several factors of improvement in speedup.
self._oxi_states = [compute_average_oxidation_state(site)
for site in structure]
self._coords = np.array(self._s.cart_coords)
# Now we call the relevant private methods to calculate the reciprocal
# and real space terms.
(self._recip, recip_forces) = self._calc_recip()
(self._real, self._point, real_point_forces) = \
self._calc_real_and_point()
if self._compute_forces:
self._forces = recip_forces + real_point_forces
# Compute the correction for a charged cell
self._charged_cell_energy = - EwaldSummation.CONV_FACT / 2 * np.pi / \
structure.volume / self._eta * structure.charge ** 2
def compute_partial_energy(self, removed_indices):
"""
Gives total ewald energy for certain sites being removed, i.e. zeroed
out.
"""
total_energy_matrix = self.total_energy_matrix.copy()
for i in removed_indices:
total_energy_matrix[i, :] = 0
total_energy_matrix[:, i] = 0
return sum(sum(total_energy_matrix))
def compute_sub_structure(self, sub_structure, tol=1e-3):
"""
        Gives total ewald energy for a substructure in the same
        lattice. The sub_structure must be a subset of the original
        structure, with possibly different charges.
        Args:
            sub_structure (Structure): Substructure to compute Ewald sum for.
tol (float): Tolerance for site matching in fractional coordinates.
Returns:
Ewald sum of substructure.
"""
total_energy_matrix = self.total_energy_matrix.copy()
def find_match(site):
for test_site in sub_structure:
frac_diff = abs(np.array(site.frac_coords)
- np.array(test_site.frac_coords)) % 1
frac_diff = [abs(a) < tol or abs(a) > 1 - tol
for a in frac_diff]
if all(frac_diff):
return test_site
return None
matches = []
for i, site in enumerate(self._s):
matching_site = find_match(site)
if matching_site:
new_charge = compute_average_oxidation_state(matching_site)
old_charge = self._oxi_states[i]
scaling_factor = new_charge / old_charge
matches.append(matching_site)
else:
scaling_factor = 0
total_energy_matrix[i, :] *= scaling_factor
total_energy_matrix[:, i] *= scaling_factor
if len(matches) != len(sub_structure):
output = ["Missing sites."]
for site in sub_structure:
if site not in matches:
output.append("unmatched = {}".format(site))
raise ValueError("\n".join(output))
return sum(sum(total_energy_matrix))
@property
def reciprocal_space_energy(self):
"""
The reciprocal space energy.
"""
return sum(sum(self._recip))
@property
def reciprocal_space_energy_matrix(self):
"""
The reciprocal space energy matrix. Each matrix element (i, j)
corresponds to the interaction energy between site i and site j in
reciprocal space.
"""
return self._recip
@property
def real_space_energy(self):
"""
        The real space energy.
"""
return sum(sum(self._real))
@property
def real_space_energy_matrix(self):
"""
The real space energy matrix. Each matrix element (i, j) corresponds to
the interaction energy between site i and site j in real space.
"""
return self._real
@property
def point_energy(self):
"""
The point energy.
"""
return sum(self._point)
@property
def point_energy_matrix(self):
"""
        The point energy matrix. A diagonal matrix with the point terms for each
site in the diagonal elements.
"""
return self._point
@property
def total_energy(self):
"""
The total energy.
"""
return sum(sum(self._recip)) + sum(sum(self._real)) + sum(self._point) + self._charged_cell_energy
@property
def total_energy_matrix(self):
"""
The total energy matrix. Each matrix element (i, j) corresponds to the
total interaction energy between site i and site j.
Note that this does not include the charged-cell energy, which is only important
when the simulation cell is not charge balanced.
"""
totalenergy = self._recip + self._real
for i in range(len(self._point)):
totalenergy[i, i] += self._point[i]
return totalenergy
@property
def forces(self):
"""
The forces on each site as a Nx3 matrix. Each row corresponds to a
site.
"""
if not self._compute_forces:
raise AttributeError(
"Forces are available only if compute_forces is True!")
return self._forces
def get_site_energy(self, site_index):
"""Compute the energy for a single site in the structure
Args:
site_index (int): Index of site
        Returns:
(float) - Energy of that site"""
if self._charged:
warn('Per atom energies for charged structures not supported in EwaldSummation')
return np.sum(self._recip[:, site_index]) + np.sum(self._real[:, site_index]) + self._point[site_index]
def _calc_recip(self):
"""
Perform the reciprocal space summation. Calculates the quantity
E_recip = 1/(2PiV) sum_{G < Gmax} exp(-(G.G/4/eta))/(G.G) S(G)S(-G)
where
S(G) = sum_{k=1,N} q_k exp(-i G.r_k)
S(G)S(-G) = |S(G)|**2
This method is heavily vectorized to utilize numpy's C backend for
speed.
"""
numsites = self._s.num_sites
prefactor = 2 * pi / self._vol
        erecip = np.zeros((numsites, numsites), dtype=float)
        forces = np.zeros((numsites, 3), dtype=float)
coords = self._coords
rcp_latt = self._s.lattice.reciprocal_lattice
recip_nn = rcp_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0],
self._gmax)
frac_coords = [fcoords for (fcoords, dist, i, img) in recip_nn if dist != 0]
gs = rcp_latt.get_cartesian_coords(frac_coords)
g2s = np.sum(gs ** 2, 1)
expvals = np.exp(-g2s / (4 * self._eta))
grs = np.sum(gs[:, None] * coords[None, :], 2)
oxistates = np.array(self._oxi_states)
# create array where q_2[i,j] is qi * qj
qiqj = oxistates[None, :] * oxistates[:, None]
# calculate the structure factor
sreals = np.sum(oxistates[None, :] * np.cos(grs), 1)
simags = np.sum(oxistates[None, :] * np.sin(grs), 1)
for g, g2, gr, expval, sreal, simag in zip(gs, g2s, grs, expvals,
sreals, simags):
# Uses the identity sin(x)+cos(x) = 2**0.5 sin(x + pi/4)
m = (gr[None, :] + pi / 4) - gr[:, None]
np.sin(m, m)
m *= expval / g2
erecip += m
if self._compute_forces:
pref = 2 * expval / g2 * oxistates
factor = prefactor * pref * (
sreal * np.sin(gr) - simag * np.cos(gr))
forces += factor[:, None] * g[None, :]
forces *= EwaldSummation.CONV_FACT
erecip *= prefactor * EwaldSummation.CONV_FACT * qiqj * 2 ** 0.5
return erecip, forces
def _calc_real_and_point(self):
"""
        Performs the real space summation and determines the self (point)
        energy -(eta/pi)**(1/2) * sum_{i=1}^{N} q_i**2.
"""
fcoords = self._s.frac_coords
forcepf = 2.0 * self._sqrt_eta / sqrt(pi)
coords = self._coords
numsites = self._s.num_sites
        ereal = np.empty((numsites, numsites), dtype=float)
        forces = np.zeros((numsites, 3), dtype=float)
qs = np.array(self._oxi_states)
epoint = - qs ** 2 * sqrt(self._eta / pi)
for i in range(numsites):
nfcoords, rij, js, _ = self._s.lattice.get_points_in_sphere(fcoords,
coords[i], self._rmax, zip_results=False)
# remove the rii term
inds = rij > 1e-8
js = js[inds]
rij = rij[inds]
nfcoords = nfcoords[inds]
qi = qs[i]
qj = qs[js]
erfcval = erfc(self._sqrt_eta * rij)
new_ereals = erfcval * qi * qj / rij
# insert new_ereals
for k in range(numsites):
ereal[k, i] = np.sum(new_ereals[js == k])
if self._compute_forces:
nccoords = self._s.lattice.get_cartesian_coords(nfcoords)
fijpf = qj / rij ** 3 * (erfcval + forcepf * rij *
np.exp(-self._eta * rij ** 2))
forces[i] += np.sum(np.expand_dims(fijpf, 1) *
(np.array([coords[i]]) - nccoords) *
qi * EwaldSummation.CONV_FACT, axis=0)
ereal *= 0.5 * EwaldSummation.CONV_FACT
epoint *= EwaldSummation.CONV_FACT
return ereal, epoint, forces
@property
def eta(self):
"""
Returns: eta value used in Ewald summation.
"""
return self._eta
def __str__(self):
if self._compute_forces:
output = ["Real = " + str(self.real_space_energy),
"Reciprocal = " + str(self.reciprocal_space_energy),
"Point = " + str(self.point_energy),
"Total = " + str(self.total_energy),
"Forces:\n" + str(self.forces)
]
else:
output = ["Real = " + str(self.real_space_energy),
"Reciprocal = " + str(self.reciprocal_space_energy),
"Point = " + str(self.point_energy),
"Total = " + str(self.total_energy),
"Forces were not computed"]
return "\n".join(output)
class EwaldMinimizer:
"""
This class determines the manipulations that will minimize an ewald matrix,
given a list of possible manipulations. This class does not perform the
manipulations on a structure, but will return the list of manipulations
that should be done on one to produce the minimal structure. It returns the
manipulations for the n lowest energy orderings. This class should be used
to perform fractional species substitution or fractional species removal to
produce a new structure. These manipulations create large numbers of
candidate structures, and this class can be used to pick out those with the
lowest ewald sum.
An alternative (possibly more intuitive) interface to this class is the
order disordered structure transformation.
Author - Will Richards
"""
ALGO_FAST = 0
ALGO_COMPLETE = 1
ALGO_BEST_FIRST = 2
"""
ALGO_TIME_LIMIT: Slowly increases the speed (with the cost of decreasing
accuracy) as the minimizer runs. Attempts to limit the run time to
approximately 30 minutes.
"""
ALGO_TIME_LIMIT = 3
def __init__(self, matrix, m_list, num_to_return=1, algo=ALGO_FAST):
"""
Args:
matrix: A matrix of the ewald sum interaction energies. This is stored
in the class as a diagonally symmetric array and so
self._matrix will not be the same as the input matrix.
m_list: list of manipulations. each item is of the form
(multiplication fraction, number_of_indices, indices, species)
These are sorted such that the first manipulation contains the
                most permutations. This is actually evaluated last in the
                recursion since I'm using pop.
            num_to_return: The minimizer will find the num_to_return lowest
energy structures. This is likely to return a number of duplicate
structures so it may be necessary to overestimate and then
remove the duplicates later. (duplicate checking in this
process is extremely expensive)
"""
# Setup and checking of inputs
self._matrix = copy(matrix)
# Make the matrix diagonally symmetric (so matrix[i,:] == matrix[:,j])
for i in range(len(self._matrix)):
for j in range(i, len(self._matrix)):
value = (self._matrix[i, j] + self._matrix[j, i]) / 2
self._matrix[i, j] = value
self._matrix[j, i] = value
# sort the m_list based on number of permutations
self._m_list = sorted(m_list, key=lambda x: comb(len(x[2]), x[1]),
reverse=True)
for mlist in self._m_list:
if mlist[0] > 1:
raise ValueError('multiplication fractions must be <= 1')
self._current_minimum = float('inf')
self._num_to_return = num_to_return
self._algo = algo
if algo == EwaldMinimizer.ALGO_COMPLETE:
raise NotImplementedError('Complete algo not yet implemented for '
'EwaldMinimizer')
self._output_lists = []
# Tag that the recurse function looks at at each level. If a method
# sets this to true it breaks the recursion and stops the search.
self._finished = False
self._start_time = datetime.utcnow()
self.minimize_matrix()
self._best_m_list = self._output_lists[0][1]
self._minimized_sum = self._output_lists[0][0]
def minimize_matrix(self):
"""
        This method finds and returns the permutations that produce the lowest
        ewald sum, calling a recursive function to iterate through the permutations.
"""
if self._algo == EwaldMinimizer.ALGO_FAST or \
self._algo == EwaldMinimizer.ALGO_BEST_FIRST:
return self._recurse(self._matrix, self._m_list,
set(range(len(self._matrix))))
def add_m_list(self, matrix_sum, m_list):
"""
This adds an m_list to the output_lists and updates the current
minimum if the list is full.
"""
if self._output_lists is None:
self._output_lists = [[matrix_sum, m_list]]
else:
bisect.insort(self._output_lists, [matrix_sum, m_list])
if self._algo == EwaldMinimizer.ALGO_BEST_FIRST and \
len(self._output_lists) == self._num_to_return:
self._finished = True
if len(self._output_lists) > self._num_to_return:
self._output_lists.pop()
if len(self._output_lists) == self._num_to_return:
self._current_minimum = self._output_lists[-1][0]
def best_case(self, matrix, m_list, indices_left):
"""
Computes a best case given a matrix and manipulation list.
Args:
matrix: the current matrix (with some permutations already
performed)
m_list: [(multiplication fraction, number_of_indices, indices,
species)] describing the manipulation
            indices_left: Set of indices which haven't had a permutation
performed on them.
"""
m_indices = []
fraction_list = []
for m in m_list:
m_indices.extend(m[2])
fraction_list.extend([m[0]] * m[1])
indices = list(indices_left.intersection(m_indices))
interaction_matrix = matrix[indices, :][:, indices]
fractions = np.zeros(len(interaction_matrix)) + 1
fractions[:len(fraction_list)] = fraction_list
fractions = np.sort(fractions)
# Sum associated with each index (disregarding interactions between
# indices)
sums = 2 * np.sum(matrix[indices], axis=1)
sums = np.sort(sums)
# Interaction corrections. Can be reduced to (1-x)(1-y) for x,y in
# fractions each element in a column gets multiplied by (1-x), and then
# the sum of the columns gets multiplied by (1-y) since fractions are
# less than 1, there is no effect of one choice on the other
step1 = np.sort(interaction_matrix) * (1 - fractions)
step2 = np.sort(np.sum(step1, axis=1))
step3 = step2 * (1 - fractions)
interaction_correction = np.sum(step3)
if self._algo == self.ALGO_TIME_LIMIT:
elapsed_time = datetime.utcnow() - self._start_time
speedup_parameter = elapsed_time.total_seconds() / 1800
avg_int = np.sum(interaction_matrix, axis=None)
avg_frac = np.average(np.outer(1 - fractions, 1 - fractions))
average_correction = avg_int * avg_frac
interaction_correction = average_correction * speedup_parameter \
+ interaction_correction * (1 - speedup_parameter)
best_case = np.sum(matrix) + np.inner(sums[::-1], fractions - 1) + interaction_correction
return best_case
def get_next_index(self, matrix, manipulation, indices_left):
"""
Returns an index that should have the most negative effect on the
matrix sum
"""
f = manipulation[0]
indices = list(indices_left.intersection(manipulation[2]))
sums = np.sum(matrix[indices], axis=1)
if f < 1:
next_index = indices[sums.argmax(axis=0)]
else:
next_index = indices[sums.argmin(axis=0)]
return next_index
def _recurse(self, matrix, m_list, indices, output_m_list=[]):
"""
This method recursively finds the minimal permutations using a binary
tree search strategy.
Args:
matrix: The current matrix (with some permutations already
performed).
m_list: The list of permutations still to be performed
indices: Set of indices which haven't had a permutation
performed on them.
"""
# check to see if we've found all the solutions that we need
if self._finished:
return
# if we're done with the current manipulation, pop it off.
while m_list[-1][1] == 0:
m_list = copy(m_list)
m_list.pop()
# if there are no more manipulations left to do check the value
if not m_list:
matrix_sum = np.sum(matrix)
if matrix_sum < self._current_minimum:
self.add_m_list(matrix_sum, output_m_list)
return
        # if we won't have enough indices left, return
if m_list[-1][1] > len(indices.intersection(m_list[-1][2])):
return
if len(m_list) == 1 or m_list[-1][1] > 1:
if self.best_case(matrix, m_list, indices) > self._current_minimum:
return
index = self.get_next_index(matrix, m_list[-1], indices)
m_list[-1][2].remove(index)
# Make the matrix and new m_list where we do the manipulation to the
# index that we just got
matrix2 = np.copy(matrix)
m_list2 = deepcopy(m_list)
output_m_list2 = copy(output_m_list)
matrix2[index, :] *= m_list[-1][0]
matrix2[:, index] *= m_list[-1][0]
output_m_list2.append([index, m_list[-1][3]])
indices2 = copy(indices)
indices2.remove(index)
m_list2[-1][1] -= 1
# recurse through both the modified and unmodified matrices
self._recurse(matrix2, m_list2, indices2, output_m_list2)
self._recurse(matrix, m_list, indices, output_m_list)
@property
def best_m_list(self):
"""
Returns: Best m_list found.
"""
return self._best_m_list
@property
def minimized_sum(self):
"""
Returns: Minimized sum
"""
return self._minimized_sum
@property
def output_lists(self):
"""
Returns: output lists.
"""
return self._output_lists
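    # Illustrative usage sketch (not part of the original module): zero out
    # (remove) two of the four candidate sites so that the resulting sum is
    # minimal. Manipulations are lists because the search mutates them in place:
    #
    #     matrix = EwaldSummation(structure).total_energy_matrix
    #     m_list = [[0.0, 2, [0, 1, 2, 3], None]]  # (fraction, n_indices, indices, species)
    #     minimizer = EwaldMinimizer(matrix, m_list, num_to_return=1)
    #     minimizer.best_m_list, minimizer.minimized_sum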
def compute_average_oxidation_state(site):
"""
Calculates the average oxidation state of a site
Args:
site: Site to compute average oxidation state
Returns:
Average oxidation state of site.
"""
try:
avg_oxi = sum([sp.oxi_state * occu
for sp, occu in site.species.items()
if sp is not None])
return avg_oxi
except AttributeError:
pass
try:
return site.charge
except AttributeError:
raise ValueError("Ewald summation can only be performed on structures "
"that are either oxidation state decorated or have "
"site charges.")
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import io
from builtins import *
from lxml import etree
from lxml import objectify
import json
import sys
sys.path.append('..')
import pyreqif.pyreqif
from pprint import pprint
import re
def pretty(d, indent=0):
for key, value in d.items():
print('\t' * indent + str(key))
if isinstance(value, dict):
pretty(value, indent + 1)
else:
print('\t' * (indent + 1) + str(value))
transLationTable = {"IDENTIFIER": "identifier",
"COUNTRY-CODE": "countryCode",
"CREATION-TIME": "creationTime",
"TITLE": "title",
"COMMENT": "comment",
"AUTHOR": "author",
"LONG-NAME": "longName",
"VERSION": "version",
"SOURCE-TOOL-ID": "sourceToolId",
"LAST-CHANGE": "lastChange",
"EMBEDDED": "embedded",
"TYPE": "type",
"VALUES": "values",
"CONTENT-REF": "contentref",
"CONTENT": "content",
"DESC": "desc"}
mapReqifAttributeValue = {"default": "embeddedDoc",
"ATTRIBUTE-VALUE-EMBEDDED-DOCUMENT": "embeddedDoc",
"ATTRIBUTE-VALUE-STRING": "string",
"ATTRIBUTE-VALUE-XHTML": "embeddedDoc",
"ATTRIBUTE-VALUE-BOOLEAN": "embeddedDoc",
"ATTRIBUTE-VALUE-INTEGER": "embeddedDoc"}
mapReqifAttributeDefinition = {"default": "complex",
"ATTRIBUTE-DEFINITION-COMPLEX": "complex",
"ATTRIBUTE-DEFINITION-STRING": "string",
"ATTRIBUTE-DEFINITION-XHTML": "complex",
"ATTRIBUTE-DEFINITION-BOOLEAN": "complex",
"ATTRIBUTE-DEFINITION-INTEGER": "complex"}
mapReqifDatatypeDefinition = {"default": "document",
"DATATYPE-DEFINITION-DOCUMENT": "document",
"DATATYPE-DEFINITION-STRING": "string",
"DATATYPE-DEFINITION-XHTML": "document",
"DATATYPE-DEFINITION-BOOLEAN": "document",
"DATATYPE-DEFINITION-INTEGER": "document"}
transLationTableReverse = dict(map(reversed, transLationTable.items()))
mapReqifAttributeValueReversed = dict(map(reversed, mapReqifAttributeValue.items()))
mapReqifAttributeDefinitionReversed = dict(map(reversed, mapReqifAttributeDefinition.items()))
mapReqifDatatypeDefinitionReversed = dict(map(reversed, mapReqifDatatypeDefinition.items()))
def mapReqifAttributeValue2Py(elem: str):
if elem in mapReqifAttributeValue:
return mapReqifAttributeValue[elem]
else:
print("Not supported datatype: ")
print(elem)
return mapReqifAttributeValue['default']
def mapPy2ReqifAttributeValue(elem: str):
if elem in mapReqifAttributeValueReversed:
return mapReqifAttributeValueReversed[elem]
else:
print("Not supported datatype: ")
print(elem)
return mapReqifAttributeValueReversed['default']
def mapReqifAttributeDefinition2Py(elem: str):
if elem in mapReqifAttributeDefinition:
return mapReqifAttributeDefinition[elem]
else:
print("Not supported attribute definition: ")
print(elem)
return mapReqifAttributeDefinition['default']
def mapPy2ReqifAttributeDefinition(elem: str):
if elem in mapReqifAttributeDefinitionReversed:
return mapReqifAttributeDefinitionReversed[elem]
else:
print("Not supported attribute definition: ")
print(elem)
return mapReqifAttributeDefinitionReversed['default']
def mapReqifDatatypeDefinition2Py(elem: str):
if elem in mapReqifDatatypeDefinition:
return mapReqifDatatypeDefinition[elem]
else:
print("Not supported datatype definition: ")
print(elem)
return mapReqifDatatypeDefinition['default']
def mapPy2ReqifDatatypeDefinition(elem: str):
if elem in mapReqifDatatypeDefinitionReversed:
return mapReqifDatatypeDefinitionReversed[elem]
else:
print("Not supported datatype datatype: ")
print(elem)
return mapReqifDatatypeDefinitionReversed['default']
def py2reqif(myDict):
MyNewDict = {}
for pyname in myDict:
if pyname in transLationTableReverse:
reqifname = transLationTableReverse[pyname]
MyNewDict[reqifname] = myDict[pyname]
else:
MyNewDict[pyname] = myDict[pyname]
return MyNewDict
def reqif2py(myDict):
MyNewDict = {}
for reqifname in myDict:
if reqifname in transLationTable:
pyname = transLationTable[reqifname]
MyNewDict[pyname] = myDict[reqifname]
else:
MyNewDict[reqifname] = myDict[reqifname]
return MyNewDict
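# Small illustrative example (not part of the original module): the two helpers
# above only rename dictionary keys between the ReqIF spelling and the python
# attribute spelling and pass unknown keys through unchanged.
#
#     >>> reqif2py({'IDENTIFIER': 'req-1', 'LONG-NAME': 'Door', 'FOO': 1})
#     {'identifier': 'req-1', 'longName': 'Door', 'FOO': 1}
#     >>> py2reqif({'identifier': 'req-1', 'longName': 'Door'})
#     {'IDENTIFIER': 'req-1', 'LONG-NAME': 'Door'}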
def load(f):
inputType = "RIF"
doc = pyreqif.pyreqif.doc()
tree = etree.parse(f)
root = tree.getroot()
ns = "{" + tree.xpath('namespace-uri(.)') + "}"
nsp = tree.xpath('namespace-uri(.)')
def getSubElementValuesByTitle(xmlElement, tagNameArray=[]):
defaultsSubElements = ['IDENTIFIER', 'LAST-CHANGE', 'LONG-NAME']
# ALTERNATIVE-ID ?
tagNameArray = list(set(defaultsSubElements + tagNameArray))
returnDict = {}
for tag in tagNameArray:
if tag in xmlElement.attrib:
returnDict[tag] = xmlElement.attrib[tag]
else:
temp = xmlElement.find('./' + ns + tag)
if temp is not None:
returnDict[tag] = temp.text
return returnDict
if root.tag == ns + "REQ-IF":
inputType = "REQIF"
headerRoot = root.find('./' + ns + 'THE-HEADER/' + ns + 'REQ-IF-HEADER')
contentRoot = root.find('./' + ns + 'CORE-CONTENT/' + ns + 'REQ-IF-CONTENT')
else:
headerRoot = root
contentRoot = root
headerTags = getSubElementValuesByTitle(headerRoot,
['AUTHOR', 'COMMENT', 'COUNTRY-CODE', 'CREATION-TIME', 'SOURCE-TOOL-ID',
'TITLE', 'VERSION'])
# header missing:
# COMMENT, REPOSITORY-ID, REQ-IF-TOOL-ID, REQ-IF-VERSION
doc.addHeader(reqif2py(headerTags))
datatypesXmlElement = contentRoot.find('./' + ns + 'DATATYPES')
for child in datatypesXmlElement:
if child.tag == ns + "DATATYPE-DEFINITION-DOCUMENT" or child.tag == ns + 'DATATYPE-DEFINITION-STRING' or child.tag == ns + 'DATATYPE-DEFINITION-XHTML' \
or child.tag == ns + 'DATATYPE-DEFINITION-BOOLEAN' or child.tag == ns + "DATATYPE-DEFINITION-INTEGER":
datatypeProto = getSubElementValuesByTitle(child, ['EMBEDDED'])
tagWithoutNamespace = re.sub('{[\S]*}', '', child.tag)
datatypeProto['type'] = mapReqifDatatypeDefinition2Py(tagWithoutNamespace)
doc.addDatatype(reqif2py(datatypeProto))
elif child.tag == ns + "DATATYPE-DEFINITION-ENUMERATION":
datatypeProto = getSubElementValuesByTitle(child, ['EMBEDDED'])
datatypeProto['type'] = "enum"
specifiedValues = child.find('./' + ns + "SPECIFIED-VALUES")
values = {}
for valElement in specifiedValues:
tempDict = getSubElementValuesByTitle(valElement)
properties = valElement.find('./' + ns + "PROPERTIES")
embeddedValue = properties.find('./' + ns + "EMBEDDED-VALUE")
tempDict['properites'] = reqif2py(getSubElementValuesByTitle(embeddedValue, ['KEY', 'OTHER-CONTENT']))
tempDict = reqif2py(tempDict)
values[tempDict['identifier']] = tempDict
datatypeProto['values'] = values
doc.addDatatype(reqif2py(datatypeProto))
else:
# missing:
# DATATYPE-DEFINITION-BOOLEAN
# DATATYPE-DEFINITION-DATE
# DATATYPE-DEFINITION-INTEGER
# DATATYPE-DEFINITION-REAL
print("Not supported datatype: ", )
print(child.tag)
specTypesXmlElement = contentRoot.find('./' + ns + 'SPEC-TYPES')
for child in specTypesXmlElement:
if child.tag == ns + "SPEC-TYPE" or child.tag == ns + "SPEC-OBJECT-TYPE":
specType = getSubElementValuesByTitle(child, ['DESC'])
# specType = getSubElementValuesByTitle(child)
attributesXml = child.find('./' + ns + "SPEC-ATTRIBUTES")
if attributesXml is not None:
for attribute in attributesXml:
if attribute.tag == ns + "ATTRIBUTE-DEFINITION-COMPLEX" or attribute.tag == ns + "ATTRIBUTE-DEFINITION-STRING" or attribute.tag == ns + "ATTRIBUTE-DEFINITION-XHTML" \
or attribute.tag == ns + "ATTRIBUTE-DEFINITION-BOOLEAN" or attribute.tag == ns + "ATTRIBUTE-DEFINITION-INTEGER":
specAttribType = getSubElementValuesByTitle(attribute)
tagWithoutNamespace = re.sub('{[\S]*}', '', attribute.tag)
specAttribType["type"] = mapReqifAttributeDefinition2Py(tagWithoutNamespace)
typeTag = attribute.find('./' + ns + 'TYPE')
if typeTag is not None:
reference = typeTag.getchildren()[0]
# reference = typeTag.find('./' + ns + 'DATATYPE-DEFINITION-DOCUMENT-REF')
reference = typeTag.getchildren()[0]
if doc.datatypeById(reference.text):
specAttribType['typeRef'] = reference.text
else:
print("BEEP unknown Datatype")
elif attribute.tag == ns + "ATTRIBUTE-DEFINITION-ENUMERATION":
specAttribType = getSubElementValuesByTitle(attribute)
specAttribType["type"] = "enum"
typeRef = attribute.find('./' + ns + 'TYPE/' + ns + 'DATATYPE-DEFINITION-ENUMERATION-REF')
if typeRef is not None:
specAttribType['typeRef'] = typeRef.text
defaultValue = attribute.find(
'./' + ns + 'DEFAULT-VALUE/' + ns + 'ATTRIBUTE-VALUE-ENUMERATION/' + ns + 'VALUES/' + ns + 'ENUM-VALUE-REF')
if defaultValue is not None:
specAttribType['defaultValue'] = defaultValue.text
else:
print("Not supported Attribute: ", )
print(attribute.tag)
specAttribType = reqif2py(specAttribType)
specType[specAttribType['identifier']] = specAttribType
# specType[specAttribType['identifier']].pop('identifier')
doc.addRequirementType(reqif2py(specType))
def remove_namespaces(thedoc):
# http://wiki.tei-c.org/index.php/Remove-Namespaces.xsl
xslt = '''<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/|comment()|processing-instruction()">
<xsl:copy>
<xsl:apply-templates/>
</xsl:copy>
</xsl:template>
<xsl:template match="*">
<xsl:element name="{local-name()}">
<xsl:apply-templates select="@*|node()"/>
</xsl:element>
</xsl:template>
<xsl:template match="@*">
<xsl:attribute name="{local-name()}">
<xsl:value-of select="."/>
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>
'''
# xslt_doc = etree.parse(io.BytesIO(xslt))
xslt_doc = etree.parse(io.BytesIO(bytes(xslt, "utf8")))
transform = etree.XSLT(xslt_doc)
ret = transform(thedoc)
return ret
specObjectsXmlElement = contentRoot.find('./' + ns + 'SPEC-OBJECTS')
for requirementXml in specObjectsXmlElement:
requirement = None
if requirementXml.tag == ns + "SPEC-OBJECT":
requirement = getSubElementValuesByTitle(requirementXml)
typeRefXml = requirementXml.find('./' + ns + 'TYPE/' + ns + 'SPEC-TYPE-REF')
if typeRefXml is None:
typeRefXml = requirementXml.find('./' + ns + 'TYPE/' + ns + 'SPEC-OBJECT-TYPE-REF')
if typeRefXml is not None:
requirement["typeRef"] = typeRefXml.text
valuesXml = requirementXml.find('./' + ns + 'VALUES')
values = {}
for valueXml in valuesXml:
value = getSubElementValuesByTitle(valueXml)
# TODO : Support other types
if valueXml.tag == ns + 'ATTRIBUTE-VALUE-EMBEDDED-DOCUMENT' or valueXml.tag == ns + 'ATTRIBUTE-VALUE-STRING' or valueXml.tag == ns + 'ATTRIBUTE-VALUE-XHTML' \
or valueXml.tag == ns + 'ATTRIBUTE-VALUE-BOOLEAN' or valueXml.tag == ns + 'ATTRIBUTE-VALUE-INTEGER':
attributeRefXml = valueXml.find('./' + ns + 'DEFINITION').getchildren()[0]
value['attributeRef'] = attributeRefXml.text
if 'THE-VALUE' in valueXml.attrib:
value["content"] = valueXml.attrib['THE-VALUE']
else:
contentXml = valueXml.find(
'./' + ns + 'XHTML-CONTENT/{http://automotive-his.de/200706/rif-xhtml}div')
if contentXml is None:
contentXml = valueXml.find("./" + ns + 'THE-VALUE/{http://www.w3.org/1999/xhtml}div')
value["content"] = etree.tostring(remove_namespaces(contentXml))
# value["content"] = "".join(contentXml.itertext())
tagWithoutNamespace = re.sub('{[\S]*}', '', valueXml.tag)
value["type"] = mapReqifAttributeValue2Py(tagWithoutNamespace)
elif valueXml.tag == ns + 'ATTRIBUTE-VALUE-ENUMERATION':
value["type"] = "enum"
attributeRefXml = valueXml.find(
'./' + ns + 'DEFINITION/' + ns + 'ATTRIBUTE-DEFINITION-ENUMERATION-REF')
value['attributeRef'] = attributeRefXml.text
contentXml = valueXml.findall('./' + ns + 'VALUES/' + ns + 'ENUM-VALUE-REF')
if contentXml is not None:
value["contentRef"] = []
for content in contentXml:
value["contentRef"].append(content.text)
a = 1
else:
value["contentRef"] = None
else:
print("valueType not supported yet:", )
print(valueXml.tag[len(ns):])
values[value['attributeRef']] = reqif2py(value)
requirement["values"] = values
else:
print("Unknown spec object tag:", )
print(requirementXml.tag)
        if requirement is not None:
doc.addRequirement(reqif2py(requirement))
specGroupsXml = contentRoot.find('./' + ns + 'SPEC-GROUPS')
if specGroupsXml is not None:
for specGroupXml in specGroupsXml:
if specGroupXml.tag == ns + "SPEC-GROUP":
specification = getSubElementValuesByTitle(specGroupXml, ['DESC'])
spec = pyreqif.pyreqif.specification(**reqif2py(specification))
specObjectsXml = specGroupXml.find('./' + ns + 'SPEC-OBJECTS')
for specObjectRef in specObjectsXml:
spec.addReq(specObjectRef.text)
doc.addSpecification(spec)
def getHierarchy(hierarchyEle, inputType):
hierarchyDict = getSubElementValuesByTitle(hierarchyEle)
typeRef = hierarchyEle.find('./' + ns + 'TYPE/' + ns + 'SPEC-TYPE-REF')
if typeRef is not None:
hierarchyDict["typeRef"] = typeRef.text
objectRef = hierarchyEle.find('./' + ns + 'OBJECT/' + ns + 'SPEC-OBJECT-REF')
if objectRef is not None:
hierarchyDict["objectRef"] = objectRef.text
hierarchy = pyreqif.pyreqif.hierarchy(**reqif2py(hierarchyDict))
children = hierarchyEle.find('./' + ns + 'CHILDREN')
if children is not None:
for child in children:
hierarchy.addChild(getHierarchy(child, inputType))
return hierarchy
if inputType == "RIF":
hierarchyRoots = contentRoot.find('./' + ns + 'SPEC-HIERARCHY-ROOTS')
elif inputType == "REQIF":
hierarchyRoots = contentRoot.find('./' + ns + 'SPECIFICATIONS')
for hierarchyRoot in hierarchyRoots:
doc.hierarchy.append(getHierarchy(hierarchyRoot, inputType))
# SPEC-HIERARCHY
relations = {}
specRelsXml = contentRoot.find('./' + ns + 'SPEC-RELATIONS')
if specRelsXml is not None:
for specRelXml in specRelsXml:
if specRelXml.tag == ns + "SPEC-RELATION":
relation = getSubElementValuesByTitle(specRelXml)
typeRef = specRelXml.find('./' + ns + 'TYPE')
if typeRef is not None:
relation["typeRef"] = typeRef.getchildren()[0].text
sourceRef = specRelXml.find('./' + ns + 'SOURCE/' + ns + 'SPEC-OBJECT-REF')
if sourceRef is not None:
relation["sourceRef"] = sourceRef.text
targetRef = specRelXml.find('./' + ns + 'TARGET/' + ns + 'SPEC-OBJECT-REF')
if targetRef is not None:
relation["targetRef"] = targetRef.text
doc.addRelation(reqif2py(relation))
return doc
attributesForElements = ["IDENTIFIER", "LAST-CHANGE", "LONG-NAME", "MAX-LENGTH", "MAX", "MIN", "ACCURACY",
"OTHER-CONTENT", "KEY", "MULTI-VALUED"]
notUsedAttributes = ["COUNTRY-CODE", "EMBEDDED", "AUTHOR", "VERSION", "DESC", "contentRef"]
def createSubElements(parent, myDict):
for key in myDict:
if key in attributesForElements or key in notUsedAttributes:
continue
sn = etree.SubElement(parent, key)
if myDict[key] is not None:
sn.text = myDict[key]
else:
sn.text = 'None'
def createSubElement(parent, tag, text=None, attributes=None):
sn = etree.SubElement(parent, tag)
if text is not None:
sn.text = text
if attributes is not None:
for attributeName in attributesForElements:
if attributeName in attributes and attributes[
attributeName] is not None and attributeName not in notUsedAttributes:
sn.attrib[attributeName] = attributes[attributeName]
return sn
def dump(doc, f):
xsi = 'http://www.w3.org/2001/XMLSchema-instance'
arVersion = "1"
root = etree.Element(
'REQ-IF',
nsmap={
None: 'http://www.omg.org/spec/ReqIF/20110401/reqif.xsd',
'xhtml': "http://www.w3.org/1999/xhtml",
'id': "http://pror.org/presentation/id",
"configuration": "http://eclipse.org/rmf/pror/toolextensions/1.0",
})
#
# HEADER
#
theheader = createSubElement(root, "THE-HEADER")
headerXML = createSubElement(theheader, "REQ-IF-HEADER", attributes=py2reqif(doc.header))
tempDict = py2reqif(doc.header)
tempDict["REQ-IF-TOOL-ID"] = tempDict["SOURCE-TOOL-ID"]
tempDict["REQ-IF-VERSION"] = "1.0"
tempDict["SOURCE-TOOL-ID"] = "pyreqif"
for tagName in ["COMMENT", "CREATION-TIME", "REQ-IF-TOOL-ID", "REQ-IF-VERSION", "SOURCE-TOOL-ID", "TITLE"]:
createSubElement(headerXML, tagName, tempDict[tagName])
coreContent = createSubElement(root, "CORE-CONTENT")
reqIfContent = createSubElement(coreContent, "REQ-IF-CONTENT")
#
# DATATYPES
#
datatypesXml = createSubElement(reqIfContent, "DATATYPES")
for datatype in doc.datatypeList:
if datatype.mytype == "document":
myDict = py2reqif(datatype.toDict())
datatypeXml = createSubElement(datatypesXml, "DATATYPE-DEFINITION-XHTML", attributes=myDict)
del myDict["TYPE"]
createSubElements(datatypeXml, myDict)
if datatype.mytype == "string":
myDict = py2reqif(datatype.toDict())
datatypeXml = createSubElement(datatypesXml, "DATATYPE-DEFINITION-STRING", attributes=myDict)
del myDict["TYPE"]
createSubElements(datatypeXml, myDict)
if datatype.mytype == "enum":
datatypeXml = createSubElement(datatypesXml, "DATATYPE-DEFINITION-ENUMERATION",
attributes=py2reqif(datatype.toDict()))
myDict = py2reqif(datatype.toDict())
del myDict["TYPE"]
createSubElements(datatypeXml, myDict)
specifiedValuesXml = createSubElement(datatypeXml, "SPECIFIED-VALUES")
for value, label in datatype.valueTable.items():
valuesXml = createSubElement(specifiedValuesXml, "ENUM-VALUE", attributes=py2reqif(label))
# createSubElement(valuesXml, "IDENTIFIER", value)
for element, content in py2reqif(label).items():
if element == "properites":
props = createSubElement(valuesXml, "PROPERTIES")
createSubElement(props, "EMBEDDED-VALUE", attributes=py2reqif(content))
elif element not in attributesForElements:
createSubElement(valuesXml, element, content)
#
# SPEC-TYPES
#
specTypes = createSubElement(reqIfContent, "SPEC-TYPES")
for reqType in doc.requirementTypeList:
specType = createSubElement(specTypes, "SPEC-OBJECT-TYPE", attributes=py2reqif(reqType.toDict()))
createSubElements(specType, py2reqif(reqType.toDict()))
if len(reqType.myTypes) > 0:
attributesXml = createSubElement(specType, "SPEC-ATTRIBUTES")
for mytype, ref in reqType.myTypes.items():
attribDict = py2reqif(ref.toDict())
if "TYPE" in attribDict and attribDict["TYPE"] == "enum":
attribDict.pop("TYPE")
attribDict["MULTI-VALUED"] = "false"
enumXml = createSubElement(attributesXml, "ATTRIBUTE-DEFINITION-ENUMERATION", attributes=attribDict)
for value, label in attribDict.items():
if value == "typeRef":
typeXml = createSubElement(enumXml, "TYPE")
createSubElement(typeXml, "DATATYPE-DEFINITION-ENUMERATION-REF", label)
elif value not in attributesForElements:
createSubElement(enumXml, value, label)
if "TYPE" in attribDict and attribDict["TYPE"] == "complex":
# attribDict.pop("TYPE")
enumXml = createSubElement(attributesXml, "ATTRIBUTE-DEFINITION-XHTML", attributes=attribDict)
attribDict.pop("TYPE")
for value, label in attribDict.items():
if value == "typeRef":
typeXml = createSubElement(enumXml, "TYPE")
createSubElement(typeXml, "DATATYPE-DEFINITION-XHTML-REF", label)
elif value not in attributesForElements and value not in notUsedAttributes:
createSubElement(enumXml, value, label)
if "TYPE" in attribDict and attribDict["TYPE"] == "string":
# attribDict.pop("TYPE")
enumXml = createSubElement(attributesXml, "ATTRIBUTE-DEFINITION-STRING", attributes=attribDict)
attribDict.pop("TYPE")
for value, label in attribDict.items():
if value == "typeRef":
typeXml = createSubElement(enumXml, "TYPE")
createSubElement(typeXml, "DATATYPE-DEFINITION-STRING-REF", label)
elif value not in attributesForElements and value not in notUsedAttributes:
createSubElement(enumXml, value, label)
#
# SPEC-OBJECTS
#
specsXml = createSubElement(reqIfContent, "SPEC-OBJECTS")
for req in doc.requirementList:
specXml = createSubElement(specsXml, "SPEC-OBJECT", attributes=py2reqif(req.toDict()))
requirementDict = py2reqif(req.toDict())
for value, label in requirementDict.items():
if value == "VALUES":
valuesXml = createSubElement(specXml, "VALUES")
for value in label:
tempDict = py2reqif(value.toDict())
if "LONG-NAME" in tempDict:
tempDict.pop("LONG-NAME")
if "LAST-CHANGE" in tempDict:
tempDict.pop("LAST-CHANGE")
if "IDENTIFIER" in tempDict:
tempDict.pop("IDENTIFIER")
if value.mytype == "enum":
valueXml = createSubElement(valuesXml, "ATTRIBUTE-VALUE-ENUMERATION", attributes=tempDict)
valuesValuesXml = createSubElement(valueXml, "VALUES")
valuesDefinitionsXml = createSubElement(valueXml, "DEFINITION")
else:
valueXml = createSubElement(valuesXml, "ATTRIBUTE-VALUE-XHTML", attributes=tempDict)
valuesDefinitionsXml = createSubElement(valueXml, "DEFINITION")
for val, lab in py2reqif(value.toDict()).items():
if val == "contentRef" and lab is not None:
createSubElement(valuesValuesXml, "ENUM-VALUE-REF", lab[0])
elif val == "attributeRef":
if value.mytype == "enum":
createSubElement(valuesDefinitionsXml, "ATTRIBUTE-DEFINITION-ENUMERATION-REF", lab)
elif value.mytype == "embeddedDoc":
createSubElement(valuesDefinitionsXml, "ATTRIBUTE-DEFINITION-XHTML-REF", lab)
elif value.mytype == "string":
createSubElement(valuesDefinitionsXml, "ATTRIBUTE-DEFINITION-STRING-REF", lab)
else:
print("Unknown Type " + value.mytype)
elif val == "TYPE":
pass
elif val == "CONTENT":
if lab is not None:
if '<' in str(lab):
labtree = etree.parse(io.BytesIO(lab))
labroot = labtree.getroot()
for el in labroot.iter():
el.tag = '{http://www.w3.org/1999/xhtml}' + el.tag
contentXml = createSubElement(valueXml, "THE-VALUE")
contentXml.append(labroot)
else:
createSubElement(valueXml, "THE-VALUE", lab)
elif val not in attributesForElements and val not in notUsedAttributes:
createSubElement(valueXml, val, lab)
elif value == "typeRef":
typeXml = createSubElement(specXml, "TYPE")
createSubElement(typeXml, "SPEC-OBJECT-TYPE-REF", label)
elif value not in attributesForElements:
createSubElement(specXml, value, label)
#
# SPEC-RELATIONS
#
specsRelXml = createSubElement(reqIfContent, "SPEC-RELATIONS")
for relation in doc.relations:
specsRel = createSubElement(specsRelXml, "SPEC-RELATION")
for value, label in py2reqif(relation).items():
if value == "typeRef":
typeXml = createSubElement(specsRel, "TYPE")
createSubElement(typeXml, "SPEC-TYPE-REF", label)
elif value == "sourceRef":
sourceXml = createSubElement(specsRel, "SOURCE")
createSubElement(sourceXml, "SPEC-OBJECT-REF", label)
elif value == "targetRef":
targetXml = createSubElement(specsRel, "TARGET")
createSubElement(targetXml, "SPEC-OBJECT-REF", label)
else:
createSubElement(specsRel, value, label)
#
# SPEC-GROUPS
#
# specGroupsXml = createSubElement(reqIfContent, "SPEC-GROUPS")
# for specification in doc.specificationList:
# specGroupXml = createSubElement(specGroupsXml, "SPEC-GROUP")
# for value,label in py2reqif(specification.toDict()).iteritems():
# createSubElement(specGroupXml,value,label)
# specObjectsXml = createSubElement(specGroupXml,"SPEC-OBJECTS")
# for req in specification:
# createSubElement(specObjectsXml ,"SPEC-OBJECT-REF", req)
#
# SPEC-HIERARCHY-ROOTS
#
def createChildHirachy(parentXmlTag, childObject):
childrenXml = createSubElement(parentXmlTag, "CHILDREN")
hierarchXml = createSubElement(childrenXml, "SPEC-HIERARCHY", attributes=py2reqif(childObject.toDict()))
for value, label in py2reqif(childObject.toDict()).items():
if value == "objectRef":
objectXml = createSubElement(hierarchXml, "OBJECT")
createSubElement(objectXml, "SPEC-OBJECT-REF", label)
elif value not in attributesForElements:
if label is not None:
createSubElement(hierarchXml, value, label)
for child in childObject.children:
createChildHirachy(hierarchXml, child)
specHierarchRootsXml = createSubElement(reqIfContent, "SPECIFICATIONS")
# SPEC-HIERARCHY-ROOT
for hierarch in doc.hierarchy:
specHierarchRootXml = createSubElement(specHierarchRootsXml, "SPECIFICATION",
attributes=py2reqif(hierarch.toDict()))
for value, label in py2reqif(hierarch.toDict()).items():
if value == "typeRef":
typeXml = createSubElement(specHierarchRootXml, "TYPE")
createSubElement(typeXml, "SPECIFICATION-TYPE-REF", label)
elif value not in attributesForElements:
createSubElement(specHierarchRootXml, value, label)
for child in hierarch.children:
createChildHirachy(specHierarchRootXml, child)
f.write(etree.tostring(root, pretty_print=True, xml_declaration=True))
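# Hedged usage sketch (not part of the original module): writing a document
# with dump(). The document object and output file name are assumptions for
# illustration; lxml's tostring() with xml_declaration=True returns bytes,
# hence the binary file mode.
def _example_dump(doc, path="out.reqif"):
    with open(path, "wb") as f:
        dump(doc, f)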
|
|
import sys
import numpy as np
from ..extern.six import string_types
from ..extern.six.moves import range
from ..header import Header
from ..util import (_is_pseudo_unsigned, _unsigned_zero, _is_int,
_normalize_slice, lazyproperty)
from .base import DELAYED, _ValidHDU, ExtensionHDU
class _ImageBaseHDU(_ValidHDU):
"""FITS image HDU base class.
Attributes
----------
header
image header
data
image data
"""
# mappings between FITS and numpy typecodes
# TODO: Maybe make these module-level constants instead...
NumCode = {8: 'uint8', 16: 'int16', 32: 'int32', 64: 'int64',
-32: 'float32', -64: 'float64'}
ImgCode = {'uint8': 8, 'int16': 16, 'uint16': 16, 'int32': 32,
'uint32': 32, 'int64': 64, 'uint64': 64, 'float32': -32,
'float64': -64}
standard_keyword_comments = {
'SIMPLE': 'conforms to FITS standard',
'XTENSION': 'Image extension',
'BITPIX': 'array data type',
'NAXIS': 'number of array dimensions',
'GROUPS': 'has groups',
'PCOUNT': 'number of parameters',
'GCOUNT': 'number of groups'
}
def __init__(self, data=None, header=None, do_not_scale_image_data=False,
uint=False, scale_back=None, **kwargs):
from pyfits.hdu.groups import GroupsHDU
super(_ImageBaseHDU, self).__init__(data=data, header=header)
if header is not None:
if not isinstance(header, Header):
# TODO: Instead maybe try initializing a new Header object from
# whatever is passed in as the header--there are various types
# of objects that could work for this...
raise ValueError('header must be a Header object')
if data is DELAYED:
# Presumably if data is DELAYED then this HDU is coming from an
# open file, and was not created in memory
if header is None:
# this should never happen
raise ValueError('No header to setup HDU.')
# if the file is read the first time, no need to copy, and keep it
# unchanged
else:
self._header = header
else:
# TODO: Some of this card manipulation should go into the
# PrimaryHDU and GroupsHDU subclasses
# construct a list of cards of minimal header
if isinstance(self, ExtensionHDU):
c0 = ('XTENSION', 'IMAGE',
self.standard_keyword_comments['XTENSION'])
else:
c0 = ('SIMPLE', True, self.standard_keyword_comments['SIMPLE'])
cards = [
c0,
('BITPIX', 8, self.standard_keyword_comments['BITPIX']),
('NAXIS', 0, self.standard_keyword_comments['NAXIS'])]
if isinstance(self, GroupsHDU):
cards.append(('GROUPS', True,
self.standard_keyword_comments['GROUPS']))
if isinstance(self, (ExtensionHDU, GroupsHDU)):
cards.append(('PCOUNT', 0,
self.standard_keyword_comments['PCOUNT']))
cards.append(('GCOUNT', 1,
self.standard_keyword_comments['GCOUNT']))
if header is not None:
orig = header.copy()
header = Header(cards)
header.extend(orig, strip=True, update=True, end=True)
else:
header = Header(cards)
self._header = header
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
if do_not_scale_image_data:
self._bzero = 0
self._bscale = 1
else:
self._bzero = self._header.get('BZERO', 0)
self._bscale = self._header.get('BSCALE', 1)
# Save off other important values from the header needed to interpret
# the image data
self._axes = [self._header.get('NAXIS' + str(axis + 1), 0)
for axis in range(self._header.get('NAXIS', 0))]
self._bitpix = self._header.get('BITPIX', 8)
self._gcount = self._header.get('GCOUNT', 1)
self._pcount = self._header.get('PCOUNT', 0)
self._blank = self._header.get('BLANK')
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
# Set the name attribute if it was provided (if this is an ImageHDU
# this will result in setting the EXTNAME keyword of the header as
# well)
if 'name' in kwargs and kwargs['name']:
self.name = kwargs['name']
# Set to True if the data or header is replaced, indicating that
# update_header should be called
self._modified = False
if data is DELAYED:
if (not do_not_scale_image_data and
(self._bscale != 1 or self._bzero != 0)):
# This indicates that when the data is accessed or written out
# to a new file it will need to be rescaled
self._data_needs_rescale = True
return
else:
self.data = data
self.update_header()
@classmethod
def match_header(cls, header):
"""
_ImageBaseHDU is sort of an abstract class for HDUs containing image
data (as opposed to table data) and should never be used directly.
"""
raise NotImplementedError
@property
def is_image(self):
return True
@property
def section(self):
"""
Access a section of the image array without loading the entire array
into memory. The :class:`Section` object returned by this attribute is
not meant to be used directly by itself. Rather, slices of the section
        return the appropriate slice of the data, and load *only* that section
into memory.
Sections are mostly obsoleted by memmap support, but should still be
used to deal with very large scaled images. See the
:ref:`data-sections` section of the PyFITS documentation for more
details.
"""
return Section(self)
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@property
def header(self):
return self._header
@header.setter
def header(self, header):
self._header = header
self._modified = True
self.update_header()
@lazyproperty
def data(self):
"""
Image/array data as a `~numpy.ndarray`.
        Please remember that the order of axes on a Numpy array is opposite
        to the order specified in the FITS file. For example, for a 2D image
the "rows" or y-axis are the first dimension, and the "columns" or
x-axis are the second dimension.
If the data is scaled using the BZERO and BSCALE parameters, this
attribute returns the data scaled to its physical values unless the
file was opened with ``do_not_scale_image_data=True``.
"""
if len(self._axes) < 1:
return
data = self._get_scaled_image_data(self._data_offset, self.shape)
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if 'data' in self.__dict__:
if self.__dict__['data'] is data:
return
else:
self._data_replaced = True
else:
self._data_replaced = True
if data is not None and not isinstance(data, np.ndarray):
# Try to coerce the data into a numpy array--this will work, on
# some level, for most objects
try:
data = np.array(data)
            except Exception:
raise TypeError('data object %r could not be coerced into an '
'ndarray' % data)
self.__dict__['data'] = data
self._modified = True
if isinstance(data, np.ndarray):
self._bitpix = _ImageBaseHDU.ImgCode[data.dtype.name]
self._orig_bitpix = self._bitpix
self._orig_bscale = 1
self._orig_bzero = 0
self._axes = list(data.shape)
self._axes.reverse()
elif self.data is None:
self._axes = []
else:
raise ValueError('not a valid data array')
self.update_header()
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
def update_header(self):
"""
Update the header keywords to agree with the data.
"""
if not (self._modified or self._header._modified or
(self._has_data and self.shape != self.data.shape)):
# Not likely that anything needs updating
return
old_naxis = self._header.get('NAXIS', 0)
if 'BITPIX' not in self._header:
bitpix_comment = self.standard_keyword_comments['BITPIX']
else:
bitpix_comment = self._header.comments['BITPIX']
# Update the BITPIX keyword and ensure it's in the correct
# location in the header
self._header.set('BITPIX', self._bitpix, bitpix_comment, after=0)
# If the data's shape has changed (this may have happened without our
# noticing either via a direct update to the data.shape attribute) we
# need to update the internal self._axes
if self._has_data and self.shape != self.data.shape:
self._axes = list(self.data.shape)
self._axes.reverse()
# Update the NAXIS keyword and ensure it's in the correct location in
# the header
if 'NAXIS' in self._header:
naxis_comment = self._header.comments['NAXIS']
else:
naxis_comment = self.standard_keyword_comments['NAXIS']
self._header.set('NAXIS', len(self._axes), naxis_comment,
after='BITPIX')
# TODO: This routine is repeated in several different classes--it
        # should probably be made available as a method on all standard HDU
# types
# add NAXISi if it does not exist
for idx, axis in enumerate(self._axes):
naxisn = 'NAXIS' + str(idx + 1)
if naxisn in self._header:
self._header[naxisn] = axis
else:
if (idx == 0):
after = 'NAXIS'
else:
after = 'NAXIS' + str(idx)
self._header.set(naxisn, axis, after=after)
# delete extra NAXISi's
for idx in range(len(self._axes) + 1, old_naxis + 1):
try:
del self._header['NAXIS' + str(idx)]
except KeyError:
pass
self._update_uint_scale_keywords()
self._modified = False
def _update_header_scale_info(self, dtype=None):
if (not self._do_not_scale_image_data and
not (self._orig_bzero == 0 and self._orig_bscale == 1)):
if dtype is None:
dtype = self._dtype_for_bitpix()
if (dtype is not None and dtype.kind == 'u' and
(self._scale_back or self._scale_back is None)):
# Data is pseudo-unsigned integers, and the scale_back option
# was not explicitly set to False, so preserve all the scale
# factors
return
for keyword in ['BSCALE', 'BZERO']:
try:
del self._header[keyword]
# Since _update_header_scale_info can, currently, be called
# *after* _prewriteto(), replace these with blank cards so
# the header size doesn't change
self._header.append()
except KeyError:
pass
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self._header['BITPIX'] = _ImageBaseHDU.ImgCode[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self._header['BITPIX']
def scale(self, type=None, option='old', bscale=1, bzero=0):
"""
Scale image data by using ``BSCALE``/``BZERO``.
        Calling this method will scale `data` and update the keywords of
``BSCALE`` and ``BZERO`` in the HDU's header. This method should only
be used right before writing to the output file, as the data will be
scaled and is therefore not very usable after the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy
dtype name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'``
etc.). If is `None`, use the current data type.
option : str
How to scale the data: if ``"old"``, use the original
``BSCALE`` and ``BZERO`` values when the data was
read/created. If ``"minmax"``, use the minimum and maximum
of the data to scale. The option will be overwritten by
any user specified ``bscale``/``bzero`` values.
bscale, bzero : int, optional
User-specified ``BSCALE`` and ``BZERO`` values
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = self.NumCode[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if (bscale != 1 or bzero != 0):
_scale = bscale
_zero = bzero
else:
if option == 'old':
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == 'minmax':
if issubclass(_type, np.floating):
_scale = 1
_zero = 0
else:
min = np.minimum.reduce(self.data.flat)
max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = min
_scale = (max - min) / (2.0 ** 8 - 1)
else:
_zero = (max + min) / 2.0
# throw away -2^N
nbytes = 8 * _type().itemsize
_scale = (max - min) / (2.0 ** nbytes - 2)
# Do the scaling
if _zero != 0:
# 0.9.6.3 to avoid out of range error for BZERO = +32768
self.data += -_zero
self._header['BZERO'] = _zero
else:
try:
del self._header['BZERO']
except KeyError:
pass
if _scale and _scale != 1:
self.data /= _scale
self._header['BSCALE'] = _scale
else:
try:
del self._header['BSCALE']
except KeyError:
pass
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type)
# Update the BITPIX Card to match the data
self._bitpix = _ImageBaseHDU.ImgCode[self.data.dtype.name]
self._bzero = self._header.get('BZERO', 0)
self._bscale = self._header.get('BSCALE', 1)
self._header['BITPIX'] = self._bitpix
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
def _verify(self, option='warn'):
# update_header can fix some things that would otherwise cause
# verification to fail, so do that now...
self.update_header()
return super(_ImageBaseHDU, self)._verify(option)
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self.scale(self.NumCode[self._orig_bitpix])
self.update_header()
if not inplace and not self._has_data:
self._update_header_scale_info()
return super(_ImageBaseHDU, self)._prewriteto(checksum, inplace)
def _writedata_internal(self, fileobj):
size = 0
if self.data is not None:
# Based on the system type, determine the byteorders that
# would need to be swapped to get to big-endian output
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
# deal with unsigned integer 16, 32 and 64 data
if _is_pseudo_unsigned(self.data.dtype):
# Convert the unsigned array to signed
output = np.array(
self.data - _unsigned_zero(self.data.dtype),
dtype='>i%d' % self.data.dtype.itemsize)
should_swap = False
else:
output = self.data
byteorder = output.dtype.str[0]
should_swap = (byteorder in swap_types)
if not fileobj.simulateonly:
if should_swap:
output.byteswap(True)
try:
fileobj.writearray(output)
finally:
output.byteswap(True)
else:
fileobj.writearray(output)
size += output.size * output.itemsize
return size
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
for bits, dtype in ((16, np.dtype('uint16')),
(32, np.dtype('uint32')),
(64, np.dtype('uint64'))):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype('float64')
elif bitpix > 0: # scale integers to Float32
return np.dtype('float32')
def _convert_pseudo_unsigned(self, data):
"""
Handle "pseudo-unsigned" integers, if the user requested it. Returns
the converted data array if so; otherwise returns None.
        In this case, we don't need to handle BLANK to convert it to NaN,
        since we can't do NaNs with integers anyway, i.e. the user is
        responsible for managing blanks.
"""
dtype = self._dtype_for_bitpix()
# bool(dtype) is always False--have to explicitly compare to None; this
# caused a fair amount of hair loss
if dtype is not None and dtype.kind == 'u':
# Convert the input raw data into an unsigned integer array and
# then scale the data adjusting for the value of BZERO. Note that
# we subtract the value of BZERO instead of adding because of the
# way numpy converts the raw signed array into an unsigned array.
bits = dtype.itemsize * 8
data = np.array(data, dtype=dtype)
data -= np.uint64(1 << (bits - 1))
return data
def _get_scaled_image_data(self, offset, shape):
"""
        Internal function for reading image data from a file and applying scale
factors to it. Normally this is used for the entire image, but it
supports alternate offset/shape for Section support.
"""
code = _ImageBaseHDU.NumCode[self._orig_bitpix]
raw_data = self._get_raw_data(shape, code, offset)
raw_data.dtype = raw_data.dtype.newbyteorder('>')
if (self._orig_bzero == 0 and self._orig_bscale == 1 and
self._blank is None):
# No further conversion of the data is necessary
return raw_data
data = None
if not (self._orig_bzero == 0 and self._orig_bscale == 1):
data = self._convert_pseudo_unsigned(raw_data)
if data is None:
# In these cases, we end up with floating-point arrays and have to
# apply bscale and bzero. We may have to handle BLANK and convert
# to NaN in the resulting floating-point arrays.
if self._blank is not None:
blanks = raw_data.flat == self._blank
# The size of blanks in bytes is the number of elements in
# raw_data.flat. However, if we use np.where instead we will
# only use 8 bytes for each index where the condition is true.
# So if the number of blank items is fewer than
# len(raw_data.flat) / 8, using np.where will use less memory
if blanks.sum() < len(blanks) / 8:
blanks = np.where(blanks)
new_dtype = self._dtype_for_bitpix()
if new_dtype is not None:
data = np.array(raw_data, dtype=new_dtype)
else: # floating point cases
if self._file.memmap:
data = raw_data.copy()
# if not memmap, use the space already in memory
else:
data = raw_data
del raw_data
if self._orig_bscale != 1:
np.multiply(data, self._orig_bscale, data)
if self._orig_bzero != 0:
data += self._orig_bzero
if self._blank is not None:
data.flat[blanks] = np.nan
return data
# TODO: Move the GroupsHDU-specific summary code to GroupsHDU itself
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
format = ''
else:
format = self.data.dtype.name
format = format[format.rfind('.')+1:]
else:
if self.shape and all(self.shape):
# Only show the format if all the dimensions are non-zero
# if data is not touched yet, use header info.
format = self.NumCode[self._bitpix]
else:
format = ''
# Display shape in FITS-order
shape = tuple(reversed(self.shape))
return (self.name, class_name, len(self._header), shape, format, '')
def _calculate_datasum(self, blocking):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
d = self.data
# First handle the special case where the data is unsigned integer
# 16, 32 or 64
if _is_pseudo_unsigned(self.data.dtype):
d = np.array(self.data - _unsigned_zero(self.data.dtype),
dtype='i%d' % self.data.dtype.itemsize)
# Check the byte order of the data. If it is little endian we
# must swap it before calculating the datasum.
if d.dtype.str[0] != '>':
byteswapped = True
d = d.byteswap(True)
d.dtype = d.dtype.newbyteorder('>')
else:
byteswapped = False
cs = self._compute_checksum(np.fromstring(d, dtype='ubyte'),
blocking=blocking)
# If the data was byteswapped in this method then return it to
# its original little-endian order.
if byteswapped and not _is_pseudo_unsigned(self.data.dtype):
d.byteswap(True)
d.dtype = d.dtype.newbyteorder('<')
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
            # all. This can also be handled in a generic manner.
return super(_ImageBaseHDU, self)._calculate_datasum(
blocking=blocking)
class Section(object):
"""
Image section.
Slices of this object load the corresponding section of an image array from
    the underlying FITS file on disk, and apply any BSCALE/BZERO factors.
Section slices cannot be assigned to, and modifications to a section are
not saved back to the underlying file.
See the :ref:`data-sections` section of the PyFITS documentation for more
details.
"""
def __init__(self, hdu):
self.hdu = hdu
def __getitem__(self, key):
dims = []
if not isinstance(key, tuple):
key = (key,)
naxis = len(self.hdu.shape)
if naxis < len(key):
raise IndexError('too many indices')
elif naxis > len(key):
key = key + (slice(None),) * (naxis - len(key))
offset = 0
        # Declare idx outside the loop so it is still available afterwards;
        # don't rely on the for-loop variable leaking out of the loop scope.
idx = 0
for idx in range(naxis):
axis = self.hdu.shape[idx]
indx = _iswholeline(key[idx], axis)
offset = offset * axis + indx.offset
# all elements after the first WholeLine must be WholeLine or
# OnePointAxis
if isinstance(indx, (_WholeLine, _LineSlice)):
dims.append(indx.npts)
break
elif isinstance(indx, _SteppedSlice):
raise IndexError('Stepped Slice not supported')
contiguousSubsection = True
for jdx in range(idx + 1, naxis):
axis = self.hdu.shape[jdx]
indx = _iswholeline(key[jdx], axis)
dims.append(indx.npts)
if not isinstance(indx, _WholeLine):
contiguousSubsection = False
# the offset needs to multiply the length of all remaining axes
else:
offset *= axis
if contiguousSubsection:
if not dims:
dims = [1]
dims = tuple(dims)
bitpix = self.hdu._orig_bitpix
offset = self.hdu._data_offset + (offset * abs(bitpix) // 8)
data = self.hdu._get_scaled_image_data(offset, dims)
else:
data = self._getdata(key)
return data
def _getdata(self, keys):
out = []
# Determine the number of slices in the set of input keys.
# If there is only one slice then the result is a one dimensional
# array, otherwise the result will be a multidimensional array.
n_slices = 0
for idx, key in enumerate(keys):
if isinstance(key, slice):
n_slices = n_slices + 1
for idx, key in enumerate(keys):
if isinstance(key, slice):
# OK, this element is a slice so see if we can get the data for
# each element of the slice.
axis = self.hdu.shape[idx]
ns = _normalize_slice(key, axis)
for k in range(ns.start, ns.stop):
key1 = list(keys)
key1[idx] = k
key1 = tuple(key1)
if n_slices > 1:
# This is not the only slice in the list of keys so
# we simply get the data for this section and append
# it to the list that is output. The out variable will
# be a list of arrays. When we are done we will pack
# the list into a single multidimensional array.
out.append(self[key1])
else:
# This is the only slice in the list of keys so if this
# is the first element of the slice just set the output
# to the array that is the data for the first slice.
# If this is not the first element of the slice then
# append the output for this slice element to the array
# that is to be output. The out variable is a single
# dimensional array.
if k == ns.start:
out = self[key1]
else:
out = np.append(out, self[key1])
# We have the data so break out of the loop.
break
if isinstance(out, list):
out = np.array(out)
return out
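# Hedged usage sketch (illustrative only; the file name is an assumption):
# reading a block of rows through .section so that only this slice is read
# from disk, as described in the `section` property docstring above.
def _example_read_section(path="large_image.fits"):
    import pyfits
    hdulist = pyfits.open(path)
    rows = hdulist[0].section[100:200, :]   # loads only rows 100..199
    hdulist.close()
    return rows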
class PrimaryHDU(_ImageBaseHDU):
"""
FITS primary HDU class.
"""
_default_name = 'PRIMARY'
def __init__(self, data=None, header=None, do_not_scale_image_data=False,
uint=False, scale_back=None):
"""
Construct a primary HDU.
Parameters
----------
data : array or DELAYED, optional
The data in the HDU.
header : Header instance, optional
The header to be used (as a template). If ``header`` is `None`, a
minimal header will be provided.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read.
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, `int16` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as `uint16` data.
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data.
"""
super(PrimaryHDU, self).__init__(
data=data, header=header,
do_not_scale_image_data=do_not_scale_image_data, uint=uint,
scale_back=scale_back)
# insert the keywords EXTEND
if header is None:
dim = self._header['NAXIS']
if dim == 0:
dim = ''
self._header.set('EXTEND', True, after='NAXIS' + str(dim))
@classmethod
def match_header(cls, header):
card = header.cards[0]
return (card.keyword == 'SIMPLE' and
('GROUPS' not in header or header['GROUPS'] != True) and
card.value == True)
def update_header(self):
super(PrimaryHDU, self).update_header()
# Update the position of the EXTEND keyword if it already exists
if 'EXTEND' in self._header:
if len(self._axes):
after = 'NAXIS' + str(len(self._axes))
else:
after = 'NAXIS'
self._header.set('EXTEND', after=after)
def _verify(self, option='warn'):
errs = super(PrimaryHDU, self)._verify(option=option)
# Verify location and value of mandatory keywords.
# The EXTEND keyword is only mandatory if the HDU has extensions; this
# condition is checked by the HDUList object. However, if we already
# have an EXTEND keyword check that its position is correct
if 'EXTEND' in self._header:
naxis = self._header.get('NAXIS', 0)
self.req_cards('EXTEND', naxis + 3, lambda v: isinstance(v, bool),
True, option, errs)
return errs
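# Hedged usage sketch (illustrative only; the file name is an assumption):
# opening a file with uint=True so that int16 data with BZERO=32768 and
# BSCALE=1 is presented as uint16, as described in the docstrings above.
def _example_open_unsigned(path="unsigned16.fits"):
    import pyfits
    hdulist = pyfits.open(path, uint=True)
    data = hdulist[0].data.copy()   # copy so the array survives close()
    hdulist.close()
    return data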
class ImageHDU(_ImageBaseHDU, ExtensionHDU):
"""
FITS image extension HDU class.
"""
_extension = 'IMAGE'
def __init__(self, data=None, header=None, name=None,
do_not_scale_image_data=False, uint=False, scale_back=None):
"""
Construct an image HDU.
Parameters
----------
data : array
The data in the HDU.
header : Header instance
The header to be used (as a template). If `header` is
`None`, a minimal header will be provided.
name : str, optional
The name of the HDU, will be the value of the keyword
``EXTNAME``.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read.
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, `int16` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as `uint16` data.
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data.
"""
# This __init__ currently does nothing differently from the base class,
# and is only explicitly defined for the docstring.
super(ImageHDU, self).__init__(
data=data, header=header, name=name,
do_not_scale_image_data=do_not_scale_image_data, uint=uint,
scale_back=scale_back)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, string_types):
xtension = xtension.rstrip()
return card.keyword == 'XTENSION' and xtension == cls._extension
def _verify(self, option='warn'):
"""
ImageHDU verify method.
"""
errs = super(ImageHDU, self)._verify(option=option)
naxis = self._header.get('NAXIS', 0)
        # PCOUNT must == 0, GCOUNT must == 1; the former is verified in
# ExtensionHDU._verify, however ExtensionHDU._verify allows PCOUNT
# to be >= 0, so we need to check it here
self.req_cards('PCOUNT', naxis + 3, lambda v: (_is_int(v) and v == 0),
0, option, errs)
return errs
def _iswholeline(indx, naxis):
if _is_int(indx):
if indx >= 0 and indx < naxis:
if naxis > 1:
return _SinglePoint(1, indx)
elif naxis == 1:
return _OnePointAxis(1, 0)
else:
raise IndexError('Index %s out of range.' % indx)
elif isinstance(indx, slice):
indx = _normalize_slice(indx, naxis)
if (indx.start == 0) and (indx.stop == naxis) and (indx.step == 1):
return _WholeLine(naxis, 0)
else:
if indx.step == 1:
return _LineSlice(indx.stop - indx.start, indx.start)
else:
return _SteppedSlice((indx.stop - indx.start) // indx.step,
indx.start)
else:
raise IndexError('Illegal index %s' % indx)
class _KeyType(object):
def __init__(self, npts, offset):
self.npts = npts
self.offset = offset
class _WholeLine(_KeyType):
pass
class _SinglePoint(_KeyType):
pass
class _OnePointAxis(_KeyType):
pass
class _LineSlice(_KeyType):
pass
class _SteppedSlice(_KeyType):
pass
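# Hedged usage sketch (illustrative only; file names are assumptions): using
# scale() right before writing out, as its docstring recommends, to store
# float data as int16 with BSCALE/BZERO derived from the data's min/max.
def _example_scale_before_write(src="float_image.fits", dst="int16_image.fits"):
    import pyfits
    hdu = pyfits.open(src)[0]
    hdu.scale('int16', option='minmax')
    hdu.writeto(dst)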
|
|
import os
import warnings
from functools import lru_cache
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Union
import py
import _pytest._code
from _pytest._code.code import ExceptionChainRepr
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprExceptionInfo
from _pytest._code.source import getfslineno
from _pytest.compat import cached_property
from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import PytestPluginManager
from _pytest.deprecated import NODE_USE_FROM_PARENT
from _pytest.fixtures import FixtureDef
from _pytest.fixtures import FixtureLookupError
from _pytest.fixtures import FixtureLookupErrorRepr
from _pytest.mark.structures import Mark
from _pytest.mark.structures import MarkDecorator
from _pytest.mark.structures import NodeKeywords
from _pytest.outcomes import fail
from _pytest.outcomes import Failed
from _pytest.store import Store
if TYPE_CHECKING:
# Imported here due to circular import.
from _pytest.main import Session # noqa: F401
SEP = "/"
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
@lru_cache(maxsize=None)
def _splitnode(nodeid):
"""Split a nodeid into constituent 'parts'.
Node IDs are strings, and can be things like:
''
'testing/code'
'testing/code/test_excinfo.py'
'testing/code/test_excinfo.py::TestFormattedExcinfo'
    Return values are tuples, e.g.
        ()
        ('testing', 'code')
        ('testing', 'code', 'test_excinfo.py')
        ('testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo')
"""
if nodeid == "":
        # If there is no root node at all, return an empty tuple so the caller's logic can remain sane
return ()
parts = nodeid.split(SEP)
# Replace single last element 'test_foo.py::Bar' with multiple elements 'test_foo.py', 'Bar'
parts[-1:] = parts[-1].split("::")
# Convert parts into a tuple to avoid possible errors with caching of a mutable type
return tuple(parts)
def ischildnode(baseid, nodeid):
"""Return True if the nodeid is a child node of the baseid.
E.g. 'foo/bar::Baz' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp'
"""
base_parts = _splitnode(baseid)
node_parts = _splitnode(nodeid)
if len(node_parts) < len(base_parts):
return False
return node_parts[: len(base_parts)] == base_parts
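# Hedged sketch (illustrative only, not part of pytest): the helpers above in
# action, using the node ids from the docstrings.
def _example_nodeid_helpers():
    assert _splitnode("testing/code/test_excinfo.py::TestFormattedExcinfo") == (
        "testing",
        "code",
        "test_excinfo.py",
        "TestFormattedExcinfo",
    )
    assert ischildnode("foo/bar", "foo/bar::Baz")
    assert not ischildnode("foo/blorp", "foo/bar::Baz")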
class NodeMeta(type):
def __call__(self, *k, **kw):
warnings.warn(NODE_USE_FROM_PARENT.format(name=self.__name__), stacklevel=2)
return super().__call__(*k, **kw)
def _create(self, *k, **kw):
return super().__call__(*k, **kw)
class Node(metaclass=NodeMeta):
""" base class for Collector and Item the test collection tree.
Collector subclasses have children, Items are terminal nodes."""
def __init__(
self,
name: str,
parent: Optional["Node"] = None,
config: Optional[Config] = None,
session: Optional["Session"] = None,
fspath: Optional[py.path.local] = None,
nodeid: Optional[str] = None,
) -> None:
#: a unique name within the scope of the parent node
self.name = name
#: the parent collector node.
self.parent = parent
#: the pytest config object
if config:
self.config = config
else:
if not parent:
raise TypeError("config or parent must be provided")
self.config = parent.config
#: the session this node is part of
if session:
self.session = session
else:
if not parent:
raise TypeError("session or parent must be provided")
self.session = parent.session
#: filesystem path where this node was collected from (can be None)
self.fspath = fspath or getattr(parent, "fspath", None)
#: keywords/markers collected from all scopes
self.keywords = NodeKeywords(self)
#: the marker objects belonging to this node
self.own_markers = [] # type: List[Mark]
#: allow adding of extra keywords to use for matching
self.extra_keyword_matches = set() # type: Set[str]
# used for storing artificial fixturedefs for direct parametrization
self._name2pseudofixturedef = {} # type: Dict[str, FixtureDef]
if nodeid is not None:
assert "::()" not in nodeid
self._nodeid = nodeid
else:
if not self.parent:
raise TypeError("nodeid or parent must be provided")
self._nodeid = self.parent.nodeid
if self.name != "()":
self._nodeid += "::" + self.name
# A place where plugins can store information on the node for their
# own use. Currently only intended for internal plugins.
self._store = Store()
@classmethod
def from_parent(cls, parent: "Node", **kw):
"""
        Public constructor for Nodes.
        This indirection was introduced to enable removing
        the fragile logic from the node constructors.
        Subclasses can use ``super().from_parent(...)`` when overriding construction.
:param parent: the parent node of this test Node
"""
if "config" in kw:
raise TypeError("config is not a valid argument for from_parent")
if "session" in kw:
raise TypeError("session is not a valid argument for from_parent")
return cls._create(parent=parent, **kw)
@property
def ihook(self):
""" fspath sensitive hook proxy used to call pytest hooks"""
return self.session.gethookproxy(self.fspath)
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None))
def warn(self, warning):
"""Issue a warning for this item.
Warnings will be displayed after the test session, unless explicitly suppressed
:param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning.
:raise ValueError: if ``warning`` instance is not a subclass of PytestWarning.
Example usage:
.. code-block:: python
node.warn(PytestWarning("some message"))
"""
from _pytest.warning_types import PytestWarning
if not isinstance(warning, PytestWarning):
raise ValueError(
"warning must be an instance of PytestWarning or subclass, got {!r}".format(
warning
)
)
path, lineno = get_fslocation_from_item(self)
warnings.warn_explicit(
warning,
category=None,
filename=str(path),
lineno=lineno + 1 if lineno is not None else None,
)
# methods for ordering nodes
@property
def nodeid(self):
""" a ::-separated string denoting its collection tree address. """
return self._nodeid
def __hash__(self):
return hash(self.nodeid)
def setup(self):
pass
def teardown(self):
pass
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self # type: Optional[Node]
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def add_marker(
self, marker: Union[str, MarkDecorator], append: bool = True
) -> None:
"""dynamically add a marker object to the node.
:type marker: ``str`` or ``pytest.mark.*`` object
        :param marker: the marker to add.
        :param append: if ``True`` (the default), append the marker;
            if ``False``, insert it at position ``0``.
"""
from _pytest.mark import MARK_GEN
if isinstance(marker, MarkDecorator):
marker_ = marker
elif isinstance(marker, str):
marker_ = getattr(MARK_GEN, marker)
else:
            raise ValueError("marker {!r} is not a string or pytest.mark.* Marker".format(marker))
self.keywords[marker_.name] = marker
if append:
self.own_markers.append(marker_.mark)
else:
self.own_markers.insert(0, marker_.mark)
def iter_markers(self, name=None):
"""
        iterate over all markers of the node
        :param name: if given, filter the results by the name attribute
"""
return (x[1] for x in self.iter_markers_with_node(name=name))
def iter_markers_with_node(self, name=None):
"""
        iterate over all markers of the node
        returns sequence of tuples (node, mark)
        :param name: if given, filter the results by the name attribute
"""
for node in reversed(self.listchain()):
for mark in node.own_markers:
if name is None or getattr(mark, "name", None) == name:
yield node, mark
def get_closest_marker(self, name, default=None):
"""return the first marker matching the name, from closest (for example function) to farther level (for example
module level).
        :param default: fallback return value if no marker was found
:param name: name to filter by
"""
return next(self.iter_markers(name=name), default)
def listextrakeywords(self):
""" Return a set of all extra keywords in self and any parents."""
extra_keywords = set() # type: Set[str]
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
def listnames(self):
return [x.name for x in self.listchain()]
def addfinalizer(self, fin):
""" register a function to be called when this node is finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
"""
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls):
""" get the next parent node (including ourself)
which is an instance of the given class"""
current = self # type: Optional[Node]
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(
self, excinfo: ExceptionInfo[Union[Failed, FixtureLookupError]], style=None
) -> Union[str, ReprExceptionInfo, ExceptionChainRepr, FixtureLookupErrorRepr]:
if isinstance(excinfo.value, fail.Exception):
if not excinfo.value.pytrace:
return str(excinfo.value)
if isinstance(excinfo.value, FixtureLookupError):
return excinfo.value.formatrepr()
if self.config.getoption("fulltrace", False):
style = "long"
else:
tb = _pytest._code.Traceback([excinfo.traceback[-1]])
self._prunetraceback(excinfo)
if len(excinfo.traceback) == 0:
excinfo.traceback = tb
if style == "auto":
style = "long"
# XXX should excinfo.getrepr record all data and toterminal() process it?
if style is None:
if self.config.getoption("tbstyle", "auto") == "short":
style = "short"
else:
style = "long"
if self.config.getoption("verbose", 0) > 1:
truncate_locals = False
else:
truncate_locals = True
try:
os.getcwd()
abspath = False
except OSError:
abspath = True
return excinfo.getrepr(
funcargs=True,
abspath=abspath,
showlocals=self.config.getoption("showlocals", False),
style=style,
tbfilter=False, # pruned already, or in --fulltrace mode.
truncate_locals=truncate_locals,
)
def repr_failure(
self, excinfo, style=None
) -> Union[str, ReprExceptionInfo, ExceptionChainRepr, FixtureLookupErrorRepr]:
"""
Return a representation of a collection or test failure.
:param excinfo: Exception information for the failure.
"""
return self._repr_failure_py(excinfo, style)
def get_fslocation_from_item(
item: "Item",
) -> Tuple[Union[str, py.path.local], Optional[int]]:
"""Tries to extract the actual location from an item, depending on available attributes:
* "fslocation": a pair (path, lineno)
* "obj": a Python object that the item wraps.
* "fspath": just a path
:rtype: a tuple of (str|LocalPath, int) with filename and line number.
"""
try:
return item.location[:2]
except AttributeError:
pass
obj = getattr(item, "obj", None)
if obj is not None:
return getfslineno(obj)
return getattr(item, "fspath", "unknown location"), -1
class Collector(Node):
""" Collector instances create children through collect()
and thus iteratively build a tree.
"""
class CollectError(Exception):
""" an error during collection, contains a custom message. """
def collect(self):
""" returns a list of children (items and collectors)
for this collection node.
"""
raise NotImplementedError("abstract")
def repr_failure(self, excinfo):
"""
Return a representation of a collection failure.
:param excinfo: Exception information for the failure.
"""
if excinfo.errisinstance(self.CollectError) and not self.config.getoption(
"fulltrace", False
):
exc = excinfo.value
return str(exc.args[0])
# Respect explicit tbstyle option, but default to "short"
# (_repr_failure_py uses "long" with "fulltrace" option always).
tbstyle = self.config.getoption("tbstyle", "auto")
if tbstyle == "auto":
tbstyle = "short"
return self._repr_failure_py(excinfo, style=tbstyle)
def _prunetraceback(self, excinfo):
if hasattr(self, "fspath"):
traceback = excinfo.traceback
ntraceback = traceback.cut(path=self.fspath)
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
excinfo.traceback = ntraceback.filter()
def _check_initialpaths_for_relpath(session, fspath):
for initial_path in session._initialpaths:
if fspath.common(initial_path) == initial_path:
return fspath.relto(initial_path)
class FSHookProxy:
def __init__(
self, fspath: py.path.local, pm: PytestPluginManager, remove_mods
) -> None:
self.fspath = fspath
self.pm = pm
self.remove_mods = remove_mods
def __getattr__(self, name: str):
x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
self.__dict__[name] = x
return x
class FSCollector(Collector):
def __init__(
self, fspath: py.path.local, parent=None, config=None, session=None, nodeid=None
) -> None:
name = fspath.basename
if parent is not None:
rel = fspath.relto(parent.fspath)
if rel:
name = rel
name = name.replace(os.sep, SEP)
self.fspath = fspath
session = session or parent.session
if nodeid is None:
nodeid = self.fspath.relto(session.config.rootdir)
if not nodeid:
nodeid = _check_initialpaths_for_relpath(session, fspath)
if nodeid and os.sep != SEP:
nodeid = nodeid.replace(os.sep, SEP)
super().__init__(name, parent, config, session, nodeid=nodeid, fspath=fspath)
self._norecursepatterns = self.config.getini("norecursedirs")
@classmethod
def from_parent(cls, parent, *, fspath):
"""
The public constructor
"""
return super().from_parent(parent=parent, fspath=fspath)
def _gethookproxy(self, fspath: py.path.local):
# check if we have the common case of running
# hooks with all conftest.py files
pm = self.config.pluginmanager
my_conftestmodules = pm._getconftestmodules(fspath)
remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
if remove_mods:
# one or more conftests are not in use at this fspath
proxy = FSHookProxy(fspath, pm, remove_mods)
else:
# all plugins are active for this fspath
proxy = self.config.hook
return proxy
def _recurse(self, dirpath: py.path.local) -> bool:
if dirpath.basename == "__pycache__":
return False
ihook = self._gethookproxy(dirpath.dirpath())
if ihook.pytest_ignore_collect(path=dirpath, config=self.config):
return False
for pat in self._norecursepatterns:
if dirpath.check(fnmatch=pat):
return False
ihook = self._gethookproxy(dirpath)
ihook.pytest_collect_directory(path=dirpath, parent=self)
return True
def _collectfile(self, path, handle_dupes=True):
assert (
path.isfile()
), "{!r} is not a file (isdir={!r}, exists={!r}, islink={!r})".format(
path, path.isdir(), path.exists(), path.islink()
)
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
if handle_dupes:
keepduplicates = self.config.getoption("keepduplicates")
if not keepduplicates:
duplicate_paths = self.config.pluginmanager._duplicatepaths
if path in duplicate_paths:
return ()
else:
duplicate_paths.add(path)
return ihook.pytest_collect_file(path=path, parent=self)
class File(FSCollector):
""" base class for collecting tests from a file. """
class Item(Node):
""" a basic test invocation item. Note that for a single function
there might be multiple test invocation items.
"""
nextitem = None
def __init__(self, name, parent=None, config=None, session=None, nodeid=None):
super().__init__(name, parent, config, session, nodeid=nodeid)
self._report_sections = [] # type: List[Tuple[str, str, str]]
#: user properties is a list of tuples (name, value) that holds user
#: defined properties for this test.
self.user_properties = [] # type: List[Tuple[str, Any]]
def runtest(self) -> None:
raise NotImplementedError("runtest must be implemented by Item subclass")
def add_report_section(self, when: str, key: str, content: str) -> None:
"""
Adds a new report section, similar to what's done internally to add stdout and
stderr captured output::
item.add_report_section("call", "stdout", "report section contents")
:param str when:
One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
:param str key:
Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
``"stderr"`` internally.
:param str content:
The full contents as a string.
"""
if content:
self._report_sections.append((when, key, content))
def reportinfo(self) -> Tuple[Union[py.path.local, str], Optional[int], str]:
return self.fspath, None, ""
@cached_property
def location(self) -> Tuple[str, Optional[int], str]:
location = self.reportinfo()
if isinstance(location[0], py.path.local):
fspath = location[0]
else:
fspath = py.path.local(location[0])
relfspath = self.session._node_location_to_relpath(fspath)
assert type(location[2]) is str
return (relfspath, location[1], location[2])
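# Hedged sketch (illustrative only, not part of pytest): plugin code is
# expected to construct nodes through from_parent() rather than by calling
# the constructors directly (which would trigger the NODE_USE_FROM_PARENT
# warning via NodeMeta). The subclass and names below are assumptions.
def _example_item_from_parent(parent: "Node") -> "Item":
    class _ExampleItem(Item):
        def runtest(self) -> None:
            pass

    return _ExampleItem.from_parent(parent, name="example_item")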
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
import threading
import six
if six.PY3:
import queue # pylint: disable=wrong-import-order
else:
import Queue as queue # pylint: disable=wrong-import-order
# Log marker containing SurfaceTexture timestamps.
_SURFACE_TEXTURE_TIMESTAMPS_MESSAGE = 'SurfaceTexture update timestamps'
_SURFACE_TEXTURE_TIMESTAMP_RE = r'\d+'
class SurfaceStatsCollector(object):
"""Collects surface stats for a SurfaceView from the output of SurfaceFlinger.
Args:
device: A DeviceUtils instance.
"""
def __init__(self, device):
self._device = device
self._collector_thread = None
self._surface_before = None
self._get_data_event = None
self._data_queue = None
self._stop_event = None
self._warn_about_empty_data = True
def DisableWarningAboutEmptyData(self):
self._warn_about_empty_data = False
def Start(self):
assert not self._collector_thread
if self._ClearSurfaceFlingerLatencyData():
self._get_data_event = threading.Event()
self._stop_event = threading.Event()
self._data_queue = queue.Queue()
self._collector_thread = threading.Thread(target=self._CollectorThread)
self._collector_thread.start()
else:
raise Exception('SurfaceFlinger not supported on this device.')
def Stop(self):
assert self._collector_thread
(refresh_period, timestamps) = self._GetDataFromThread()
if self._collector_thread:
self._stop_event.set()
self._collector_thread.join()
self._collector_thread = None
return (refresh_period, timestamps)
def _CollectorThread(self):
last_timestamp = 0
timestamps = []
retries = 0
while not self._stop_event.is_set():
self._get_data_event.wait(1)
try:
refresh_period, new_timestamps = self._GetSurfaceFlingerFrameData()
        if refresh_period is None or new_timestamps is None:
retries += 1
if retries < 3:
continue
if last_timestamp:
# Some data has already been collected, but either the app
# was closed or there's no new data. Signal the main thread and
# wait.
self._data_queue.put((None, None))
self._stop_event.wait()
break
raise Exception('Unable to get surface flinger latency data')
timestamps += [
timestamp for timestamp in new_timestamps
if timestamp > last_timestamp
]
if len(timestamps):
last_timestamp = timestamps[-1]
if self._get_data_event.is_set():
self._get_data_event.clear()
self._data_queue.put((refresh_period, timestamps))
timestamps = []
except Exception as e:
# On any error, before aborting, put the exception into _data_queue to
# prevent the main thread from waiting at _data_queue.get() infinitely.
self._data_queue.put(e)
raise
def _GetDataFromThread(self):
self._get_data_event.set()
ret = self._data_queue.get()
if isinstance(ret, Exception):
raise ret
return ret
def _ClearSurfaceFlingerLatencyData(self):
"""Clears the SurfaceFlinger latency data.
Returns:
True if SurfaceFlinger latency is supported by the device, otherwise
False.
"""
# The command returns nothing if it is supported, otherwise returns many
# lines of result just like 'dumpsys SurfaceFlinger'.
results = self._device.RunShellCommand(
['dumpsys', 'SurfaceFlinger', '--latency-clear', 'SurfaceView'],
check_return=True)
return not len(results)
def GetSurfaceFlingerPid(self):
try:
# Returns the first matching PID found.
return next(p.pid for p in self._device.ListProcesses('surfaceflinger'))
except StopIteration:
raise Exception('Unable to get surface flinger process id')
def _GetSurfaceViewWindowName(self):
results = self._device.RunShellCommand(
['dumpsys', 'SurfaceFlinger', '--list'], check_return=True)
for window_name in results:
if window_name.startswith('SurfaceView'):
return window_name
return None
def _GetSurfaceFlingerFrameData(self):
"""Returns collected SurfaceFlinger frame timing data.
Returns:
A tuple containing:
- The display's nominal refresh period in milliseconds.
- A list of timestamps signifying frame presentation times in
milliseconds.
The return value may be (None, None) if there was no data collected (for
example, if the app was closed before the collector thread has finished).
"""
window_name = self._GetSurfaceViewWindowName()
command = ['dumpsys', 'SurfaceFlinger', '--latency']
# Even if we don't find the window name, run the command to get the refresh
# period.
if window_name:
command.append(window_name)
output = self._device.RunShellCommand(command, check_return=True)
return ParseFrameData(output, parse_timestamps=bool(window_name))
def to_long_int(val):
"""Cast val to a long int type."""
return long(val) if six.PY2 else int(val)
def ParseFrameData(lines, parse_timestamps):
# adb shell dumpsys SurfaceFlinger --latency <window name>
# prints some information about the last 128 frames displayed in
# that window.
# The data returned looks like this:
# 16954612
# 7657467895508 7657482691352 7657493499756
# 7657484466553 7657499645964 7657511077881
# 7657500793457 7657516600576 7657527404785
# (...)
#
  # The first line is the refresh period (here 16.95 ms); it is followed
  # by 128 lines w/ 3 timestamps in nanoseconds each:
  # A) when the app started to draw
  # B) the vsync immediately preceding SF submitting the frame to the h/w
  # C) timestamp immediately after SF submitted that frame to the h/w
  #
  # The difference between the 1st and 3rd timestamp is the frame-latency.
  # An interesting signal is when the frame latency crosses a refresh-period
  # boundary, which can be calculated this way:
  #
  # ceil((C - A) / refresh-period)
  #
  # (each time the number above changes, we have a "jank").
  # If this happens a lot during an animation, the animation appears
  # janky, even if it runs at 60 fps on average.
# pylint: disable=redefined-variable-type
results = []
for line in lines:
# Skip over lines with anything other than digits and whitespace.
if re.search(r'[^\d\s]', line):
logging.warning('unexpected output: %s', line)
else:
results.append(line)
if not results:
return None, None
timestamps = []
nanoseconds_per_millisecond = 1e6
refresh_period = to_long_int(results[0]) / nanoseconds_per_millisecond
if not parse_timestamps:
return refresh_period, timestamps
# If a fence associated with a frame is still pending when we query the
# latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
# Since we only care about completed frames, we will ignore any timestamps
# with this value.
pending_fence_timestamp = (1 << 63) - 1
for line in results[1:]:
fields = line.split()
if len(fields) != 3:
logging.warning('Unexpected line: %s', line)
continue
timestamp = to_long_int(fields[1])
if timestamp == pending_fence_timestamp:
continue
timestamp /= nanoseconds_per_millisecond
timestamps.append(timestamp)
return refresh_period, timestamps
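

# Illustrative, hypothetical sanity check for ParseFrameData (not part of the
# original module): it feeds in a fabricated two-line dumpsys sample and checks
# that the refresh period and the single completed frame come back converted to
# milliseconds. It assumes the module-level imports used above (re, logging,
# six) are present.
def _example_parse_frame_data():
  sample = [
      '16954612',
      '7657467895508 7657482691352 7657493499756',
  ]
  refresh_period, timestamps = ParseFrameData(sample, parse_timestamps=True)
  assert abs(refresh_period - 16.954612) < 1e-6
  # The middle field (B, the vsync timestamp) is the one that is kept.
  assert len(timestamps) == 1 and abs(timestamps[0] - 7657482691.352) < 1e-3
  return refresh_period, timestamps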
|
|
'''
Author: Christoph Garbers
keras layers that are needed if no CuDNN speed up is available
and layers that fell out of favor in keras2
'''
from keras import backend as K
from keras.engine.topology import Layer, InputSpec
import numpy as np
import copy
import warnings
from keras.layers import Conv2DTranspose
from keras import initializers
from keras import activations
from keras import regularizers
from keras import constraints
# from deconv_fun import deconv_fun
# no mask implementation
'''
build(input_shape): this is where you will define your weights. This method must set self.built = True, which can be done by calling super([Layer], self).build().
call(x): this is where the layer's logic lives. Unless you want your layer to support masking, you only have to care about the first argument passed to call: the input tensor.
get_output_shape_for(input_shape): in case your layer modifies the shape of its input, you should specify here the shape transformation logic. This allows Keras to do automatic shape inference.
'''
class MyLayer(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MyLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
        self.W = self.add_weight(name='{}_W'.format(self.name),
                                 shape=(input_shape[1], self.output_dim),
                                 initializer='random_uniform',
                                 trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this somewhere!
def call(self, x, mask=None):
return K.dot(x, self.W)
def get_output_shape_for(self, input_shape):
return (input_shape[0], self.output_dim)
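# A minimal usage sketch for the example layer above (layer size, input shape
# and optimizer are illustrative assumptions, not part of this module):
#     from keras.models import Sequential
#     model = Sequential()
#     model.add(MyLayer(32, input_shape=(64,)))
#     model.compile(optimizer='sgd', loss='mse')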
class DeConv(Conv2DTranspose):
# Adds a scaling of the edges to the Conv2DTranspose layer to avoid
# artifacts in the stride=(1,1) case
def __init__(self, filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
# if type(kernel_size) == 'int':
# self.kernel_size = (kernel_size, kernel_size)
# else:
# self.kernel_size = kernel_size
if strides != (1, 1) or padding != 'valid':
warnings.warn(
                'Layer DeConv was not built for this stride and/or padding option!')
super(DeConv, self).__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs)
def build(self, input_shape):
super(DeConv, self).build(input_shape)
shape = super(DeConv, self).compute_output_shape(input_shape)
a = np.zeros(shape)
for i in range(shape[2]):
for j in range(shape[3]):
a[:, :, i, j] = float(np.prod(self.kernel_size))\
/ min(float(i + 1), self.kernel_size[0])\
/ min(float(j + 1), self.kernel_size[1])
self.edge_scale = K.variable(value=a)
def call(self, inputs):
outputs = super(DeConv, self).call(inputs)
outputs = outputs * self.edge_scale
return outputs
# Untied Bias Layer. Can be used instead of Activation.
class Bias(Layer):
def __init__(self, nFilters, **kwargs):
self.nFilters = nFilters
super(Bias, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.b = self.add_weight(shape=(input_shape[1:]),
initializer='constant',
trainable=True,
name='{}_b'.format(self.name))
self.built = True
# Be sure to call this somewhere!
super(Bias, self).build(input_shape)
def call(self, x, mask=None):
output = x
output += self.b.dimshuffle('x', 0, 1, 2)
return output
def get_output_shape_for(self, input_shape):
return input_shape
def compute_output_shape(self, input_shape):
return self.get_output_shape_for(input_shape)
class DeBias(Bias):
def __init__(self, nFilters, **kwargs):
super(DeBias, self).__init__(nFilters, **kwargs)
def call(self, x, mask=None):
output = x
output -= self.b.dimshuffle('x', 0, 1, 2)
return output
# not needed anymore, available in keras 2
def constant(shape, scale=1., name=None):
constant = scale
for i in shape[::-1]:
try:
constant = [constant] * i
        except TypeError:
print("exception in constant init! i is ",
i, " the shape is ", shape)
exit()
return K.variable(constant)
class fPermute(Layer):
def __init__(self, dims, **kwargs):
self.dims = tuple(dims)
super(fPermute, self).__init__(**kwargs)
def get_output_shape_for(self, input_shape):
input_shape = list(input_shape)
output_shape = copy.copy(input_shape)
for i, dim in enumerate(self.dims):
target_dim = input_shape[dim]
output_shape[i] = target_dim
return tuple(output_shape)
def compute_output_shape(self, input_shape):
return self.get_output_shape_for(input_shape)
def call(self, x, mask=None):
return K.permute_dimensions(x, self.dims)
def get_config(self):
config = {'dims': self.dims}
base_config = super(fPermute, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Demerge(Layer):
    # Unfinished placeholder layer; the methods below are stubs.
    def __init__(self, model):
        pass
    def call(self, x, mask=None):
        pass
def get_output_shape_for(self, input_shape):
pass
def compute_output_shape(self, input_shape):
pass
class DePool(Layer):
def __init__(self, model,
pool_layer_origin=['pool_0'], stride=(2, 2),
**kwargs):
self.stride = stride
self.model = model
self.pool_layer_origin = pool_layer_origin
super(DePool, self).__init__(**kwargs)
def _get_pool_flags(self, pool):
# permutation needed if the layer is in the 'normal' not the pylearn
# order, maybe make a switch for that and the channel order
input_ = K.permute_dimensions(pool.get_input_at(0), (0, 1, 2, 3))
pooled = K.permute_dimensions(pool.get_output_at(0), (0, 1, 2, 3))
pooled = K.repeat_elements(pooled, self.stride[0], axis=-2)
pooled = K.repeat_elements(pooled, self.stride[1], axis=-1)
print 'shapes before k.equal %s \t %s' % (K.int_shape(input_),
K.int_shape(pooled))
return K.equal(input_, pooled)
def call(self, x):
pool = self.model
for name in self.pool_layer_origin:
pool = pool.get_layer(name)
flags = self._get_pool_flags(pool)
x_up = K.repeat_elements(x, self.stride[0], axis=-2)
x_up = K.repeat_elements(x_up, self.stride[1], axis=-1)
print 'shapes before * %s ' % str(K.int_shape(x_up))
x_up = x_up * K.cast(flags, 'float32')
return x_up
def get_output_shape_for(self, input_shape):
m_b, l, w, h = input_shape
return (m_b, l, self.stride[0] * w, self.stride[1] * h)
def compute_output_shape(self, input_shape):
return self.get_output_shape_for(input_shape)
# from keras1:
class MaxoutDense(Layer):
"""A dense maxout layer.
A `MaxoutDense` layer takes the element-wise maximum of
`nb_feature` `Dense(input_dim, output_dim)` linear layers.
This allows the layer to learn a convex,
piecewise linear activation function over the inputs.
Note that this is a *linear* layer;
if you wish to apply activation function
(you shouldn't need to --they are universal function approximators),
an `Activation` layer must be added after.
# Arguments
output_dim: int > 0.
nb_feature: number of Dense layers to use internally.
init: name of initialization function for the weights of the layer
(see [initializations](../initializations.md)),
or alternatively, Theano function to use for weights
initialization. This parameter is only relevant
if you don't pass a `weights` argument.
weights: list of Numpy arrays to set as initial weights.
The list should have 2 elements, of shape `(input_dim, output_dim)`
and (output_dim,) for weights and biases respectively.
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the main weights matrix.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
applied to the network output.
W_constraint: instance of the [constraints](../constraints.md) module
(eg. maxnorm, nonneg), applied to the main weights matrix.
b_constraint: instance of the [constraints](../constraints.md) module,
applied to the bias.
bias: whether to include a bias
(i.e. make the layer affine rather than linear).
input_dim: dimensionality of the input (integer). This argument
(or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
# Input shape
2D tensor with shape: `(nb_samples, input_dim)`.
# Output shape
2D tensor with shape: `(nb_samples, output_dim)`.
# References
- [Maxout Networks](http://arxiv.org/abs/1302.4389)
"""
def __init__(self, output_dim,
nb_feature=4,
init='glorot_uniform',
weights=None,
W_regularizer=None,
b_regularizer=None,
activity_regularizer=None,
W_constraint=None,
b_constraint=None,
bias=True,
input_dim=None,
**kwargs):
self.output_dim = output_dim
self.nb_feature = nb_feature
self.init = initializers.get(init)
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
self.initial_weights = weights
self.input_spec = InputSpec(ndim=2)
self.input_dim = input_dim
if self.input_dim:
kwargs['input_shape'] = (self.input_dim,)
super(MaxoutDense, self).__init__(**kwargs)
def build(self, input_shape):
input_dim = input_shape[1]
self.input_spec = InputSpec(dtype=K.floatx(),
shape=(None, input_dim))
self.W = self.add_weight((self.nb_feature, input_dim, self.output_dim),
initializer=self.init,
name='W',
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((self.nb_feature, self.output_dim,),
initializer='zero',
name='b',
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) == 2
return (input_shape[0], self.output_dim)
def call(self, x):
# no activation, this layer is only linear.
output = K.dot(x, self.W)
if self.bias:
output += self.b
output = K.max(output, axis=1)
return output
def get_config(self):
config = {'output_dim': self.output_dim,
'init': initializers.serialize(self.init),
'nb_feature': self.nb_feature,
'W_regularizer': regularizers.serialize(self.W_regularizer),
'b_regularizer': regularizers.serialize(self.b_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'W_constraint': constraints.serialize(self.W_constraint),
'b_constraint': constraints.serialize(self.b_constraint),
'bias': self.bias,
'input_dim': self.input_dim}
base_config = super(MaxoutDense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
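

# Illustrative reference implementation of the maxout computation described in
# the MaxoutDense docstring above (pure numpy, backend-independent; the helper
# name and shapes are assumptions for demonstration only):
# output[b, o] = max over k of (x[b, :] . W[k, :, o] + b[k, o]).
def _maxout_reference(x, W, b=None):
    """x: (batch, input_dim), W: (nb_feature, input_dim, output_dim),
    b: (nb_feature, output_dim) or None. Returns (batch, output_dim)."""
    z = np.einsum('bi,kio->bko', x, W)  # (batch, nb_feature, output_dim)
    if b is not None:
        z = z + b
    return z.max(axis=1)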
|
|
# Copyright (c) 2013-2015 Unidata.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
import logging
import xml.etree.ElementTree as ET
from .metadata import TDSCatalogMetadata
from .http_util import create_http_session, urlopen
try:
from urlparse import urljoin
except ImportError:
# Python 3
from urllib.parse import urljoin
log = logging.getLogger("siphon.catalog")
log.setLevel(logging.WARNING)
class TDSCatalog(object):
r"""
An object for holding information from a THREDDS Client Catalog.
Attributes
----------
catalog_url : string
The url path of the catalog to parse.
base_tds_url : string
The top level server address
    datasets : dict
        A dictionary of Dataset objects, whose keys are the dataset names
services : List
A list of SimpleServices listed in the catalog
catalog_refs : dict
A dictionary of CatalogRef objects whose keys are the name of the
catalog ref title.
"""
def __init__(self, catalog_url):
r"""
Initialize the TDSCatalog object.
Parameters
----------
catalog_url : string
The URL of a THREDDS client catalog
"""
# top level server url
self.catalog_url = catalog_url
self.base_tds_url = catalog_url.split('/thredds/')[0]
session = create_http_session()
# get catalog.xml file
resp = session.get(self.catalog_url)
resp.raise_for_status()
# If we were given an HTML link, warn about it and try to fix to xml
if 'html' in resp.headers['content-type']:
import warnings
new_url = self.catalog_url.replace('html', 'xml')
warnings.warn('URL %s returned HTML. Changing to: %s' % (self.catalog_url,
new_url))
self.catalog_url = new_url
resp = session.get(self.catalog_url)
resp.raise_for_status()
# begin parsing the xml doc
root = ET.fromstring(resp.text)
if "name" in root.attrib:
self.catalog_name = root.attrib["name"]
else:
self.catalog_name = "No name found"
self.datasets = {}
self.services = []
self.catalog_refs = {}
self.metadata = {}
service_skip_count = 0
service_skip = 0
for child in root.iter():
tag_type = child.tag.split('}')[-1]
if tag_type == "dataset":
self._process_dataset(child)
elif tag_type == "catalogRef":
self._process_catalog_ref(child)
elif (tag_type == "metadata") or (tag_type == ""):
self._process_metadata(child, tag_type)
elif tag_type == "service":
if child.attrib["serviceType"] != "Compound":
# we do not want to process single services if they
# are already contained within a compound service, so
# we need to skip over those cases.
if service_skip_count >= service_skip:
self.services.append(SimpleService(child))
service_skip = 0
service_skip_count = 0
else:
service_skip_count += 1
else:
self.services.append(CompoundService(child))
service_skip = self.services[-1].number_of_subservices
service_skip_count = 0
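        # Worked example of the skip logic above (counts are illustrative): a
        # Compound service with 3 nested services sets service_skip to 3, so
        # the next 3 <service> elements that root.iter() yields (the compound's
        # own children) are skipped, and the following standalone <service> is
        # appended again.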
self._process_datasets()
def _process_dataset(self, element):
if "urlPath" in element.attrib:
if element.attrib["urlPath"] == "latest.xml":
ds = Dataset(element, self.catalog_url)
else:
ds = Dataset(element)
self.datasets[ds.name] = ds
def _process_catalog_ref(self, element):
catalog_ref = CatalogRef(self.catalog_url, element)
self.catalog_refs[catalog_ref.title] = catalog_ref
def _process_metadata(self, element, tag_type):
if tag_type == "":
log.warning("Trying empty tag type as metadata")
self.metadata = TDSCatalogMetadata(element, self.metadata).metadata
def _process_datasets(self):
for dsName in list(self.datasets.keys()):
self.datasets[dsName].make_access_urls(
self.base_tds_url, self.services, metadata=self.metadata)
class CatalogRef(object):
r"""
An object for holding Catalog References obtained from a THREDDS Client
Catalog.
Attributes
----------
name : string
The name of the catalogRef element
href : string
url to the catalogRef's THREDDS Client Catalog
title : string
Title of the catalogRef element
"""
def __init__(self, base_url, element_node):
r"""
Initialize the catalogRef object.
Parameters
----------
base_url : String
URL to the base catalog that owns this reference
element_node : Element
An Element Tree Element representing a catalogRef node
"""
self.name = element_node.attrib["name"]
self.title = element_node.attrib["{http://www.w3.org/1999/xlink}title"]
# Resolve relative URLs
href = element_node.attrib["{http://www.w3.org/1999/xlink}href"]
self.href = urljoin(base_url, href)
def follow(self):
r"""
Follow the reference, returning a new TDSCatalog
"""
return TDSCatalog(self.href)
class Dataset(object):
r"""
An object for holding Datasets obtained from a THREDDS Client Catalog.
Attributes
----------
name : string
The name of the Dataset element
url_path : string
url to the accessible dataset
access_urls : dict
A dictionary of access urls whose keywords are the access service
types defined in the catalog (for example, "OPENDAP", "NetcdfSubset",
"WMS", etc.
"""
def __init__(self, element_node, catalog_url=""):
r"""
Initialize the Dataset object.
Parameters
----------
element_node : Element
An Element Tree Element representing a Dataset node
catalog_url : string
The top level server url
"""
self.name = element_node.attrib['name']
self.url_path = element_node.attrib['urlPath']
self._resolved = False
self._resolverUrl = None
# if latest.xml, resolve the latest url
if self.url_path == "latest.xml":
if catalog_url != "":
self._resolved = True
self._resolverUrl = self.url_path
self.url_path = self.resolve_url(catalog_url)
else:
log.warning('Must pass along the catalog URL to resolve '
'the latest.xml dataset!')
def resolve_url(self, catalog_url):
r"""
Resolve the url of the dataset when reading latest.xml
Parameters
----------
catalog_url : string
The catalog url to be resolved
"""
if catalog_url != "":
resolver_base = catalog_url.split("catalog.xml")[0]
resolver_url = resolver_base + self.url_path
resolver_xml = urlopen(resolver_url)
tree = ET.parse(resolver_xml)
root = tree.getroot()
if "name" in root.attrib:
self.catalog_name = root.attrib["name"]
else:
self.catalog_name = "No name found"
resolved_url = ''
found = False
for child in root.iter():
if not found:
tag_type = child.tag.split('}')[-1]
if tag_type == "dataset":
if "urlPath" in child.attrib:
ds = Dataset(child)
resolved_url = ds.url_path
found = True
if found:
return resolved_url
else:
log.warning("no dataset url path found in latest.xml!")
def make_access_urls(self, catalog_url, all_services, metadata=None):
r"""
Make fully qualified urls for the access methods enabled on the
dataset.
Parameters
----------
catalog_url : string
The top level server url
    all_services : list
        list of SimpleService objects from the parent catalog
    metadata : dict, optional
        catalog metadata; if it contains a "serviceName", only that service
        is used to build the access urls
"""
service_name = None
if metadata:
if "serviceName" in metadata:
service_name = metadata["serviceName"]
access_urls = {}
server_url = catalog_url.split('/thredds/')[0]
found_service = None
if service_name:
for service in all_services:
if service.name == service_name:
found_service = service
break
service = found_service
if service:
if service.service_type != 'Resolver':
if isinstance(service, CompoundService):
for subservice in service.services:
access_urls[subservice.service_type] = server_url + \
subservice.base + self.url_path
else:
access_urls[service.service_type] = server_url + \
service.base + self.url_path
self.access_urls = access_urls
class SimpleService(object):
r"""
An object for holding information about an access service enabled on a
dataset.
Attributes
----------
name : string
The name of the service
service_type : string
The service type (i.e. "OPENDAP", "NetcdfSubset", "WMS", etc.)
    base : string
        The base URL path for the service
"""
def __init__(self, service_node):
r"""
        Initialize the SimpleService object.
Parameters
----------
service_node : Element
An Element Tree Element representing a service node
"""
self.name = service_node.attrib['name']
self.service_type = service_node.attrib['serviceType']
self.base = service_node.attrib['base']
class CompoundService(object):
r"""
    An object for holding information about a compound service.
Attributes
----------
name : string
The name of the compound service
service_type : string
The service type (for this object, service type will always be
"COMPOUND")
services : list
A list of SimpleService objects
"""
def __init__(self, service_node):
r"""
Initialize a CompoundService object.
Parameters
----------
service_node : Element
An Element Tree Element representing a compound service node
"""
self.name = service_node.attrib['name']
self.service_type = service_node.attrib['serviceType']
self.base = service_node.attrib['base']
services = []
subservices = 0
for child in list(service_node):
services.append(SimpleService(child))
subservices += 1
self.services = services
self.number_of_subservices = subservices
def _get_latest_cat(catalog_url):
r"""
Get the latest dataset catalog from the supplied top level dataset catalog
url.
Parameters
----------
catalog_url : string
The URL of a top level data catalog
Returns
-------
TDSCatalog
A TDSCatalog object containing the information from the latest dataset
"""
cat = TDSCatalog(catalog_url)
for service in cat.services:
if (service.name.lower() == "latest" and
service.service_type.lower() == "resolver"):
latest_cat = cat.catalog_url.replace("catalog.xml", "latest.xml")
return TDSCatalog(latest_cat)
log.error('ERROR: "latest" service not enabled for this catalog!')
def get_latest_access_url(catalog_url, access_method):
r"""
Get the data access url, using a specified access method, to the latest
data available from a top level dataset catalog (url). Currently only
supports the existence of one "latest" dataset.
Parameters
----------
catalog_url : string
The URL of a top level data catalog
access_method : String
desired data access method (i.e. "OPENDAP", "NetcdfSubset", "WMS", etc)
Returns
-------
string
Data access URL to be used to access the latest data available from a
        given catalog using the specified `access_method`. Typically of length 1,
        but not always.
"""
latest_cat = _get_latest_cat(catalog_url)
if latest_cat != "":
if len(list(latest_cat.datasets.keys())) > 0:
latest_ds = []
for lds_name in latest_cat.datasets:
lds = latest_cat.datasets[lds_name]
if access_method in lds.access_urls:
latest_ds.append(lds.access_urls[access_method])
if len(latest_ds) == 1:
latest_ds = latest_ds[0]
return latest_ds
            else:
                log.error('ERROR: More than one latest dataset found; '
                          'this case is currently not supported in '
                          'siphon.')
        else:
            log.error('ERROR: No datasets found in the latest catalog!')
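

# Hypothetical usage sketch (the helper name and the 'OPENDAP' access method
# are illustrative assumptions; a reachable THREDDS server is required for the
# call to return anything useful):
def _example_latest_opendap_url(catalog_url):
    """Return the OPENDAP access URL of the latest dataset below a top-level
    catalog.xml URL on a THREDDS server."""
    top = TDSCatalog(catalog_url)   # parse the top-level catalog
    print(list(top.datasets))       # names of the datasets found
    return get_latest_access_url(catalog_url, 'OPENDAP')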
|
|
from django.utils.translation import ugettext as _
from dimagi.ext.jsonobject import JsonObject, StringProperty, BooleanProperty, ListProperty, DictProperty, ObjectProperty
from jsonobject.base import DefaultProperty
from sqlagg import CountUniqueColumn, SumColumn
from sqlagg.columns import (
MonthColumn,
SimpleColumn,
YearColumn,
)
from corehq.apps.reports.sqlreport import DatabaseColumn, AggregateColumn
from corehq.apps.userreports.indicators.specs import DataTypeProperty
from corehq.apps.userreports.reports.filters import DateFilterValue, ChoiceListFilterValue, \
NumericFilterValue
from corehq.apps.userreports.specs import TypeProperty
from corehq.apps.userreports.sql import get_expanded_column_config, SqlColumnConfig
from corehq.apps.userreports.transforms.factory import TransformFactory
from corehq.apps.userreports.util import localize
SQLAGG_COLUMN_MAP = {
'count_unique': CountUniqueColumn,
'month': MonthColumn,
'sum': SumColumn,
'simple': SimpleColumn,
'year': YearColumn,
}
ASCENDING = "ASC"
DESCENDING = "DESC"
class ReportFilter(JsonObject):
type = StringProperty(required=True)
slug = StringProperty(required=True)
field = StringProperty(required=True)
display = DefaultProperty()
compare_as_string = BooleanProperty(default=False)
def create_filter_value(self, value):
return {
'date': DateFilterValue,
'numeric': NumericFilterValue,
'choice_list': ChoiceListFilterValue,
'dynamic_choice_list': ChoiceListFilterValue,
}[self.type](self, value)
class ReportColumn(JsonObject):
type = StringProperty(required=True)
column_id = StringProperty(required=True)
display = DefaultProperty()
description = StringProperty()
transform = DictProperty()
def format_data(self, data):
"""
Subclasses can apply formatting to the entire dataset.
"""
pass
def get_sql_column_config(self, data_source_config, lang):
raise NotImplementedError('subclasses must override this')
def get_format_fn(self):
"""
A function that gets applied to the data just in time before the report is rendered.
"""
if self.transform:
return TransformFactory.get_transform(self.transform).get_transform_function()
return None
def get_group_by_columns(self):
raise NotImplementedError(_("You can't group by columns of type {}".format(self.type)))
def get_header(self, lang):
return localize(self.display, lang)
def get_column_ids(self):
"""
Used as an abstraction layer for columns that can contain more than one data column
(for example, PercentageColumns).
"""
return [self.column_id]
class FieldColumn(ReportColumn):
type = TypeProperty('field')
field = StringProperty(required=True)
aggregation = StringProperty(
choices=SQLAGG_COLUMN_MAP.keys(),
required=True,
)
format = StringProperty(default='default', choices=[
'default',
'percent_of_total',
])
@classmethod
def wrap(cls, obj):
# lazy migrations for legacy data.
# todo: remove once all reports are on new format
# 1. set column_id to alias, or field if no alias found
_add_column_id_if_missing(obj)
# 2. if aggregation='expand' convert to ExpandedColumn
if obj.get('aggregation') == 'expand':
del obj['aggregation']
obj['type'] = 'expanded'
return ExpandedColumn.wrap(obj)
return super(FieldColumn, cls).wrap(obj)
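    # Worked example of the lazy migration above (dict values are illustrative,
    # not a real report config): wrapping {'field': 'dob', 'aggregation': 'expand'}
    # drops the 'aggregation' key, sets type='expanded' and returns an
    # ExpandedColumn, while {'field': 'dob', 'aggregation': 'simple'} stays a
    # FieldColumn and simply gets column_id='dob' filled in.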
def format_data(self, data):
if self.format == 'percent_of_total':
column_name = self.column_id
total = sum(row[column_name] for row in data)
for row in data:
row[column_name] = '{:.0%}'.format(
float(row[column_name]) / total
)
def get_sql_column_config(self, data_source_config, lang):
return SqlColumnConfig(columns=[
DatabaseColumn(
header=self.get_header(lang),
agg_column=SQLAGG_COLUMN_MAP[self.aggregation](self.field, alias=self.column_id),
sortable=False,
data_slug=self.column_id,
format_fn=self.get_format_fn(),
help_text=self.description
)
])
def get_group_by_columns(self):
return [self.column_id]
class ExpandedColumn(ReportColumn):
type = TypeProperty('expanded')
field = StringProperty(required=True)
@classmethod
def wrap(cls, obj):
# lazy migrations for legacy data.
# todo: remove once all reports are on new format
_add_column_id_if_missing(obj)
return super(ExpandedColumn, cls).wrap(obj)
def get_sql_column_config(self, data_source_config, lang):
return get_expanded_column_config(data_source_config, self, lang)
class AggregateDateColumn(ReportColumn):
"""
Used for grouping months and years together.
"""
type = TypeProperty('aggregate_date')
field = StringProperty(required=True)
def get_sql_column_config(self, data_source_config, lang):
return SqlColumnConfig(columns=[
AggregateColumn(
header=self.get_header(lang),
aggregate_fn=lambda year, month: {'year': year, 'month': month},
format_fn=self.get_format_fn(),
columns=[
YearColumn(self.field, alias=self._year_column_alias()),
MonthColumn(self.field, alias=self._month_column_alias()),
],
slug=self.column_id,
data_slug=self.column_id,
)],
)
def _year_column_alias(self):
return '{}_year'.format(self.column_id)
def _month_column_alias(self):
return '{}_month'.format(self.column_id)
def get_format_fn(self):
# todo: support more aggregation/more formats
return lambda data: '{}-{:02d}'.format(int(data['year']), int(data['month']))
def get_group_by_columns(self):
return [self._year_column_alias(), self._month_column_alias()]
class PercentageColumn(ReportColumn):
type = TypeProperty('percent')
numerator = ObjectProperty(FieldColumn, required=True)
denominator = ObjectProperty(FieldColumn, required=True)
format = StringProperty(choices=['percent', 'fraction', 'both'], default='percent')
def get_sql_column_config(self, data_source_config, lang):
# todo: better checks that fields are not expand
num_config = self.numerator.get_sql_column_config(data_source_config, lang)
denom_config = self.denominator.get_sql_column_config(data_source_config, lang)
return SqlColumnConfig(columns=[
AggregateColumn(
header=self.get_header(lang),
aggregate_fn=lambda n, d: {'num': n, 'denom': d},
format_fn=self.get_format_fn(),
columns=[c.view for c in num_config.columns + denom_config.columns],
slug=self.column_id,
data_slug=self.column_id,
)],
warnings=num_config.warnings + denom_config.warnings,
)
def get_format_fn(self):
NO_DATA_TEXT = '--'
CANT_CALCULATE_TEXT = '?'
def _pct(data):
if data['denom']:
try:
return '{0:.0f}%'.format((float(data['num']) / float(data['denom'])) * 100)
except (ValueError, TypeError):
return CANT_CALCULATE_TEXT
return NO_DATA_TEXT
_fraction = lambda data: '{num}/{denom}'.format(**data)
return {
'percent': _pct,
'fraction': _fraction,
'both': lambda data: '{} ({})'.format(_pct(data), _fraction(data))
}[self.format]
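    # Worked example of the formatters above (numbers are illustrative): for
    # data = {'num': 3, 'denom': 12}, 'percent' renders '25%', 'fraction'
    # renders '3/12', and 'both' renders '25% (3/12)'; a zero denominator
    # renders '--' and a non-numeric numerator renders '?'.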
def get_column_ids(self):
# override this to include the columns for the numerator and denominator as well
return [self.column_id, self.numerator.column_id, self.denominator.column_id]
def _add_column_id_if_missing(obj):
if obj.get('column_id') is None:
obj['column_id'] = obj.get('alias') or obj['field']
class FilterChoice(JsonObject):
value = DefaultProperty()
display = StringProperty()
def get_display(self):
return self.display or self.value
class FilterSpec(JsonObject):
"""
This is the spec for a report filter - a thing that should show up as a UI filter element
in a report (like a date picker or a select list).
"""
type = StringProperty(required=True, choices=['date', 'numeric', 'choice_list', 'dynamic_choice_list'])
slug = StringProperty(required=True) # this shows up as the ID in the filter HTML
field = StringProperty(required=True) # this is the actual column that is queried
display = DefaultProperty()
required = BooleanProperty(default=False)
datatype = DataTypeProperty(default='string')
def get_display(self):
return self.display or self.slug
class DateFilterSpec(FilterSpec):
compare_as_string = BooleanProperty(default=False)
class ChoiceListFilterSpec(FilterSpec):
type = TypeProperty('choice_list')
show_all = BooleanProperty(default=True)
datatype = DataTypeProperty(default='string')
choices = ListProperty(FilterChoice)
class DynamicChoiceListFilterSpec(FilterSpec):
type = TypeProperty('dynamic_choice_list')
show_all = BooleanProperty(default=True)
datatype = DataTypeProperty(default='string')
@property
def choices(self):
return []
class NumericFilterSpec(FilterSpec):
type = TypeProperty('numeric')
class ChartSpec(JsonObject):
type = StringProperty(required=True)
title = StringProperty()
class PieChartSpec(ChartSpec):
type = TypeProperty('pie')
aggregation_column = StringProperty()
value_column = StringProperty(required=True)
class MultibarChartSpec(ChartSpec):
type = TypeProperty('multibar')
aggregation_column = StringProperty()
x_axis_column = StringProperty(required=True)
y_axis_columns = ListProperty(unicode)
is_stacked = BooleanProperty(default=False)
class MultibarAggregateChartSpec(ChartSpec):
type = TypeProperty('multibar-aggregate')
primary_aggregation = StringProperty(required=True)
secondary_aggregation = StringProperty(required=True)
value_column = StringProperty(required=True)
class OrderBySpec(JsonObject):
field = StringProperty()
order = StringProperty(choices=[ASCENDING, DESCENDING], default=ASCENDING)
|
|
"""
Listener handling functionality for Empire.
Handles listener startup from the database, listener
shutdowns, and maintains the current listener
configuration.
"""
import http
import helpers
from pydispatch import dispatcher
import hashlib
import sqlite3
class Listeners:
def __init__(self, MainMenu, args=None):
# pull out the controller objects
self.mainMenu = MainMenu
self.conn = MainMenu.conn
self.agents = MainMenu.agents
self.modules = None
self.stager = None
self.installPath = self.mainMenu.installPath
# {listenerId : EmpireServer object}
self.listeners = {}
self.args = args
# used to get a dict back from the query
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
# set the initial listener config to be the config defaults
self.conn.row_factory = dict_factory
cur = self.conn.cursor()
cur.execute("SELECT staging_key,default_delay,default_jitter,default_profile,default_cert_path,default_port,default_lost_limit FROM config")
defaults = cur.fetchone()
cur.close()
self.conn.row_factory = None
# the current listener config options
self.options = {
'Name' : {
'Description' : 'Listener name.',
'Required' : True,
'Value' : 'test'
},
'Host' : {
'Description' : 'Hostname/IP for staging.',
'Required' : True,
'Value' : "http://" + helpers.lhost() + ":" + defaults['default_port']
},
'Type' : {
'Description' : 'Listener type (native, pivot, hop, foreign, meter).',
'Required' : True,
'Value' : "native"
},
'RedirectTarget' : {
'Description' : 'Listener target to redirect to for pivot/hop.',
'Required' : False,
'Value' : ""
},
'StagingKey' : {
'Description' : 'Staging key for initial agent negotiation.',
'Required' : True,
'Value' : defaults['staging_key']
},
'DefaultDelay' : {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : True,
'Value' : defaults['default_delay']
},
'DefaultJitter' : {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : defaults['default_jitter']
},
'DefaultLostLimit' : {
'Description' : 'Number of missed checkins before exiting',
'Required' : True,
'Value' : defaults['default_lost_limit']
},
'DefaultProfile' : {
'Description' : 'Default communication profile for the agent.',
'Required' : True,
'Value' : defaults['default_profile']
},
'CertPath' : {
'Description' : 'Certificate path for https listeners.',
'Required' : False,
'Value' : defaults['default_cert_path']
},
'Port' : {
'Description' : 'Port for the listener.',
'Required' : True,
'Value' : defaults['default_port']
},
'KillDate' : {
'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
'Required' : False,
'Value' : ''
},
'WorkingHours' : {
'Description' : 'Hours for the agent to operate (09:00-17:00).',
'Required' : False,
'Value' : ''
}
}
def start_existing_listeners(self):
"""
        Start up any listeners that are currently in the database.
"""
cur = self.conn.cursor()
cur.execute("SELECT id,name,host,port,cert_path,staging_key,default_delay,default_jitter,default_profile,kill_date,working_hours,listener_type,redirect_target,default_lost_limit FROM listeners")
results = cur.fetchall()
cur.close()
# for each listener in the database, add it to the cache
for result in results:
# don't start the listener unless it's a native one
if result[11] != "native":
self.listeners[result[0]] = None
else:
lhost = http.host2lhost(result[2])
port = result[3]
# if cert_path is empty, no ssl is used
cert_path = result[4]
                # build the handler server and kick it off
server = http.EmpireServer(self.agents, lhost=lhost, port=port, cert=cert_path)
# check if the listener started correctly
if server.success:
server.start()
if (server.base_server()):
                        # store off this server in the "[id] : server" object array
# only if the server starts up correctly
self.listeners[result[0]] = server
def set_listener_option(self, option, value):
"""
Set a listener option in the listener dictionary.
"""
# parse and auto-set some host parameters
if option == "Host":
if not value.startswith("http"):
# if there's a current ssl cert path set, assume this is https
if self.options['CertPath']['Value'] != "":
self.options['Host']['Value'] = "https://"+str(value)
else:
# otherwise assume it's http
self.options['Host']['Value'] = "http://"+str(value)
                # if there's a port specified, record it as well; value already
                # contains the port, so it is not appended to Host a second time
                parts = value.split(":")
                if len(parts) > 1:
                    self.options['Port']['Value'] = parts[1]
elif value.startswith("https"):
self.options['Host']['Value'] = value
if self.options['CertPath']['Value'] == "":
print helpers.color("[!] Error: Please specify a SSL cert path first")
return False
else:
parts = value.split(":")
# check if we have a port to extract
if len(parts) == 3:
# in case there's a resource uri at the end
parts = parts[2].split("/")
self.options['Port']['Value'] = parts[0]
else:
self.options['Port']['Value'] = "443"
elif value.startswith("http"):
self.options['Host']['Value'] = value
parts = value.split(":")
# check if we have a port to extract
if len(parts) == 3:
# in case there's a resource uri at the end
parts = parts[2].split("/")
self.options['Port']['Value'] = parts[0]
else:
self.options['Port']['Value'] = "80"
return True
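        # Worked example of the Host parsing above (addresses are illustrative):
        # set_listener_option('Host', '192.168.1.10') with no CertPath set
        # stores Host='http://192.168.1.10', while
        # set_listener_option('Host', 'https://10.0.0.5') with a CertPath set
        # stores Host='https://10.0.0.5' and Port='443'.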
elif option == "CertPath":
self.options[option]['Value'] = value
host = self.options["Host"]['Value']
            # if we're setting an SSL cert path, but the host is specified as http
if host.startswith("http:"):
self.options["Host"]['Value'] = self.options["Host"]['Value'].replace("http:", "https:")
return True
elif option == "Port":
self.options[option]['Value'] = value
# set the port in the Host configuration as well
host = self.options["Host"]['Value']
parts = host.split(":")
if len(parts) == 2 or len(parts) == 3:
self.options["Host"]['Value'] = parts[0] + ":" + parts[1] + ":" + str(value)
return True
elif option == "StagingKey":
# if the staging key isn't 32 characters, assume we're md5 hashing it
if len(value) != 32:
self.options[option]['Value'] = hashlib.md5(value).hexdigest()
return True
elif option in self.options:
self.options[option]['Value'] = value
if option.lower() == "type":
if value.lower() == "hop":
# set the profile for hop.php for hop
parts = self.options['DefaultProfile']['Value'].split("|")
self.options['DefaultProfile']['Value'] = "/hop.php|" + "|".join(parts[1:])
return True
else:
print helpers.color("[!] Error: invalid option name")
return False
def get_listener_options(self):
"""
Return all currently set listener options.
"""
return self.options.keys()
def kill_listener(self, listenerId):
"""
Shut a listener down and remove it from the database.
"""
self.shutdown_listener(listenerId)
self.delete_listener(listenerId)
def delete_listener(self, listenerId):
"""
Shut down the server associated with a listenerId and delete the
listener from the database.
"""
# see if we were passed a name instead of an ID
nameid = self.get_listener_id(listenerId)
if nameid : listenerId = nameid
# shut the listener down and remove it from the cache
self.shutdown_listener(listenerId)
# remove the listener from the database
cur = self.conn.cursor()
cur.execute("DELETE FROM listeners WHERE id=?", [listenerId])
cur.close()
def shutdown_listener(self, listenerId):
"""
Shut down the server associated with a listenerId/name, but DON'T
delete it from the database.
If the listener is a pivot, task the associated agent to kill the redirector.
"""
try:
# get the listener information
[ID,name,host,port,cert_path,staging_key,default_delay,default_jitter,default_profile,kill_date,working_hours,listener_type,redirect_target,default_lost_limit] = self.get_listener(listenerId)
listenerId = int(ID)
if listenerId in self.listeners:
# can't shut down hop, foreign, or meter listeners
if listener_type == "hop" or listener_type == "foreign" or listener_type == "meter":
pass
# if this listener is a pivot, task the associated agent to shut it down
elif listener_type == "pivot":
print helpers.color("[*] Tasking pivot listener to shut down on agent " + name)
killCmd = "netsh interface portproxy reset"
self.agents.add_agent_task(name, "TASK_SHELL", killCmd)
else:
# otherwise get the server object associated with this listener and shut it down
self.listeners[listenerId].shutdown()
# remove the listener object from the internal cache
del self.listeners[listenerId]
except Exception as e:
dispatcher.send("[!] Error shutting down listener " + str(listenerId), sender="Listeners")
def get_listener(self, listenerId):
"""
        Get a specific listener from the database.
"""
# see if we were passed a name instead of an ID
nameid = self.get_listener_id(listenerId)
if nameid : listenerId = nameid
cur = self.conn.cursor()
cur.execute("SELECT id,name,host,port,cert_path,staging_key,default_delay,default_jitter,default_profile,kill_date,working_hours,listener_type,redirect_target,default_lost_limit FROM listeners WHERE id=?", [listenerId])
listener = cur.fetchone()
cur.close()
return listener
def get_listeners(self):
"""
Return all listeners in the database.
"""
cur = self.conn.cursor()
cur.execute("SELECT * FROM listeners")
results = cur.fetchall()
cur.close()
return results
def get_listener_names(self):
"""
Return all listener names in the database.
"""
cur = self.conn.cursor()
cur.execute("SELECT name FROM listeners")
results = cur.fetchall()
cur.close()
results = [str(n[0]) for n in results]
return results
def get_listener_ids(self):
"""
Return all listener IDs in the database.
"""
cur = self.conn.cursor()
cur.execute("SELECT id FROM listeners")
results = cur.fetchall()
cur.close()
results = [str(n[0]) for n in results]
return results
def is_listener_valid(self, listenerID):
"""
Check if this listener name or ID is valid/exists.
"""
cur = self.conn.cursor()
cur.execute('SELECT * FROM listeners WHERE id=? or name=? limit 1', [listenerID, listenerID])
results = cur.fetchall()
cur.close()
return len(results) > 0
def is_listener_empire(self, listenerID):
"""
Check if this listener name is for Empire (otherwise for meter).
"""
cur = self.conn.cursor()
cur.execute('SELECT listener_type FROM listeners WHERE id=? or name=? limit 1', [listenerID, listenerID])
results = cur.fetchall()
cur.close()
if results:
if results[0][0].lower() == "meter":
return False
else:
return True
else:
return None
def get_listener_id(self, name):
"""
Resolve a name or port to listener ID.
"""
cur = self.conn.cursor()
cur.execute('SELECT id FROM listeners WHERE name=?', [name])
results = cur.fetchone()
cur.close()
if results:
return results[0]
else:
return None
def get_staging_information(self, listenerId=None, port=None, host=None):
"""
        Resolve a name or port to agent staging information:
        staging_key, default_delay, default_jitter, default_profile.
"""
stagingInformation = None
if(listenerId):
cur = self.conn.cursor()
cur.execute('SELECT host,port,cert_path,staging_key,default_delay,default_jitter,default_profile,kill_date,working_hours,listener_type,redirect_target,default_lost_limit FROM listeners WHERE id=? or name=? limit 1', [listenerId, listenerId])
stagingInformation = cur.fetchone()
cur.close()
elif(port):
cur = self.conn.cursor()
cur.execute("SELECT host,port,cert_path,staging_key,default_delay,default_jitter,default_profile,kill_date,working_hours,listener_type,redirect_target,default_lost_limit FROM listeners WHERE port=?", [port])
stagingInformation = cur.fetchone()
cur.close()
# used to get staging info for hop.php relays
elif(host):
cur = self.conn.cursor()
cur.execute("SELECT host,port,cert_path,staging_key,default_delay,default_jitter,default_profile,kill_date,working_hours,listener_type,redirect_target,default_lost_limit FROM listeners WHERE host=?", [host])
stagingInformation = cur.fetchone()
cur.close()
return stagingInformation
def get_stager_config(self, listenerID):
"""
        Returns the (host, stagingKey, pivotServer, hop, defaultDelay) information for this listener.
Used in stagers.py to generate the various stagers.
"""
listener = self.get_listener(listenerID)
if listener:
# TODO: redo this SQL query so it's done by dict values
name = listener[1]
host = listener[2]
port = listener[3]
certPath = listener[4]
stagingKey = listener[5]
defaultDelay = listener[6]
listenerType = listener[11]
redirectTarget = listener[12]
hop = False
# if we have a pivot listener
pivotServer = ""
if listenerType == "pivot":
# get the internal agent IP for this agent
temp = self.agents.get_agent_internal_ip(name)
if(temp):
internalIP = temp[0]
else:
print helpers.color("[!] Agent for pivot listener no longer active.")
return ""
if certPath != "":
pivotServer = "https://"
else:
pivotServer = "http://"
pivotServer += internalIP + ":" + str(port)
elif listenerType == "hop":
hop = True
return (host, stagingKey, pivotServer, hop, defaultDelay)
else:
print helpers.color("[!] Error in listeners.get_stager_config(): no listener information returned")
return None
def validate_listener_options(self):
"""
Validate all currently set listener options.
"""
# make sure all options are set
for option,values in self.options.iteritems():
if values['Required'] and (values['Value'] == ''):
return False
# make sure the name isn't already taken
if self.is_listener_valid(self.options['Name']['Value']):
for x in xrange(1,20):
self.options['Name']['Value'] = self.options['Name']['Value'] + str(x)
if not self.is_listener_valid(self.options['Name']['Value']):
break
if self.is_listener_valid(self.options['Name']['Value']):
print helpers.color("[!] Listener name already used.")
return False
# if this is a pivot or hop listener, make sure we have a redirect listener target
if self.options['Type']['Value'] == "pivot" or self.options['Type']['Value'] == "hop":
if self.options['RedirectTarget']['Value'] == '':
return False
return True
def add_listener_from_config(self):
"""
Start up a new listener with the internal config information.
"""
name = self.options['Name']['Value']
host = self.options['Host']['Value']
port = self.options['Port']['Value']
certPath = self.options['CertPath']['Value']
stagingKey = self.options['StagingKey']['Value']
defaultDelay = self.options['DefaultDelay']['Value']
defaultJitter = self.options['DefaultJitter']['Value']
defaultProfile = self.options['DefaultProfile']['Value']
killDate = self.options['KillDate']['Value']
workingHours = self.options['WorkingHours']['Value']
listenerType = self.options['Type']['Value']
redirectTarget = self.options['RedirectTarget']['Value']
defaultLostLimit = self.options['DefaultLostLimit']['Value']
# validate all of the options
if self.validate_listener_options():
# if the listener name already exists, iterate the name
# until we have a valid one
if self.is_listener_valid(name):
baseName = name
for x in xrange(1,20):
name = str(baseName) + str(x)
if not self.is_listener_valid(name):
break
if self.is_listener_valid(name):
return (False, "Listener name already used.")
# don't actually start a pivot/hop listener, foreign listeners, or meter listeners
if listenerType == "pivot" or listenerType == "hop" or listenerType == "foreign" or listenerType == "meter":
# double-check that the host ends in .php for hop listeners
if listenerType == "hop" and not host.endswith(".php"):
                    choice = raw_input(helpers.color("[!] Host does not end with .php, continue? [y/N] "))
if choice.lower() == "" or choice.lower()[0] == "n":
return (False, "")
cur = self.conn.cursor()
results = cur.execute("INSERT INTO listeners (name, host, port, cert_path, staging_key, default_delay, default_jitter, default_profile, kill_date, working_hours, listener_type, redirect_target,default_lost_limit) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)", [name, host, port, certPath, stagingKey, defaultDelay, defaultJitter, defaultProfile, killDate, workingHours, listenerType, redirectTarget,defaultLostLimit] )
# get the ID for the listener
cur.execute("SELECT id FROM listeners where name=?", [name])
result = cur.fetchone()
cur.close()
self.listeners[result[0]] = None
return (True, name)
else:
lhost = http.host2lhost(host)
# start up the server object
server = http.EmpireServer(self.agents, lhost=lhost, port=port, cert=certPath)
# check if the listener started correctly
if server.success:
server.start()
if (server.base_server()):
                        # add the listener to the database if it started up correctly
cur = self.conn.cursor()
results = cur.execute("INSERT INTO listeners (name, host, port, cert_path, staging_key, default_delay, default_jitter, default_profile, kill_date, working_hours, listener_type, redirect_target, default_lost_limit) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)", [name, host, port, certPath, stagingKey, defaultDelay, defaultJitter, defaultProfile, killDate, workingHours, listenerType, redirectTarget,defaultLostLimit] )
# get the ID for the listener
cur.execute("SELECT id FROM listeners where name=?", [name])
result = cur.fetchone()
cur.close()
# store off this server in the "[id] : server" object array
# only if the server starts up correctly
self.listeners[result[0]] = server
return (True, name)
else:
return (False, "Misc. error starting listener")
else:
return (False, "Error starting listener on port %s, port likely already in use." %(port))
else:
return (False, "Required listener option missing.")
def add_pivot_listener(self, listenerName, sessionID, listenPort):
"""
Add a pivot listener associated with the sessionID agent on listenPort.
This doesn't actually start a server, but rather clones the config
for listenerName and sets everything in the database as appropriate.
"""
# get the internal agent IP for this agent
internalIP = self.agents.get_agent_internal_ip(sessionID)[0]
if internalIP == "":
print helpers.color("[!] Invalid internal IP retrieved for "+sessionID+", not adding as pivot listener.")
# make sure there isn't already a pivot listener on this agent
elif self.is_listener_valid(sessionID):
print helpers.color("[!] Pivot listener already exists on this agent.")
else:
# get the existing listener options
[ID,name,host,port,cert_path,staging_key,default_delay,default_jitter,default_profile,kill_date,working_hours,listener_type,redirect_target,defaultLostLimit] = self.get_listener(listenerName)
cur = self.conn.cursor()
if cert_path != "":
pivotHost = "https://"
else:
pivotHost = "http://"
pivotHost += internalIP + ":" + str(listenPort)
# insert the pivot listener with name=sessionID for the pivot agent
cur.execute("INSERT INTO listeners (name, host, port, cert_path, staging_key, default_delay, default_jitter, default_profile, kill_date, working_hours, listener_type, redirect_target,default_lost_limit) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)", [sessionID, pivotHost, listenPort, cert_path, staging_key, default_delay, default_jitter, default_profile, kill_date, working_hours, "pivot", name,defaultLostLimit] )
# get the ID for the listener
cur.execute("SELECT id FROM listeners where name=?", [sessionID])
result = cur.fetchone()
cur.close()
# we don't actually have a server object, so just store None
self.listeners[result[0]] = None
def killall(self):
"""
Kill all active listeners and remove them from the database.
"""
# get all the listener IDs from the cache and delete each
for listenerId in self.listeners.keys():
self.kill_listener(listenerId)
def shutdownall(self):
"""
Shut down all active listeners but don't clear them from
the database.
Don't shut down pivot/hop listeners.
"""
# get all the listener IDs from the cache and delete each
for listenerId in self.listeners.keys():
# skip pivot/hop listeners
if self.listeners[listenerId]:
self.shutdown_listener(listenerId)
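

# Hypothetical usage sketch (the menu object and option values below are
# illustrative assumptions; in Empire these calls are normally driven by the
# interactive menus rather than made directly):
#
#     listeners = Listeners(main_menu)
#     listeners.set_listener_option('Name', 'ops')
#     listeners.set_listener_option('Host', '192.168.1.10')
#     listeners.set_listener_option('Port', '8080')
#     success, name = listeners.add_listener_from_config()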
|
|
from django.core.exceptions import ValidationError
try:
    # up to Django 1.8.x
from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor
from django.db.models.fields.related import ManyRelatedObjectsDescriptor
from django.db.models.fields.related import ReverseManyRelatedObjectsDescriptor
from django.db.models.fields.related import ForeignRelatedObjectsDescriptor
from django.db.models.fields.related import SingleRelatedObjectDescriptor
pre19syntax = True
except ImportError:
    # Django 1.9.0 and later
from django.db.models.fields.related import ForwardManyToOneDescriptor
from django.db.models.fields.related import ManyToManyDescriptor
from django.db.models.fields.related import ReverseManyToOneDescriptor
from django.db.models.fields.related import ReverseOneToOneDescriptor
pre19syntax = False
from swampdragon.model_tools import get_property, get_model
from swampdragon.serializers.field_serializers import serialize_field
from swampdragon.serializers.object_map import get_object_map
from swampdragon.serializers.serializer import Serializer
from swampdragon.serializers.serializer_importer import get_serializer
from swampdragon.serializers.field_deserializers import get_deserializer
from swampdragon.serializers.serializer_tools import get_id_mappings
from swampdragon.serializers.validation import ModelValidationError
class ModelSerializerMeta(object):
def __init__(self, options):
self.model = get_model(getattr(options, 'model'))
self.publish_fields = getattr(options, 'publish_fields', None)
if not self.publish_fields:
self.publish_fields = self.get_fields(self.model)
if isinstance(self.publish_fields, str):
self.publish_fields = (self.publish_fields, )
self.update_fields = getattr(options, 'update_fields', ())
if isinstance(self.update_fields, str):
self.update_fields = (self.update_fields, )
self.id_field = getattr(options, 'id_field', 'pk')
self.base_channel = getattr(options, 'base_channel', self.model._meta.model_name)
def get_fields(self, model):
fields = []
for f in model._meta.get_all_field_names():
field = model._meta.get_field_by_name(f)[0]
if hasattr(field, 'get_accessor_name'):
fields.append(field.get_accessor_name())
else:
fields.append(field.name)
return fields
class ModelSerializer(Serializer):
def __init__(self, data=None, instance=None, initial=None):
if data and not isinstance(data, dict):
raise Exception('data needs to be a dictionary')
self.opts = ModelSerializerMeta(self.Meta)
self._instance = instance
self._data = data
self.initial = initial or {}
self.base_fields = self._get_base_fields()
self.m2m_fields = self._get_m2m_fields()
self.related_fields = self._get_related_fields()
self.errors = {}
class Meta(object):
pass
@property
def instance(self):
return self._instance
def _get_base_fields(self):
return [f.name for f in self.opts.model._meta.fields]
def _get_related_fields(self):
return [f for f in self.opts.update_fields if f not in self.base_fields and f not in self.m2m_fields]
def _get_m2m_fields(self):
related_m2m = [f.get_accessor_name() for f in self.opts.model._meta.get_all_related_many_to_many_objects()]
m2m_fields = [f.name for f in self.opts.model._meta.local_many_to_many]
m2m = m2m_fields + related_m2m
return [f for f in self.opts.update_fields if f in m2m]
def deserialize(self):
# Set initial data
if not self._instance:
self._instance = self.opts.model()
for key, val in self.initial.items():
setattr(self.instance, key, val)
# Deserialize base fields
for key, val in self._data.items():
if key not in self.opts.update_fields or key not in self.base_fields:
continue
try:
self.validate_field(key, val, self._data)
self._deserialize_field(key, val)
except ModelValidationError as err:
self.errors.update(err.get_error_dict())
if self.errors:
raise ModelValidationError(errors=self.errors)
return self.instance
def save(self):
self.deserialize()
if self.errors:
raise ModelValidationError(self.errors)
try:
self.instance.clean_fields()
except ValidationError as e:
raise ModelValidationError(e.message_dict)
self.instance.save()
        # Deserialize related fields
for key, val in self._data.items():
if key not in self.related_fields:
continue
self._deserialize_related(key, val)
        # Deserialize m2m fields
for key, val in self._data.items():
if key not in self.m2m_fields:
continue
self._deserialize_related(key, val, save_instance=True)
return self.instance
def _deserialize_field(self, key, val):
if hasattr(self, key):
serializer = self._get_related_serializer(key)
value = serializer(val).save()
setattr(self.instance, key, value)
value.save()
return
field = self.opts.model._meta.get_field(key)
field_type = field.__class__.__name__
deserializer = get_deserializer(field_type)
if deserializer:
deserializer(self.instance, key, val)
else:
setattr(self.instance, key, val)
def _deserialize_related(self, key, val, save_instance=False):
serializer = self._get_related_serializer(key)
if isinstance(val, list):
for v in val:
related_instance = serializer(v).deserialize()
if save_instance:
related_instance.save()
getattr(self.instance, key).add(related_instance)
else:
if serializer:
related_instance = serializer(val).deserialize()
setattr(self.instance, key, related_instance)
else:
setattr(self.instance, key, val)
def _get_related_serializer(self, key):
serializer = getattr(self, key, None)
if isinstance(serializer, str):
return get_serializer(serializer, self.__class__)
return serializer
def get_object_map_data(self):
return {
'id': getattr(self.instance, self.opts.id_field),
'_type': self.opts.model._meta.model_name
}
def serialize(self, fields=None, ignore_serializers=None):
if not fields:
fields = self.opts.publish_fields
if not self.instance:
return None
data = self.get_object_map_data()
# Set all the ids for related models
# so the datamapper can find the connection
data.update(get_id_mappings(self))
# Serialize the fields
for field in fields:
data[field] = self._serialize_value(field, ignore_serializers)
custom_serializer_functions = self._get_custom_field_serializers()
for custom_function, name in custom_serializer_functions:
serializer = getattr(self, name, None)
if serializer:
serializer = get_serializer(serializer, self)
data[name] = custom_function(self.instance, serializer)
else:
data[name] = custom_function(self.instance)
return data
def _serialize_value(self, attr_name, ignore_serializers=None):
obj_serializer = self._get_related_serializer(attr_name)
# To prevent infinite recursion, allow serializers to be ignored
if ignore_serializers and obj_serializer in ignore_serializers:
return None
val = get_property(self.instance, attr_name)
# If we have one or more related models
if obj_serializer and hasattr(val, 'all'):
return [obj_serializer(instance=o).serialize(ignore_serializers=[self.__class__]) for o in val.all()]
elif obj_serializer:
return obj_serializer(instance=val).serialize(ignore_serializers=[self.__class__])
elif hasattr(self.opts.model, attr_name):
# Check if the field is a relation of any kind
field_type = getattr(self.opts.model, attr_name)
if pre19syntax:
# Reverse FK
if isinstance(field_type, ReverseSingleRelatedObjectDescriptor):
rel = get_property(self.instance, attr_name)
if rel:
val = rel.pk
# FK
elif isinstance(field_type, ForeignRelatedObjectsDescriptor):
val = list(get_property(self.instance, attr_name).all().values_list('pk', flat=True))
elif isinstance(field_type, ReverseManyRelatedObjectsDescriptor):
val = list(get_property(self.instance, attr_name).all().values_list('pk', flat=True))
elif isinstance(field_type, ManyRelatedObjectsDescriptor):
val = list(get_property(self.instance, attr_name).all().values_list('pk', flat=True))
else:
if isinstance(field_type, ForwardManyToOneDescriptor):
rel = get_property(self.instance, attr_name)
if rel:
val = rel.pk
elif isinstance(field_type, ReverseManyToOneDescriptor):
val = list(get_property(self.instance, attr_name).all().values_list('pk', flat=True))
elif isinstance(field_type, ManyToManyDescriptor) and field_type.reverse:
val = list(get_property(self.instance, attr_name).all().values_list('pk', flat=True))
elif isinstance(field_type, ManyToManyDescriptor) and not field_type.reverse:
val = list(get_property(self.instance, attr_name).all().values_list('pk', flat=True))
# Serialize the field
return serialize_field(val)
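# Note on the ignore_serializers guard above (illustrative, the serializer names are
# hypothetical): if AuthorSerializer declares a nested BookSerializer and BookSerializer
# declares a nested AuthorSerializer, serializing an Author would otherwise recurse
# forever. Passing ignore_serializers=[self.__class__] when descending into the nested
# side breaks that cycle: the nested serializer returns None for the back-reference
# instead of descending into it again.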
@classmethod
def get_object_map(cls, include_serializers=None, ignore_serializers=None):
return get_object_map(cls, ignore_serializers)
@classmethod
def get_base_channel(cls):
if hasattr(cls.Meta, 'base_channel'):
return '{}|'.format(getattr(cls.Meta, 'base_channel'))
return '{}|'.format(get_model(cls.Meta.model)._meta.model_name)
@classmethod
def get_related_serializers(cls):
possible_serializers = [k for k in cls.__dict__.keys() if not k.startswith('_') and not k == 'Meta']
serializers = []
for possible_serializer in possible_serializers:
val = getattr(cls, possible_serializer)
if isinstance(val, str):
val = get_serializer(val, cls)
if hasattr(val, 'serialize'):
serializers.append((val, possible_serializer))
return serializers
|
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper around git blame that ignores certain commits.
"""
from __future__ import print_function
import argparse
import collections
import logging
import os
import subprocess2
import sys
import git_common
import git_dates
import setup_color
logging.getLogger().setLevel(logging.INFO)
DEFAULT_IGNORE_FILE_NAME = '.git-blame-ignore-revs'
class Commit(object):
"""Info about a commit."""
def __init__(self, commithash):
self.commithash = commithash
self.author = None
self.author_mail = None
self.author_time = None
self.author_tz = None
self.committer = None
self.committer_mail = None
self.committer_time = None
self.committer_tz = None
self.summary = None
self.boundary = None
self.previous = None
self.filename = None
def __repr__(self): # pragma: no cover
return '<Commit %s>' % self.commithash
BlameLine = collections.namedtuple(
'BlameLine',
'commit context lineno_then lineno_now modified')
def parse_blame(blameoutput):
"""Parses the output of git blame -p into a data structure."""
lines = blameoutput.split('\n')
i = 0
commits = {}
while i < len(lines):
# Read a commit line and parse it.
line = lines[i]
i += 1
if not line.strip():
continue
commitline = line.split()
commithash = commitline[0]
lineno_then = int(commitline[1])
lineno_now = int(commitline[2])
try:
commit = commits[commithash]
except KeyError:
commit = Commit(commithash)
commits[commithash] = commit
# Read commit details until we find a context line.
while i < len(lines):
line = lines[i]
i += 1
if line.startswith('\t'):
break
try:
key, value = line.split(' ', 1)
except ValueError:
key = line
value = True
setattr(commit, key.replace('-', '_'), value)
context = line[1:]
yield BlameLine(commit, context, lineno_then, lineno_now, False)
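# Illustrative sketch of the porcelain format consumed above (hash and names are
# made up, not from a real repository):
#
#   8e5f1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8f 1 1 1
#   author Jane Doe
#   author-mail <jane@example.com>
#   ...
#   \tdef foo():
#
# The header line gives the commit hash, the line number in that commit
# (lineno_then) and the line number in the current file (lineno_now). The
# key/value lines populate the Commit attributes, and the tab-prefixed line is
# the file content, yielded here as BlameLine(commit, 'def foo():', 1, 1, False).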
def print_table(table, colsep=' ', rowsep='\n', align=None, out=sys.stdout):
"""Print a 2D rectangular array, aligning columns with spaces.
Args:
align: Optional string of 'l' and 'r', designating whether each column is
left- or right-aligned. Defaults to left aligned.
"""
if len(table) == 0:
return
colwidths = None
for row in table:
if colwidths is None:
colwidths = [len(x) for x in row]
else:
colwidths = [max(colwidths[i], len(x)) for i, x in enumerate(row)]
if align is None: # pragma: no cover
align = 'l' * len(colwidths)
for row in table:
cells = []
for i, cell in enumerate(row):
padding = ' ' * (colwidths[i] - len(cell))
if align[i] == 'r':
cell = padding + cell
elif i < len(row) - 1:
# Do not pad the final column if left-aligned.
cell += padding
cells.append(cell)
try:
print(*cells, sep=colsep, end=rowsep, file=out)
except IOError: # pragma: no cover
# Can happen on Windows if the pipe is closed early.
pass
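# Example usage (illustrative):
#
#   print_table([['a', 'bb'], ['ccc', 'd']], align='lr')
#
# prints, with the default two-space column separator:
#
#   a    bb
#   ccc   d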
def pretty_print(parsedblame, show_filenames=False, out=sys.stdout):
"""Pretty-prints the output of parse_blame."""
table = []
for line in parsedblame:
author_time = git_dates.timestamp_offset_to_datetime(
line.commit.author_time, line.commit.author_tz)
row = [line.commit.commithash[:8],
'(' + line.commit.author,
git_dates.datetime_string(author_time),
str(line.lineno_now) + ('*' if line.modified else '') + ')',
line.context]
if show_filenames:
row.insert(1, line.commit.filename)
table.append(row)
print_table(table, align='llllrl' if show_filenames else 'lllrl', out=out)
def get_parsed_blame(filename, revision='HEAD'):
blame = git_common.blame(filename, revision=revision, porcelain=True)
return list(parse_blame(blame))
# Map from (oldrev, newrev) to hunk list (caching the results of git diff, but
# only the hunk line numbers, not the actual diff contents).
# hunk list contains (old, new) pairs, where old and new are (start, length)
# pairs. A hunk list can also be None (if the diff failed).
diff_hunks_cache = {}
def cache_diff_hunks(oldrev, newrev):
def parse_start_length(s):
# Chop the '-' or '+'.
s = s[1:]
# Length is optional (defaults to 1).
try:
start, length = s.split(',')
except ValueError:
start = s
length = 1
return int(start), int(length)
try:
return diff_hunks_cache[(oldrev, newrev)]
except KeyError:
pass
# Use -U0 to get the smallest possible hunks.
diff = git_common.diff(oldrev, newrev, '-U0')
# Get all the hunks.
hunks = []
for line in diff.split('\n'):
if not line.startswith('@@'):
continue
ranges = line.split(' ', 3)[1:3]
ranges = tuple(parse_start_length(r) for r in ranges)
hunks.append(ranges)
diff_hunks_cache[(oldrev, newrev)] = hunks
return hunks
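# Illustrative example: a diff hunk header such as
#
#   @@ -10,3 +12,0 @@
#
# is parsed by the code above into ((10, 3), (12, 0)), i.e. three lines starting
# at old line 10 were replaced by zero lines at new line 12.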
def approx_lineno_across_revs(filename, newfilename, revision, newrevision,
lineno):
"""Computes the approximate movement of a line number between two revisions.
Consider line |lineno| in |filename| at |revision|. This function computes the
line number of that line in |newfilename| at |newrevision|. This is
necessarily approximate.
Args:
filename: The file (within the repo) at |revision|.
newfilename: The name of the same file at |newrevision|.
revision: A git revision.
newrevision: Another git revision. Note: Can be ahead or behind |revision|.
lineno: Line number within |filename| at |revision|.
Returns:
Line number within |newfilename| at |newrevision|.
"""
# This doesn't work that well if there are a lot of line changes within the
# hunk (demonstrated by GitHyperBlameLineMotionTest.testIntraHunkLineMotion).
# A fuzzy heuristic that takes the text of the new line and tries to find a
# deleted line within the hunk that mostly matches the new line could help.
# Use the <revision>:<filename> syntax to diff between two blobs. This is the
# only way to diff a file that has been renamed.
old = '%s:%s' % (revision, filename)
new = '%s:%s' % (newrevision, newfilename)
hunks = cache_diff_hunks(old, new)
cumulative_offset = 0
# Find the hunk containing lineno (if any).
for (oldstart, oldlength), (newstart, newlength) in hunks:
cumulative_offset += newlength - oldlength
if lineno >= oldstart + oldlength:
# Not there yet.
continue
if lineno < oldstart:
# Gone too far.
break
# lineno is in [oldstart, oldlength] at revision; [newstart, newlength] at
# newrevision.
# If newlength == 0, newstart will be the line before the deleted hunk.
# Since the line must have been deleted, just return that as the nearest
# line in the new file. Caution: newstart can be 0 in this case.
if newlength == 0:
return max(1, newstart)
newend = newstart + newlength - 1
# Move lineno based on the amount the entire hunk shifted.
lineno = lineno + newstart - oldstart
# Constrain the output within the range [newstart, newend].
return min(newend, max(newstart, lineno))
# Wasn't in a hunk. Figure out the line motion based on the difference in
# length between the hunks seen so far.
return lineno + cumulative_offset
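# Worked example (hypothetical hunk list): with a single hunk ((10, 3), (10, 5)),
# old line 11 falls inside the old range [10, 12], so it is shifted by
# newstart - oldstart = 0 and clamped to [10, 14], giving new line 11. Old line 20
# lies past the hunk, so it is simply offset by the cumulative growth of 2 lines,
# giving new line 22.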
def hyper_blame(ignored, filename, revision='HEAD', out=sys.stdout,
err=sys.stderr):
# Map from commit to parsed blame from that commit.
blame_from = {}
def cache_blame_from(filename, commithash):
try:
return blame_from[commithash]
except KeyError:
parsed = get_parsed_blame(filename, commithash)
blame_from[commithash] = parsed
return parsed
try:
parsed = cache_blame_from(filename, git_common.hash_one(revision))
except subprocess2.CalledProcessError as e:
err.write(e.stderr)
return e.returncode
new_parsed = []
# We don't show filenames in blame output unless we have to.
show_filenames = False
for line in parsed:
# If a line references an ignored commit, blame that commit's parent
# repeatedly until we find a non-ignored commit.
while line.commit.commithash in ignored:
if line.commit.previous is None:
# You can't ignore the commit that added this file.
break
previouscommit, previousfilename = line.commit.previous.split(' ', 1)
parent_blame = cache_blame_from(previousfilename, previouscommit)
if len(parent_blame) == 0:
# The previous version of this file was empty, therefore, you can't
# ignore this commit.
break
# line.lineno_then is the line number in question at line.commit. We need
# to translate that line number so that it refers to the position of the
# same line on previouscommit.
lineno_previous = approx_lineno_across_revs(
line.commit.filename, previousfilename, line.commit.commithash,
previouscommit, line.lineno_then)
logging.debug('ignore commit %s on line p%d/t%d/n%d',
line.commit.commithash, lineno_previous, line.lineno_then,
line.lineno_now)
# Get the line at lineno_previous in the parent commit.
assert 1 <= lineno_previous <= len(parent_blame)
newline = parent_blame[lineno_previous - 1]
# Replace the commit and lineno_then, but not the lineno_now or context.
logging.debug(' replacing with %r', newline)
line = BlameLine(newline.commit, line.context, lineno_previous,
line.lineno_now, True)
# If any line has a different filename to the file's current name, turn on
# filename display for the entire blame output.
if line.commit.filename != filename:
show_filenames = True
new_parsed.append(line)
pretty_print(new_parsed, show_filenames=show_filenames, out=out)
return 0
def parse_ignore_file(ignore_file):
for line in ignore_file:
line = line.split('#', 1)[0].strip()
if line:
yield line
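# Illustrative ignore-file contents (the hash is made up):
#
#   # Bulk whitespace cleanup
#   0123456789abcdef0123456789abcdef01234567  # reformatting only
#
# parse_ignore_file yields just the bare revision, with comments and blank lines
# stripped.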
def main(args, stdout=sys.stdout, stderr=sys.stderr):
parser = argparse.ArgumentParser(
prog='git hyper-blame',
description='git blame with support for ignoring certain commits.')
parser.add_argument('-i', metavar='REVISION', action='append', dest='ignored',
default=[], help='a revision to ignore')
parser.add_argument('--ignore-file', metavar='FILE',
type=argparse.FileType('r'), dest='ignore_file',
help='a file containing a list of revisions to ignore')
parser.add_argument('--no-default-ignores', dest='no_default_ignores',
action='store_true',
help='Do not ignore commits from .git-blame-ignore-revs.')
parser.add_argument('revision', nargs='?', default='HEAD', metavar='REVISION',
help='revision to look at')
parser.add_argument('filename', metavar='FILE', help='filename to blame')
args = parser.parse_args(args)
try:
repo_root = git_common.repo_root()
except subprocess2.CalledProcessError as e:
stderr.write(e.stderr)
return e.returncode
# Make filename relative to the repository root, and cd to the root dir (so
# all filenames throughout this script are relative to the root).
filename = os.path.relpath(args.filename, repo_root)
os.chdir(repo_root)
# Normalize filename so we can compare it to other filenames git gives us.
filename = os.path.normpath(filename)
filename = os.path.normcase(filename)
ignored_list = list(args.ignored)
if not args.no_default_ignores and os.path.exists(DEFAULT_IGNORE_FILE_NAME):
with open(DEFAULT_IGNORE_FILE_NAME) as ignore_file:
ignored_list.extend(parse_ignore_file(ignore_file))
if args.ignore_file:
ignored_list.extend(parse_ignore_file(args.ignore_file))
ignored = set()
for c in ignored_list:
try:
ignored.add(git_common.hash_one(c))
except subprocess2.CalledProcessError as e:
# Custom warning string (the message from git-rev-parse is inappropriate).
stderr.write('warning: unknown revision \'%s\'.\n' % c)
return hyper_blame(ignored, filename, args.revision, out=stdout, err=stderr)
if __name__ == '__main__': # pragma: no cover
setup_color.init()
with git_common.less() as less_input:
sys.exit(main(sys.argv[1:], stdout=less_input))
|
|
#!/usr/bin/python
import cgi
import logging
import os
import sys
import time
import urllib
import urlparse
import bs4
from itsdangerous import Signer, BadSignature
"""
Copyright 2011 Jon Rifkin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-----------------------------------------------------------------------
Usage
-----------------------------------------------------------------------
Purpose
Authenticate users against a CAS server from your python cgi scripts.
Using in your script
import pycas
status, userid, cookie = pycas.login(CAS_SERVER, THIS_SCRIPT)
Required Parameters
- CAS_SERVER : the url of your CAS server (for example, https://login.yoursite.edu).
- THIS_SCRIPT: the url of the calling python cgi script.
Returned Values
- status: return code, 0 for success.
- userid: the user name returned by cas.
- cookie: when non-blank, send this cookie to the client's browser so it can authenticate for
the rest of the session.
Optional Parameters:
- lifetime: lifetime of the cookie in seconds, enforced by pycas. Default is 0, meaning unlimited lifetime.
- path: Authentication cookie applies for all urls under 'path'. Defaults to "/" (all urls).
- protocol: CAS protocol version. Default is 2. Can be set to 1.
- secure: Default is True, which authenticates for https connections only.
- opt: set to 'renew' or 'gateway' for these CAS options.
Examples:
status, userid, cookie = pycas.login(CAS_SERVER, THIS_SCRIPT, protocol=1, secure=True)
status, userid, cookie = pycas.login(CAS_SERVER, THIS_SCRIPT, path="/cgi-bin/accts")
Status Codes are listed below.
"""
# Secret used to produce hash. This can be any string. Hackers who know this string can forge
# this script's authentication cookie.
SECRET = "7e16162998eb7efafb1498f75190a937"
# Name field for pycas cookie
PYCAS_NAME = "pycas"
# CAS Status Codes: returned to calling program by the login() function.
CAS_OK = 0 # CAS authentication successful.
CAS_COOKIE_EXPIRED = 1 # PYCAS cookie exceeded its lifetime.
CAS_COOKIE_INVALID = 2 # PYCAS cookie is invalid (probably corrupted).
CAS_TICKET_INVALID = 3 # CAS server ticket invalid.
CAS_GATEWAY = 4 # CAS server returned without ticket while in gateway mode.
# Status codes returned internally by function get_cookie_status().
COOKIE_AUTH = 0 # PYCAS cookie is valid.
COOKIE_NONE = 1 # No PYCAS cookie found.
COOKIE_GATEWAY = 2 # PYCAS gateway cookie found.
COOKIE_INVALID = 3 # Invalid PYCAS cookie found.
# Status codes returned internally by function get_ticket_status().
TICKET_OK = 0 # Valid CAS server ticket found.
TICKET_NONE = 1 # No CAS server ticket found.
TICKET_INVALID = 2 # Invalid CAS server ticket found.
CAS_MSG = (
"CAS authentication successful.",
"PYCAS cookie exceeded its lifetime.",
"PYCAS cookie is invalid (probably corrupted).",
"CAS server ticket invalid.",
"CAS server returned without ticket while in gateway mode.",
)
# Log file for debugging
LOG_FILE = "/tmp/cas.log"
logging.basicConfig(filename=LOG_FILE, level=logging.DEBUG, format='%(asctime)s %(message)s')
SIGNER = Signer(SECRET)
def _parse_tag(string, tag):
"""
Used for parsing xml. Search string for the first occurrence of <tag>...</tag> and return the text (stripped
of leading and trailing whitespace) between the tags. Return "" if the tag is not found.
"""
soup = bs4.BeautifulSoup(string, "xml")
if soup.find(tag) is None:
return ''
return soup.find(tag).string.strip()
def _split2(string, sep):
"""Split string in exactly two pieces, return '' for missing pieces."""
parts = string.split(sep, 1) + ["", ""]
return parts[0], parts[1]
def _make_pycas_cookie(val, domain, path, secure, expires=None):
"""Form cookie."""
pycascookie = "Set-Cookie: {}={};domain={};path={}".format(PYCAS_NAME, val, domain, path)
if secure:
pycascookie += ";secure"
if expires is not None:
pycascookie += ";expires=" + expires
return pycascookie
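# Illustrative result (hypothetical values):
#
#   _make_pycas_cookie("abc", "www.example.edu", "/", True)
#   -> 'Set-Cookie: pycas=abc;domain=www.example.edu;path=/;secure'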
def _do_redirect(cas_host, service_url, opt, secure):
"""Send redirect to client. This function does not return, i.e. it teminates this script."""
cas_url = cas_host + "/cas/login?service=" + service_url
if opt in ("renew", "gateway"):
cas_url += "&{}=true".format(opt)
# Print redirect page to browser
print("Refresh: 0; url={}".format(cas_url))
print("Content-type: text/html")
if opt == "gateway":
domain, path = urlparse.urlparse(service_url)[1:3]
print(_make_pycas_cookie("gateway", domain, path, secure))
print("\nIf your browser does not redirect you, then please follow <a href=\"{}\">this link</a>.\n".format(cas_url))
sys.exit(1)
def _decode_cookie(cookie_vals, lifetime=None):
"""
Retrieve id from pycas cookie and test data for validity (to prevent malicious users from falsely authenticating).
Return status and id (id will be empty string if unknown).
"""
# Test for no cookies
if cookie_vals is None:
return COOKIE_NONE, ""
# Test each cookie value
cookie_attrs = []
for cookie_val in cookie_vals:
# Remove trailing ;
if cookie_val and cookie_val[-1] == ";":
cookie_val = cookie_val[0:-1]
# Test for pycas gateway cookie
if cookie_val == "gateway":
cookie_attrs.append(COOKIE_GATEWAY)
else: # Test for valid pycas authentication cookie.
# Separate cookie parts
timestr, cookieid = _split2(cookie_val[8:], ":")
# Verify signature (Signer.unsign raises BadSignature if the cookie was tampered with)
try:
SIGNER.unsign(cookie_val)
# Check lifetime
if lifetime:
if str(int(time.time()+int(lifetime))) < timestr:
# OK: Cookie still valid.
cookie_attrs.append(COOKIE_AUTH)
else:
# ERROR: Cookie exceeded lifetime
cookie_attrs.append(CAS_COOKIE_EXPIRED)
else:
# OK: Cookie valid (it has no lifetime)
cookie_attrs.append(COOKIE_AUTH)
except BadSignature:
# ERROR: Cookie signature is invalid (probably corrupted or forged)
cookie_attrs.append(COOKIE_INVALID)
# Return status according to attribute values
# Valid authentication cookie takes precedence
if COOKIE_AUTH in cookie_attrs:
return COOKIE_AUTH, cookieid
# Gateway cookie takes next precedence
if COOKIE_GATEWAY in cookie_attrs:
return COOKIE_GATEWAY, ""
# If we've gotten here, there should be only one attribute left.
return cookie_attrs[0], ""
def _validate_cas_1(cas_host, service_url, ticket):
"""Validate ticket using cas 1.0 protocol."""
# Second Call to CAS server: Ticket found, verify it.
cas_validate = cas_host + "/cas/validate?ticket=" + ticket + "&service=" + service_url
f_validate = urllib.urlopen(cas_validate)
# Get first line - should be yes or no
response = f_validate.readline()
# Ticket does not validate, return error
if response == "no\n":
f_validate.close()
return TICKET_INVALID, ""
# Ticket validates
else:
# Get id
ticketid = f_validate.readline()
f_validate.close()
ticketid = ticketid.strip()
return TICKET_OK, ticketid
def _validate_cas_2(cas_host, service_url, ticket, opt):
"""
Validate ticket using cas 2.0 protocol
The 2.0 protocol allows the use of the mutually exclusive "renew" and "gateway" options.
"""
# Second Call to CAS server: Ticket found, verify it.
cas_validate = cas_host + "/cas/serviceValidate?ticket=" + ticket + "&service=" + service_url
if opt:
cas_validate += "&{}=true".format(opt)
f_validate = urllib.urlopen(cas_validate)
# Get first line - should be yes or no
response = f_validate.read()
ticketid = _parse_tag(response, "cas:user")
# Ticket does not validate, return error
if ticketid == "":
return TICKET_INVALID, ""
# Ticket validates
else:
return TICKET_OK, ticketid
def _get_cookies():
"""Read cookies from env variable HTTP_COOKIE."""
# Read all cookie pairs
try:
cookie_pairs = os.getenv("HTTP_COOKIE").split()
except AttributeError:
cookie_pairs = []
cookies = {}
for cookie_pair in cookie_pairs:
key, val = _split2(cookie_pair.strip(), "=")
if key in cookies:
cookies[key].append(val)
else:
cookies[key] = [val]
return cookies
def _get_cookie_status():
"""Check pycas cookie."""
cookies = _get_cookies()
return _decode_cookie(cookies.get(PYCAS_NAME))
def _get_ticket_status(cas_host, service_url, protocol, opt):
if "ticket" in cgi.FieldStorage():
ticket = cgi.FieldStorage()["ticket"].value
if protocol == 1:
ticket_status, ticketid = _validate_cas_1(cas_host, service_url, ticket)
else:
ticket_status, ticketid = _validate_cas_2(cas_host, service_url, ticket, opt)
# Make cookie and return id
if ticket_status == TICKET_OK:
return TICKET_OK, ticketid
# Return error status
else:
return ticket_status, ""
else:
return TICKET_NONE, ""
def login(cas_host, service_url, lifetime=None, secure=True, protocol=2, path="/", opt=""):
"""
Login to CAS and return user id. Return status, userid, pycas_cookie.
"""
# TODO lifetime isn't enforced
# Check cookie for previous pycas state, which is either
# COOKIE_AUTH - client already authenticated by pycas.
# COOKIE_GATEWAY - client returning from CAS_SERVER with gateway option set.
# Other cookie statuses are
# COOKIE_NONE - no cookie found.
# COOKIE_INVALID - invalid cookie found.
cookie_status, cookieid = _get_cookie_status()
if cookie_status == COOKIE_AUTH:
logging.info('login valid for {}'.format(cookieid))
return CAS_OK, cookieid, ""
if cookie_status == COOKIE_INVALID:
return CAS_COOKIE_INVALID, "", ""
# Check the ticket returned by the CAS server; the ticket status can be
# TICKET_OK - a valid authentication ticket from CAS server
# TICKET_INVALID - an invalid authentication ticket.
# TICKET_NONE - no ticket found.
# If ticket is ok, then user has authenticated, return id and
# a pycas cookie for calling program to send to web browser.
ticket_status, ticketid = _get_ticket_status(cas_host, service_url, protocol, opt)
if ticket_status == TICKET_OK:
logging.info('ticket valid for {}'.format(ticketid))
cookie_val = SIGNER.sign(ticketid)
domain = urlparse.urlparse(service_url)[1]
return CAS_OK, ticketid, _make_pycas_cookie(cookie_val, domain, path, secure)
elif ticket_status == TICKET_INVALID:
return CAS_TICKET_INVALID, "", ""
# If unauthenticated and in gateway mode, return gateway status and clear the
# pycas cookie (which was set to "gateway" by _do_redirect()).
if opt == "gateway":
if cookie_status == COOKIE_GATEWAY:
domain, path = urlparse.urlparse(service_url)[1:3]
# Set cookie expiration in the past to clear the cookie.
past_date = time.strftime("%a, %d-%b-%Y %H:%M:%S %Z", time.localtime(time.time()-48*60*60))
return CAS_GATEWAY, "", _make_pycas_cookie("", domain, path, secure, past_date)
# Do redirect
_do_redirect(cas_host, service_url, opt, secure)
#-----------------------------------------------------------------------
# Test
#-----------------------------------------------------------------------
if __name__ == "__main__":
CAS_SERVER = "https://login.uconn.edu"
SERVICE_URL = "http://bluet.ucc.uconn.edu/~jon/cgi-bin/pycas.py"
status, userid, cookie = login(CAS_SERVER, SERVICE_URL, secure=True, opt="gateway")
print("Content-type: text/html")
print(cookie)
print()
print("""
<html>
<head>
<title>
castest.py
</title>
<style type=text/css>
td {background-color: #dddddd; padding: 4px}
</style>
</head>
<body>
<h2>pycas.py</h2>
<hr>
""")
# Print browser parameters from pycas.login
if "ticket" in cgi.FieldStorage():
ticket = cgi.FieldStorage()["ticket"].value
else:
ticket = ""
in_cookie = os.getenv("HTTP_COOKIE")
print("""
<p>
<b>Parameters sent from browser</b>
<table>
<tr> <td>Ticket</td> <td>{}</td> </tr>
<tr> <td>Cookie</td> <td>{}</td> </tr>
</table>
</p>""".format(ticket, in_cookie))
# Print output from pycas.login
print("""
<p>
<b>Parameters returned from pycas.login()</b>
<table>
<tr><td>status</td><td> <b>{}</b> - <i>{}</i></td></tr>
<tr><td>id</td><td> <b>{}</b></td></tr>
<tr><td>cookie</td><td> <b>{}</b></td></tr>
</table>
</p>
</body></html>""".format(status, CAS_MSG[status], userid, cookie))
|
|
import argparse
import sqlite3
import re
import pandas as pd
import numpy as np
from TestScripts.doc.Structure import *
from TestScripts.doc.Format import *
import os.path
import yaml
refCoreName=""
# Command to get last runid
lastID="""SELECT runid FROM RUN ORDER BY runid DESC LIMIT 1
"""
# Command to get last runid and date
lastIDAndDate="""SELECT date FROM RUN WHERE runid=?
"""
# Command to get last runid
runIDDetails="""SELECT distinct core FROM %s
INNER JOIN CORE USING(coreid)
WHERE %s
"""
def joinit(iterable, delimiter):
# Intersperse a delimiter between the elements of a list
it = iter(iterable)
yield next(it)
for x in it:
yield delimiter
yield x
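# Illustrative example:
#   list(joinit(["a", "b", "c"], ", ")) -> ['a', ', ', 'b', ', ', 'c']
# so "".join(joinit(["a", "b", "c"], ", ")) -> 'a, b, c'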
def getLastRunID():
r=c.execute(lastID)
return(int(r.fetchone()[0]))
def getrunIDDate(forID):
r=c.execute(lastIDAndDate,(forID,))
return(r.fetchone()[0])
parser = argparse.ArgumentParser(description='Generate summary benchmarks')
parser.add_argument('-b', nargs='?',type = str, default="bench.db", help="Database")
parser.add_argument('-o', nargs='?',type = str, default="full.md", help="Full summary")
parser.add_argument('-r', action='store_true', help="Regression database")
parser.add_argument('-t', nargs='?',type = str, default="md", help="type md or html")
parser.add_argument('-byc', action='store_true', help="Results organized by compiler")
parser.add_argument('-g', action='store_true', help="Include graphs in regression report")
parser.add_argument('-details', action='store_true', help="Details about runids")
parser.add_argument('-lastid', action='store_true', help="Get last ID")
parser.add_argument('-comments', nargs='?',type = str, default="comments", help="Comment folder")
parser.add_argument('-byd', action='store_true', help="Results organized by datatype")
parser.add_argument('-ratio', action='store_true', help="Compute ratios for regression by core instead of cycles")
parser.add_argument('-ref', nargs='?',type = str, default="M55", help="Reference COREDEF for ratio in db")
parser.add_argument('-clampval', nargs='?',type = float, default=8.0, help="Clamp for ratio")
parser.add_argument('-clamp', action='store_true', help="Clamp enabled for ratio")
parser.add_argument('-cores', nargs='?',type = str, help="Cores to keep")
parser.add_argument('-toc', nargs='?',type = str, help="Yaml for the table of contents")
# For runid or runid range
parser.add_argument('others', nargs=argparse.REMAINDER,help="Run ID")
args = parser.parse_args()
c = sqlite3.connect(args.b)
coreidSQL="select distinct coreid from CORE where coredef==?"
def getCoreID(corename):
r=c.execute(coreidSQL,(corename,))
t=r.fetchone()
if t is None:
print("Unrecognized reference core \"%s\"" % corename)
quit()
return(t[0])
def parseSelector(o,field="runid"):
vals=[]
runidCMD=[]
# parameters are not allowed in VIEWs
runidVIEWcmd=[]
for t in o:
if re.search(r'-',t):
bounds=[int(x) for x in t.split("-")]
vals += bounds
runidCMD += ["(%s >= ? AND %s <= ?)" % (field,field)]
x=(field,bounds[0],field,bounds[1])
runidVIEWcmd += ["(%s >= %d AND %s <= %d)" % x]
else:
theid=int(t)
runidCMD += ["%s == ?" % field]
runidVIEWcmd += ["%s == %d" % (field,theid)]
vals.append(theid)
runidval = tuple(vals)
runidCMD = "(" + "".join(joinit(runidCMD," OR ")) + ")"
runidVIEWcmd = "(" + "".join(joinit(runidVIEWcmd," OR ")) + ")"
return(runidval,runidCMD,runidVIEWcmd)
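# Illustrative example (runids are made up):
#   parseSelector(["1-3", "7"]) returns
#     runidval     = (1, 3, 7)
#     runidCMD     = "((runid >= ? AND runid <= ?) OR runid == ?)"
#     runidVIEWcmd = "((runid >= 1 AND runid <= 3) OR runid == 7)"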
if args.others:
runidval,runidCMD,runidVIEWcmd = parseSelector(args.others)
else:
theid=getLastRunID()
print("Last run ID = %d\n" % theid)
runidval=(theid,)
runidCMD = "runid = ?"
runidVIEWcmd="(runid = %d)" % theid
# None means all
coreidval = []
coreidCMD = []
keepCoreIds=None
if args.cores:
cores=args.cores.split(",")
coreids = [str(getCoreID(x.strip())) for x in cores]
keepCoreIds = coreids.copy()
if args.ref:
coreids.append(str(getCoreID(args.ref.strip())))
#print(coreids)
coreidval,coreidCMD, coreidVIEWcmd = parseSelector(coreids,field="coreid")
runidval += coreidval
runidCMD += " AND %s" % coreidCMD
runidVIEWcmd += " AND %s" % coreidVIEWcmd
# We extract data only from data tables
# Those tables below are used for descriptions
REMOVETABLES=['TESTNAME','TESTDATE','RUN','CORE', 'PLATFORM', 'COMPILERKIND', 'COMPILER', 'TYPE', 'CATEGORY', 'CONFIG']
# This is assuming the database is generated by the regression script
# So platform is the same for all benchmarks.
# Category and type are coming from the test name in the yaml
# So no need to add this information here
# Name is removed here because it is added at the beginning
REMOVECOLUMNS=['runid','name','type','platform','category','coredef','OPTIMIZED','HARDFP','FASTMATH','NEON','HELIUM','UNROLL','ROUNDING','DATE','compilerkindid','date','categoryid', 'ID', 'platformid', 'coreid', 'compilerid', 'typeid']
REMOVECOLUMNSFORHISTORY=['Regression','MAXREGCOEF','name','type','platform','category','coredef','OPTIMIZED','HARDFP','FASTMATH','NEON','HELIUM','UNROLL','ROUNDING','DATE','compilerkindid','date','categoryid', 'ID', 'platformid', 'coreid', 'compilerid', 'typeid']
# Get existing benchmark tables
def getBenchTables():
r=c.execute("SELECT name FROM sqlite_master WHERE type='table'")
benchtables=[]
for table in r:
if not table[0] in REMOVETABLES:
benchtables.append(table[0])
return(benchtables)
# get existing types in a table
def getExistingTypes(benchTable):
r=c.execute("select distinct typeid from %s WHERE %s order by typeid desc " % (benchTable,runidCMD),runidval).fetchall()
result=[x[0] for x in r]
return(result)
# Get existing cores in a table
def getAllExistingCores(benchTable):
r=c.execute("select distinct coreid from %s WHERE %s order by coreid desc " % (benchTable,runidCMD),runidval).fetchall()
result=[x[0] for x in r]
return(result)
def getrunIDDetails():
tables=getBenchTables()
r=[]
for table in tables:
r += [x[0] for x in c.execute(runIDDetails % (table,runidCMD),runidval).fetchall()]
r=list(set(r))
print(r)
if args.lastid:
quit()
if args.details:
getrunIDDetails()
quit()
# Get compilers from specific type and table
allCompilers="""select distinct compilerid from %s WHERE typeid=?"""
# Get compilers for a specific core and table
allCompilerForCore="""select distinct compilerid from %s WHERE coreid=?"""
# Get cores for a specific type and table
allCores="""select distinct coreid from %s WHERE typeid=? AND (%s)"""
compilerDesc="""select compiler,version from COMPILER
INNER JOIN COMPILERKIND USING(compilerkindid) WHERE compilerid=?"""
coreDesc="""select core from CORE WHERE coreid=?"""
# Get existing compiler in a table for a specific type
# (In case report is structured by types)
def getExistingCompiler(benchTable,typeid):
r=c.execute(allCompilers % benchTable,(typeid,)).fetchall()
return([x[0] for x in r])
# Get existing compiler in a table for a specific core
# (In case report is structured by core)
def getExistingCompilerForCore(benchTable,coreid):
r=c.execute(allCompilerForCore % benchTable,(coreid,)).fetchall()
return([x[0] for x in r])
def getExistingCores(benchTable,typeid):
vals = (typeid,) + runidval
r=c.execute(allCores % (benchTable,runidCMD),vals).fetchall()
return([x[0] for x in r])
def getCoreDesc(coreid):
r=c.execute(coreDesc,(coreid,)).fetchone()
return(r)
def getCompilerDesc(compilerid):
r=c.execute(compilerDesc,(compilerid,)).fetchone()
return(r)
# Get type name from type id
def getTypeName(typeid):
r=c.execute("select type from TYPE where typeid=?",(typeid,)).fetchone()
return(r[0])
# Diff of 2 lists
def diff(first, second):
second = set(second)
return [item for item in first if item not in second]
# Command to get data for a specific core
# and compiler
benchCmdForCoreCompiler="""select %s from %s
INNER JOIN CATEGORY USING(categoryid)
INNER JOIN PLATFORM USING(platformid)
INNER JOIN CORE USING(coreid)
INNER JOIN COMPILER USING(compilerid)
INNER JOIN COMPILERKIND USING(compilerkindid)
INNER JOIN TYPE USING(typeid)
INNER JOIN TESTNAME USING(testnameid)
WHERE coreid=? AND compilerid = ? AND (%s)
"""
# Command to get the history of a test for a specific
# compiler, core and type
historyCmd="""select %s from %s
INNER JOIN CATEGORY USING(categoryid)
INNER JOIN PLATFORM USING(platformid)
INNER JOIN CORE USING(coreid)
INNER JOIN COMPILER USING(compilerid)
INNER JOIN COMPILERKIND USING(compilerkindid)
INNER JOIN TYPE USING(typeid)
INNER JOIN TESTNAME USING(testnameid)
WHERE compilerid=? AND coreid=? AND typeid = ? AND ID = ? AND runid > (? - 10)
"""
compilersForHistory="""select distinct compilerid,compiler,version from %s
INNER JOIN COMPILER USING(compilerid)
INNER JOIN COMPILERKIND USING(compilerkindid)
WHERE coreid=? AND typeid = ? AND ID = ? AND runid > (? - 10)
"""
# Command to get data for specific core
# and type
benchCmdForCore="""select %s from %s
INNER JOIN CATEGORY USING(categoryid)
INNER JOIN PLATFORM USING(platformid)
INNER JOIN CORE USING(coreid)
INNER JOIN COMPILER USING(compilerid)
INNER JOIN COMPILERKIND USING(compilerkindid)
INNER JOIN TYPE USING(typeid)
INNER JOIN TESTNAME USING(testnameid)
WHERE coreid=? AND typeid = ? AND (%s)
"""
coresForHistory="""select distinct coreid,core from %s
INNER JOIN CORE USING(coreid)
WHERE compilerid=? AND typeid = ? AND ID = ? AND runid > (? - 10)
"""
# Command to get data for specific compiler
# and type
benchCmdForCompiler="""select %s from %s
INNER JOIN CATEGORY USING(categoryid)
INNER JOIN PLATFORM USING(platformid)
INNER JOIN CORE USING(coreid)
INNER JOIN COMPILER USING(compilerid)
INNER JOIN COMPILERKIND USING(compilerkindid)
INNER JOIN TYPE USING(typeid)
INNER JOIN TESTNAME USING(testnameid)
WHERE compilerid=? AND typeid = ? AND (%s)
"""
# Command to get test names for a specific core
# and type
benchNamesForCore="""select distinct name from %s
INNER JOIN COMPILER USING(compilerid)
INNER JOIN COMPILERKIND USING(compilerkindid)
INNER JOIN TYPE USING(typeid)
INNER JOIN TESTNAME USING(testnameid)
WHERE coreid=? AND typeid = ? AND (%s)
"""
# Command to get test names for specific core
# and compiler
benchNamesForCoreCompiler="""select distinct name from %s
INNER JOIN COMPILER USING(compilerid)
INNER JOIN COMPILERKIND USING(compilerkindid)
INNER JOIN TYPE USING(typeid)
INNER JOIN TESTNAME USING(testnameid)
WHERE coreid=? AND compilerid = ? AND (%s)
"""
# Command to get test names for specific compiler
# and type
benchNamesForCompiler="""select distinct name from %s
INNER JOIN COMPILER USING(compilerid)
INNER JOIN COMPILERKIND USING(compilerkindid)
INNER JOIN TYPE USING(typeid)
INNER JOIN TESTNAME USING(testnameid)
WHERE compilerid=? AND typeid = ? AND (%s)
"""
# Command to get columns for specific table
benchCmdColumns="""select * from %s
INNER JOIN CATEGORY USING(categoryid)
INNER JOIN PLATFORM USING(platformid)
INNER JOIN CORE USING(coreid)
INNER JOIN COMPILER USING(compilerid)
INNER JOIN COMPILERKIND USING(compilerkindid)
INNER JOIN TESTNAME USING(testnameid)
INNER JOIN TYPE USING(typeid)
"""
def joinit(iterable, delimiter):
it = iter(iterable)
yield next(it)
for x in it:
yield delimiter
yield x
# True if the column name does not end with "id"
# (often the primary key for the table)
def isNotIDColumn(col):
if re.match(r'^.*id$',col):
return(False)
else:
return(True)
# Get test names
# for specific core and compiler (for the data)
def getTestNamesForCoreCompiler(benchTable,compilerid,core):
vals=(core,compilerid) + runidval
result=c.execute(benchNamesForCoreCompiler % (benchTable,runidCMD),vals).fetchall()
names=[x[0] for x in list(result)]
return(names)
# Get test names
# for specific typeid and core (for the data)
def getTestNamesForCore(benchTable,core,typeid):
vals=(core,typeid) + runidval
result=c.execute(benchNamesForCore % (benchTable,runidCMD),vals).fetchall()
names=[x[0] for x in list(result)]
return(names)
# Get test names
# for specific typeid and compiler (for the data)
def getTestNamesForCompiler(benchTable,comp,typeid):
vals=(comp,typeid) + runidval
result=c.execute(benchNamesForCompiler % (benchTable,runidCMD),vals).fetchall()
names=[x[0] for x in list(result)]
return(names)
# Command to count entries for a specific core
# and type
nbElemsInBenchAndTypeAndCoreCmd="""select count(*) from %s
WHERE coreid=? AND typeid = ? AND (%s)
"""
# Command to count entries for a specific compiler
# and type
nbElemsInBenchAndTypeAndCompilerCmd="""select count(*) from %s
WHERE compilerid=? AND typeid = ? AND (%s)
"""
# Command to count entries for a specific compiler
# and core
nbElemsInBenchAndCoreAndCompilerCmd="""select count(*) from %s
WHERE compilerid=? AND coreid = ? AND (%s)
"""
nbElemsInBenchAndTypeCmd="""select count(*) from %s
WHERE typeid = ? AND (%s)
"""
nbElemsInBenchAndCoreCmd="""select count(*) from %s
WHERE coreid = ? AND (%s)
"""
nbElemsInBenchCmd="""select count(*) from %s
WHERE %s
"""
categoryName="""select distinct category from %s
INNER JOIN CATEGORY USING(categoryid)
WHERE %s
"""
def getCategoryName(benchTable):
result=c.execute(categoryName % (benchTable,runidCMD),runidval).fetchone()
return(result[0])
# Get nb elems in a table
def getNbElemsInBenchAndTypeAndCoreCmd(benchTable,coreid,typeid):
vals=(coreid,typeid) + runidval
result=c.execute(nbElemsInBenchAndTypeAndCoreCmd % (benchTable,runidCMD),vals).fetchone()
return(result[0])
# Get nb elems in a table
def getNbElemsInBenchAndTypeAndCompilerCmd(benchTable,comp,typeid):
vals=(comp,typeid) + runidval
result=c.execute(nbElemsInBenchAndTypeAndCompilerCmd % (benchTable,runidCMD),vals).fetchone()
return(result[0])
# Get nb elems in a table
def getNbElemsInBenchAndCoreAndCompilerCmd(benchTable,comp,coreid):
vals=(comp,coreid) + runidval
result=c.execute(nbElemsInBenchAndCoreAndCompilerCmd % (benchTable,runidCMD),vals).fetchone()
return(result[0])
def getNbElemsInBenchAndTypeCmd(benchTable,typeid):
vals=(typeid,) + runidval
result=c.execute(nbElemsInBenchAndTypeCmd % (benchTable,runidCMD),vals).fetchone()
return(result[0])
def getNbElemsInBenchAndCoreCmd(benchTable,coreid):
vals=(coreid,) + runidval
result=c.execute(nbElemsInBenchAndCoreCmd % (benchTable,runidCMD),vals).fetchone()
return(result[0])
def getNbElemsInBenchCmd(benchTable):
result=c.execute(nbElemsInBenchCmd % (benchTable,runidCMD),runidval).fetchone()
return(result[0])
# Get names of columns and history data for a test
# for a specific compiler, core and type
def getColNamesAndHistory(benchTable,compiler,core,typeid,testid,runid):
cursor=c.cursor()
result=cursor.execute(benchCmdColumns % (benchTable))
cols= [member[0] for member in cursor.description]
keepCols = ['name'] + [c for c in diff(cols , REMOVECOLUMNSFORHISTORY) if isNotIDColumn(c)]
keepColsStr = "".join(joinit(keepCols,","))
vals=(compiler,core,typeid,testid,runid)
result=cursor.execute(historyCmd % (keepColsStr,benchTable),vals)
vals =np.array([list(x) for x in list(result)])
return(keepCols,vals)
# Get names of columns and data for a table
# for specific typeid and coreid (for the data)
def getColNamesAndDataForCore(benchTable,core,typeid):
cursor=c.cursor()
result=cursor.execute(benchCmdColumns % (benchTable))
cols= [member[0] for member in cursor.description]
keepCols = ['name'] + [c for c in diff(cols , REMOVECOLUMNS) if isNotIDColumn(c)]
keepColsStr = "".join(joinit(keepCols,","))
vals=(core,typeid) + runidval
result=cursor.execute(benchCmdForCore % (keepColsStr,benchTable,runidCMD),vals)
vals =np.array([list(x) for x in list(result)])
return(keepCols,vals)
# Get names of columns and data for a table
# for specific coreid and compilerid (for the data)
def getColNamesAndDataForCoreCompiler(benchTable,compilerid,core):
cursor=c.cursor()
result=cursor.execute(benchCmdColumns % (benchTable))
cols= [member[0] for member in cursor.description]
keepCols = ['name','type'] + [c for c in diff(cols , REMOVECOLUMNS) if isNotIDColumn(c)]
keepColsStr = "".join(joinit(keepCols,","))
vals=(core,compilerid) + runidval
result=cursor.execute(benchCmdForCoreCompiler % (keepColsStr,benchTable,runidCMD),vals)
vals =np.array([list(x) for x in list(result)])
return(keepCols,vals)
# Get names of columns and data for a table
# for specific typeid and compiler (for the data)
def getColNamesAndDataForCompiler(benchTable,comp,typeid):
cursor=c.cursor()
result=cursor.execute(benchCmdColumns % (benchTable))
cols= [member[0] for member in cursor.description]
keepCols = ['name'] + [c for c in diff(cols , REMOVECOLUMNS) if isNotIDColumn(c)]
keepColsStr = "".join(joinit(keepCols,","))
vals=(comp,typeid) + runidval
result=cursor.execute(benchCmdForCompiler % (keepColsStr,benchTable,runidCMD),vals)
vals =np.array([list(x) for x in list(result)])
return(keepCols,vals)
def formatFloat(s):
result=[]
for t in s:
if type(t) is float:
result.append(("%.3f" % t))
else:
result.append(t)
return(result)
PARAMS=["NB","NumTaps", "NBA", "NBB", "Factor", "NumStages","VECDIM","NBR","NBC","NBI","IFFT", "BITREV"]
def regressionTableFor(byname,name,section,ref,toSort,indexCols,field):
data=ref.pivot_table(index=indexCols, columns=byname,
values=[field], aggfunc='first',fill_value="NA")
data=data.sort_values(toSort)
if args.byc:
cores = [(c[1] + ":" + c[2]) for c in list(data.columns)]
else:
cores = [c[1] for c in list(data.columns)]
columns = diff(indexCols,['name'])
dataTable=Table(columns,cores)
section.addContent(dataTable)
dataForFunc=data.loc[name]
if type(dataForFunc) is pd.DataFrame:
bars={'cols':columns,'cores':cores,'data':[]}
for row in dataForFunc.itertuples():
row=list(row)
if type(row[0]) is int:
row=[row[0]] + row[1:]
else:
row=list(row[0]) + row[1:]
if field=="MAXREGCOEF":
newrow = row
newrow[len(columns):] = formatFloat(row[len(columns):])
row=newrow
dataTable.addRow(row)
bars['data'].append(row)
return(bars)
else:
if field=="MAXREGCOEF":
dataForFunc=formatFloat(dataForFunc)
dataTable.addRow(dataForFunc)
return(list(zip(cores,dataForFunc)))
def formatColumnName(c):
return("".join(joinit(c,":")))
def getCoresForHistory(benchTable,compilerid,typeid,testid,runid):
vals=(compilerid,typeid,testid,runid)
result=c.execute(coresForHistory % benchTable,vals).fetchall()
ids=[(x[0],x[1]) for x in list(result)]
return(ids)
def getCompilerForHistory(benchTable,coreid,typeid,testid,runid):
vals=(coreid,typeid,testid,runid)
result=c.execute(compilersForHistory % benchTable,vals).fetchall()
ids=[(x[0],x[1],x[2]) for x in list(result)]
return(ids)
def getHistory(desc,testid,indexCols):
benchName,sectionID,typeid,runid = desc
#print(benchName)
#print(sectionID)
#print(typeid)
#print(testid)
columns = diff(indexCols,['name'])
#print(columns)
if args.byc:
coreid=sectionID
compilerids=getCompilerForHistory(benchName,coreid,typeid,testid,runid)
series={}
for compilerid,compilername,version in compilerids:
result=getColNamesAndHistory(benchName,compilerid,coreid,typeid,testid,runid)
#print("%s:%s" % (compilername,version))
maxpos = result[0].index('MAX')
lrunid = result[0].index('runid')
r=[[int(x[lrunid]),int(x[maxpos])] for x in result[1:][0]]
series["%s:%s" % (compilername,version)]=r
hist=History(series,runid)
return(hist)
else:
compilerid=sectionID
coreids = getCoresForHistory(benchName,compilerid,typeid,testid,runid)
series={}
for coreid,corename in coreids:
result=getColNamesAndHistory(benchName,compilerid,coreid,typeid,testid,runid)
#print(corename)
maxpos = result[0].index('MAX')
corepos = result[0].index('core')
lrunid = result[0].index('runid')
r=[[int(x[lrunid]),int(x[maxpos])] for x in result[1:][0]]
series[corename]=r
hist=History(series,runid)
return(hist)
def convertRowToInt(r):
result=[]
for e in r:
if type(e) is float:
result.append(int(e))
else:
result.append(e)
return(result)
def addSectionComment(section):
if os.path.exists(args.comments):
fileName=re.sub(r'[() :]','',section.name)
path=os.path.join(args.comments,fileName+".html")
para=""
if os.path.exists(path):
commentSection = Section("Comments")
section.addSection(commentSection)
with open(path,"r") as r:
for l in r:
if l.strip():
para += l
else:
commentSection.addContent(Text(para))
para=""
if para:
commentSection.addContent(Text(para))
def formatTableBy(desc,byname,section,typeSection,testNames,cols,vals):
if vals.size != 0:
ref=pd.DataFrame(vals,columns=cols)
toSort=["name"]
for param in PARAMS:
if param in ref.columns:
ref[param]=pd.to_numeric(ref[param])
toSort.append(param)
if args.r:
# Regression table
ref['MAX']=pd.to_numeric(ref['MAX'])
ref['MAXREGCOEF']=pd.to_numeric(ref['MAXREGCOEF'])
indexCols=diff(cols,byname + ['Regression','MAXREGCOEF','MAX'] + section)
valList = ['Regression']
else:
ref['CYCLES']=pd.to_numeric(ref['CYCLES']).round(decimals=0)
indexCols=diff(cols,byname + ['CYCLES'] + section)
valList = ['CYCLES']
for name in testNames:
if args.r:
testSection = Section(name)
testSection.setTest()
typeSection.addSection(testSection)
addSectionComment(testSection)
maxCyclesSection = Section("Max cycles")
testSection.addSection(maxCyclesSection)
theCycles=regressionTableFor(byname,name,maxCyclesSection,ref,toSort,indexCols,'MAX')
if args.g:
if type(theCycles) is dict:
nbParams=len(theCycles['cols'])
for bar in theCycles['data']:
params=bar[0:nbParams]
values=bar[nbParams:]
title=[("%s=%s" % x) for x in list(zip(theCycles['cols'],params))]
title="".join(joinit(title," "))
sec=Section(title)
maxCyclesSection.addSection(sec)
values=list(zip(theCycles['cores'],values))
barChart=BarChart(values)
sec.addContent(barChart)
else:
#print(theCycles)
sec=Section("Graph")
maxCyclesSection.addSection(sec)
barChart=BarChart(theCycles)
sec.addContent(barChart)
#history=getHistory(desc,testid,indexCols)
#testSection.addContent(history)
regressionSection = Section("Regression")
testSection.addSection(regressionSection)
regressionTableFor(byname,name,regressionSection,ref,toSort,indexCols,'Regression')
maxRegCoefSection = Section("Max Reg Coef")
testSection.addSection(maxRegCoefSection)
regressionTableFor(byname,name,maxRegCoefSection,ref,toSort,indexCols,'MAXREGCOEF')
else:
data=ref.pivot_table(index=indexCols, columns=byname,
values=valList, aggfunc='first',fill_value="NA")
data=data.sort_values(toSort)
#print(list(data.columns))
testSection = Section(name)
testSection.setTest()
typeSection.addSection(testSection)
addSectionComment(testSection)
dataForFunc=data.loc[name]
dataForFunc = dataForFunc.dropna(axis=1)
columnsID = [formatColumnName(c[1:]) for c in list(dataForFunc.columns)]
columns = diff(indexCols,['name'])
dataTable=Table(columns,columnsID)
testSection.addContent(dataTable)
if type(dataForFunc) is pd.DataFrame:
for row in dataForFunc.itertuples():
if type(row[0]) is int:
row=list([row[0]] + list(row[1:]))
else:
row=list(row[0]) + list(row[1:])
dataTable.addRow(convertRowToInt(row))
else:
dataTable.addRow(convertRowToInt(dataForFunc))
referenceCoreID = None
refCore="""CREATE TEMP VIEW if not exists refCore AS
select * from %s where (coreid = %s) and (typeid = %s)
and %s
and compilerid = %s"""
otherCore="""CREATE TEMP VIEW if not exists otherCore AS
select * from %s where (typeid = %s)
and %s
and compilerid = %s"""
refCoreAllTypes="""CREATE TEMP VIEW if not exists refCore AS
select * from %s where (coreid = %s)
and %s
and compilerid = %s"""
otherCoreAllTypes="""CREATE TEMP VIEW if not exists otherCore AS
select * from %s where (coreid = %s)
and %s
and compilerid = %s"""
ratioSQL="""select name,otherCore.compilerid as compilerid,CORE.core as core,%s(CAST(otherCore.MAX as FLOAT) / CAST(refCore.MAX AS FLOAT)) AS RATIO
from otherCore
INNER JOIN refCore ON (refCore.categoryid = otherCore.categoryid
AND refCore.testnameid = otherCore.testnameid
AND refCore.typeid = otherCore.typeid
AND refCore.runid = otherCore.runid
AND refCore.compilerid = otherCore.compilerid
)
INNER JOIN TESTNAME ON TESTNAME.testnameid = refCore.testnameid
INNER JOIN CORE USING(coreid)
%s"""
ratioSQLAllTypes="""select name,otherCore.compilerid as compilerid,TYPE.type as type,%s(CAST(otherCore.MAX as FLOAT) / CAST(refCore.MAX AS FLOAT)) AS RATIO
from otherCore
INNER JOIN refCore ON (refCore.categoryid = otherCore.categoryid
AND refCore.testnameid = otherCore.testnameid
AND refCore.typeid = otherCore.typeid
AND refCore.runid = otherCore.runid
AND refCore.compilerid = otherCore.compilerid
)
INNER JOIN TESTNAME ON TESTNAME.testnameid = refCore.testnameid
INNER JOIN TYPE USING(typeid)
%s"""
ratioTestNamesSQL="""select distinct TESTNAME.name
from otherCore
INNER JOIN refCore ON (refCore.categoryid = otherCore.categoryid
AND refCore.testnameid = otherCore.testnameid
AND refCore.typeid = otherCore.typeid
AND refCore.runid = otherCore.runid
AND refCore.compilerid = otherCore.compilerid
)
INNER JOIN TESTNAME ON TESTNAME.testnameid = refCore.testnameid
INNER JOIN CORE USING(coreid)
%s"""
dropViewsRef="""drop view refCore"""
dropViewsOther="""drop view otherCore"""
def getTableParams(benchTable):
cursor=c.cursor()
result=cursor.execute("select * from %s limit 1" % (benchTable))
cols= [member[0] for member in cursor.description]
params=[]
for x in cols:
if x in PARAMS:
params.append(x)
return(params)
def computeRatio(benchName,viewParams,refMkViewCmd,otherMkViewCmd,byd):
params = getTableParams(benchName)
cols=["ratio"]
paramscmd=""
paramscols=""
paramsnames = ["refCore.%s as %s" % (x,x) for x in params]
paramseq = ["refCore.%s = otherCore.%s" % (x,x) for x in params]
if len(params) > 0:
cols = ["%s" % x for x in params]
cols.append("ratio")
paramscols= ("".join(joinit(paramsnames," , ")) + ",")
paramscmd = "WHERE " + "".join(joinit(paramseq," AND "))
if byd:
ratioCmd = ratioSQLAllTypes % (paramscols,paramscmd)
else:
ratioCmd = ratioSQL % (paramscols,paramscmd)
ratioTestNames = ratioTestNamesSQL % (paramscmd)
#print(refMkViewCmd)
#print(otherMkViewCmd)
#
#print(ratioCmd)
#
#print(dropViewsRef)
#print(dropViewsOther)
#quit()
c.execute(refMkViewCmd)
c.execute(otherMkViewCmd)
ratio=c.execute(ratioCmd).fetchall()
testNames=c.execute(ratioTestNames).fetchall()
testNames=[x[0] for x in testNames]
c.execute(dropViewsRef)
c.execute(dropViewsOther)
#print(ratio)
#quit()
if byd:
return(['name','compilerid','type'] + cols,params,ratio,testNames)
else:
return(['name','compilerid','core'] + cols,params,ratio,testNames)
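# Rough sketch of the generated SQL for a hypothetical table FIR with a single NB
# parameter (all ids are made up): two temporary views are created,
#
#   CREATE TEMP VIEW if not exists refCore AS
#     select * from FIR where (coreid = 3) and (typeid = 1) and (runid = 42) and compilerid = 2
#   CREATE TEMP VIEW if not exists otherCore AS
#     select * from FIR where (typeid = 1) and (runid = 42) and compilerid = 2
#
# and the ratio query then joins them on category, test name, type, run and
# compiler, restricted to rows where refCore.NB = otherCore.NB, and reports
# otherCore.MAX / refCore.MAX per core (or per type when organized by datatype).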
# Compute ratios for all cores for a given type
def computeRatioTable(benchName,referenceCore,typeID,compiler):
viewParams = (benchName,referenceCore,typeID,runidVIEWcmd,compiler)
refMkViewCmd = refCore % viewParams
otherParams = (benchName,typeID,runidVIEWcmd,compiler)
otherMkViewCmd = otherCore % otherParams
#print(refMkViewCmd)
#print(otherMkViewCmd)
return(computeRatio(benchName,viewParams,refMkViewCmd,otherMkViewCmd,False))
def computeRatioTableForCore(benchName,referenceCore,otherCoreID,compiler):
viewParams = (benchName,referenceCore,runidVIEWcmd,compiler)
refMkViewCmd = refCoreAllTypes % viewParams
otherParams = (benchName,otherCoreID,runidVIEWcmd,compiler)
otherMkViewCmd = otherCoreAllTypes % otherParams
#print(refMkViewCmd)
#print(otherMkViewCmd)
return(computeRatio(benchName,viewParams,refMkViewCmd,otherMkViewCmd,True))
def formatPerfRatio(s):
result=[]
for t in s:
if type(t) is float:
if args.clamp:
if t > args.clampval:
t = args.clampval
if t < 0.0:
result.append("NA")
else:
result.append(("%.3f" % t))
else:
result.append(t)
return(result)
def addRatioTable(cols,params,data,section,testNames,byd):
ref=pd.DataFrame(data,columns=cols)
toSort=["name"] + params
for param in PARAMS:
if param in ref.columns:
ref[param]=pd.to_numeric(ref[param])
#print(testNames)
for name in testNames:
testSection = Section(name)
testSection.setTest()
section.addSection(testSection)
addSectionComment(testSection)
ratioSection = Section("Ratios")
testSection.addSection(ratioSection)
#print(toSort)
#print(ref)
if byd:
data=ref.pivot_table(index=toSort, columns=['type'],
values=["ratio"], aggfunc='first',fill_value=-1.0)
else:
data=ref.pivot_table(index=toSort, columns=['core'],
values=["ratio"], aggfunc='first')
data=data.sort_values(toSort)
#print(data)
dataForFunc=data.loc[name]
cores = [c[1] for c in list(data.columns)]
dataTable=Table(params,cores)
if args.g:
if type(dataForFunc) is not pd.DataFrame:
sec=Section("Graph")
testSection.addSection(sec)
barChart=BarChart(list(zip(cores,dataForFunc)))
sec.addContent(barChart)
ratioSection.addContent(Text("A bigger ratio means the reference core \"%s\" is better" % refCoreName))
ratioSection.addContent(dataTable)
if type(dataForFunc) is pd.DataFrame:
for row in dataForFunc.itertuples():
row=list(row)
if type(row[0]) is int:
row=[row[0]] + formatPerfRatio(row[1:])
else:
row=list(row[0]) + formatPerfRatio(row[1:])
dataTable.addRow(row)
else:
row=list(dataForFunc)
dataTable.addRow(formatPerfRatio(row))
# Add a report for each table
def addReportFor(document,benchName):
nbElems = getNbElemsInBenchCmd(benchName)
if nbElems > 0:
categoryName = getCategoryName(benchName)
benchSection = Section(categoryName)
document.addSection(benchSection)
print("Process %s\n" % benchName)
if args.byd:
allCores=getAllExistingCores(benchName)
if args.ratio:
if keepCoreIds:
allCores=keepCoreIds
if ("%d" % referenceCoreID) in allCores:
allCores.remove("%d" % referenceCoreID)
for aCoreID in allCores:
nbElems = getNbElemsInBenchAndCoreCmd(benchName,aCoreID)
if nbElems > 0:
coreName=getCoreDesc(aCoreID)
coreSection = Section("%s" % coreName)
benchSection.addSection(coreSection)
allCompilers = getExistingCompilerForCore(benchName,aCoreID)
for compiler in allCompilers:
#print(compiler)
if args.ratio:
cols,params,ratios,testNames=computeRatioTableForCore(benchName,referenceCoreID,aCoreID,compiler)
#print(cols)
#print(ratios)
#print(" ")
if len(ratios)>0:
compilerName,version=getCompilerDesc(compiler)
compilerSection = Section("%s (%s)" % (compilerName,version))
coreSection.addSection(compilerSection)
addRatioTable(cols,params,ratios,compilerSection,testNames,True)
else:
nbElems = getNbElemsInBenchAndCoreAndCompilerCmd(benchName,compiler,aCoreID)
# Print test results for table, type, compiler
if nbElems > 0:
compilerName,version=getCompilerDesc(compiler)
compilerSection = Section("%s (%s)" % (compilerName,version))
coreSection.addSection(compilerSection)
cols,vals=getColNamesAndDataForCoreCompiler(benchName,compiler,aCoreID)
desc=(benchName,compiler,aCoreID)
names=getTestNamesForCoreCompiler(benchName,compiler,aCoreID)
formatTableBy(desc,['type'],['core','version','compiler'],compilerSection,names,cols,vals)
else:
allTypes = getExistingTypes(benchName)
# Add report for each type
for aTypeID in allTypes:
nbElems = getNbElemsInBenchAndTypeCmd(benchName,aTypeID)
if nbElems > 0:
typeName = getTypeName(aTypeID)
typeSection = Section(typeName)
benchSection.addSection(typeSection)
if args.byc:
## Add report for each core
allCores = getExistingCores(benchName,aTypeID)
for core in allCores:
#print(core)
nbElems = getNbElemsInBenchAndTypeAndCoreCmd(benchName,core,aTypeID)
# Print test results for table, type, compiler
if nbElems > 0:
coreName=getCoreDesc(core)
coreSection = Section("%s" % coreName)
typeSection.addSection(coreSection)
cols,vals=getColNamesAndDataForCore(benchName,core,aTypeID)
desc=(benchName,core,aTypeID)
names=getTestNamesForCore(benchName,core,aTypeID)
formatTableBy(desc,['compiler','version'],['core'],coreSection,names,cols,vals)
elif args.ratio:
allCompilers = getExistingCompiler(benchName,aTypeID)
for compiler in allCompilers:
cols,params,ratios,testNames=computeRatioTable(benchName,referenceCoreID,aTypeID,compiler)
#print(cols)
#print(ratios)
#print(" ")
if len(ratios)>0:
compilerName,version=getCompilerDesc(compiler)
compilerSection = Section("%s (%s)" % (compilerName,version))
typeSection.addSection(compilerSection)
addRatioTable(cols,params,ratios,compilerSection,testNames,False)
else:
## Add report for each compiler
allCompilers = getExistingCompiler(benchName,aTypeID)
for compiler in allCompilers:
#print(compiler)
nbElems = getNbElemsInBenchAndTypeAndCompilerCmd(benchName,compiler,aTypeID)
# Print test results for table, type, compiler
if nbElems > 0:
compilerName,version=getCompilerDesc(compiler)
compilerSection = Section("%s (%s)" % (compilerName,version))
typeSection.addSection(compilerSection)
cols,vals=getColNamesAndDataForCompiler(benchName,compiler,aTypeID)
desc=(benchName,compiler,aTypeID)
names=getTestNamesForCompiler(benchName,compiler,aTypeID)
formatTableBy(desc,['core'],['version','compiler'],compilerSection,names,cols,vals)
toc=[Hierarchy("BasicMathsBenchmarks"),
Hierarchy("ComplexMathsBenchmarks"),
Hierarchy("FastMath"),
Hierarchy("Filters",
[Hierarchy("FIR"),
Hierarchy("BIQUAD"),
Hierarchy("DECIM"),
Hierarchy("MISC")]),
Hierarchy("Support Functions",
[Hierarchy("Support"),
Hierarchy("SupportBar")]),
Hierarchy("Matrix Operations" ,
[Hierarchy("Binary"),
Hierarchy("Unary")]),
Hierarchy("Transform"),
Hierarchy("Stats"),
Hierarchy("Classical ML",[
Hierarchy("Bayes"),
Hierarchy("SVM"),
Hierarchy("Distance"),
Hierarchy("KalmanBenchmarks")
]),
]
processed=[]
def addComments(document):
if os.path.exists(args.comments):
section=Section("Measurement Context")
path=os.path.join(args.comments,"comments.txt")
document.addSection(section)
para=""
with open(path,"r") as r:
for l in r:
if l.strip():
para += l
else:
section.addContent(Text(para))
para=""
if para:
section.addContent(Text(para))
if args.ratio:
section.addContent(Text("Reference core for the ratio is %s" % refCoreName))
section.addContent(Text("A bigger ratio means the reference code is better"))
def processToc(d):
result=[]
for k in d:
if d[k] is not None:
result.append(Hierarchy(k,processToc(d[k])))
else:
result.append(Hierarchy(k))
return(result)
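# Illustrative note (an assumption, not taken from this script): processToc()
# expects the value under the "root" key of the --toc YAML file to be a nested
# mapping of section names, with empty values marking leaf sections, e.g.:
#
#   root:
#     Filters:
#       FIR:
#       BIQUAD:
#     Transform:
#
# which processToc() turns into
# [Hierarchy("Filters", [Hierarchy("FIR"), Hierarchy("BIQUAD")]), Hierarchy("Transform")].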
def createDoc(document,sections,benchtables):
global processed,referenceCoreID
for s in sections:
if s.name in benchtables:
addReportFor(document,s.name)
processed.append(s.name)
else:
section=Section(s.name)
document.addSection(section)
createDoc(section,s.sections,benchtables)
try:
benchtables=getBenchTables()
document = Document(None)
if args.ratio:
referenceCoreID= getCoreID(args.ref)
refCoreName=getCoreDesc(referenceCoreID)
addComments(document)
if args.toc:
with open(args.toc,"r") as f:
config=yaml.safe_load(f)
toc = processToc(config['root'])
#print(toc)
#quit()
createDoc(document,toc,benchtables)
misc=Section("Miscellaneous")
document.addSection(misc)
remaining=diff(benchtables,processed)
for bench in remaining:
addReportFor(misc,bench)
#for bench in benchtables:
# addReportFor(document,bench)
with open(args.o,"w") as output:
if args.t=="md":
document.accept(Markdown(output))
if args.t=="html":
reorder=NORMALFORMAT
if args.byc:
reorder=BYCFORMAT
if args.byd:
reorder=BYDFORMAT
document.accept(HTML(output,args.r,args.ratio,reorder))
finally:
c.close()
|
|
"""Code run on the client side for unstaging complete Pulsar jobs."""
import fnmatch
from contextlib import contextmanager
from logging import getLogger
from os.path import join, relpath
from json import loads
from ..action_mapper import FileActionMapper
from ..staging import COMMAND_VERSION_FILENAME
log = getLogger(__name__)
def finish_job(client, cleanup_job, job_completed_normally, client_outputs, pulsar_outputs):
"""Process for "un-staging" a complete Pulsar job.
This function is responsible for downloading results from the remote
server and cleaning up the Pulsar staging directory (if needed).
"""
collection_failure_exceptions = []
if job_completed_normally:
output_collector = ClientOutputCollector(client)
action_mapper = FileActionMapper(client)
results_stager = ResultsCollector(output_collector, action_mapper, client_outputs, pulsar_outputs)
collection_failure_exceptions = results_stager.collect()
_clean(collection_failure_exceptions, cleanup_job, client)
return collection_failure_exceptions
class ClientOutputCollector:
def __init__(self, client):
self.client = client
def collect_output(self, results_collector, output_type, action, name):
# This output should have been handled by the Pulsar.
if not action.staging_action_local:
return False
working_directory = results_collector.client_outputs.working_directory
self.client.fetch_output(
path=action.path,
name=name,
working_directory=working_directory,
output_type=output_type,
action_type=action.action_type
)
return True
class ResultsCollector:
def __init__(self, output_collector, action_mapper, client_outputs, pulsar_outputs):
self.output_collector = output_collector
self.action_mapper = action_mapper
self.client_outputs = client_outputs
self.pulsar_outputs = pulsar_outputs
self.downloaded_working_directory_files = []
self.exception_tracker = DownloadExceptionTracker()
self.output_files = client_outputs.output_files
self.working_directory_contents = pulsar_outputs.working_directory_contents or []
self.metadata_directory_contents = pulsar_outputs.metadata_directory_contents or []
self.job_directory_contents = pulsar_outputs.job_directory_contents or []
def collect(self):
self.__collect_working_directory_outputs()
self.__collect_outputs()
self.__collect_version_file()
self.__collect_other_working_directory_files()
self.__collect_metadata_directory_files()
self.__collect_job_directory_files()
return self.exception_tracker.collection_failure_exceptions
def __collect_working_directory_outputs(self):
working_directory = self.client_outputs.working_directory
# Fetch explicit working directory outputs.
for source_file, output_file in self.client_outputs.work_dir_outputs:
name = relpath(source_file, working_directory)
if name not in self.working_directory_contents:
# Could be a glob
matching = fnmatch.filter(self.working_directory_contents, name)
if matching:
name = matching[0]
source_file = join(working_directory, name)
pulsar = self.pulsar_outputs.path_helper.remote_name(name)
if self._attempt_collect_output('output_workdir', path=output_file, name=pulsar):
self.downloaded_working_directory_files.append(pulsar)
# Remove from the full output_files list so we don't also try to download it directly.
try:
self.output_files.remove(output_file)
except ValueError:
raise Exception("Failed to remove {} from {}".format(output_file, self.output_files))
def __collect_outputs(self):
# Legacy Pulsar servers may not return a list of files, so iterate over the
# list of expected outputs for the tool.
for output_file in self.output_files:
# Fetch output directly...
output_generated = self.pulsar_outputs.has_output_file(output_file)
if output_generated:
self._attempt_collect_output('output', output_file)
for galaxy_path, pulsar in self.pulsar_outputs.output_extras(output_file).items():
self._attempt_collect_output('output', path=galaxy_path, name=pulsar)
# else not output generated, do not attempt download.
def __collect_version_file(self):
version_file = self.client_outputs.version_file
pulsar_output_directory_contents = self.pulsar_outputs.output_directory_contents
if version_file and COMMAND_VERSION_FILENAME in pulsar_output_directory_contents:
self._attempt_collect_output('output', version_file, name=COMMAND_VERSION_FILENAME)
def __collect_other_working_directory_files(self):
self.__collect_directory_files(
self.client_outputs.working_directory,
self.working_directory_contents,
'output_workdir',
)
def __collect_metadata_directory_files(self):
self.__collect_directory_files(
self.client_outputs.metadata_directory,
self.metadata_directory_contents,
'output_metadata',
)
def __collect_job_directory_files(self):
self.__collect_directory_files(
self.client_outputs.job_directory,
self.job_directory_contents,
'output_jobdir',
)
def __realized_dynamic_file_source_references(self):
references = []
def record_references(from_dict):
if isinstance(from_dict, list):
for v in from_dict:
record_references(v)
elif isinstance(from_dict, dict):
for k, v in from_dict.items():
if k == "filename":
references.append(v)
if isinstance(v, (list, dict)):
record_references(v)
def parse_and_record_references(json_content):
try:
as_dict = loads(json_content)
record_references(as_dict)
except Exception as e:
log.warning("problem parsing galaxy.json %s" % e)
pass
realized_dynamic_file_sources = (self.pulsar_outputs.realized_dynamic_file_sources or [])
for realized_dynamic_file_source in realized_dynamic_file_sources:
contents = realized_dynamic_file_source["contents"]
source_type = realized_dynamic_file_source["type"]
assert source_type in ["galaxy", "legacy_galaxy"], source_type
if source_type == "galaxy":
parse_and_record_references(contents)
else:
for line in contents.splitlines():
parse_and_record_references(line)
return references
def __collect_directory_files(self, directory, contents, output_type):
if directory is None: # e.g. output_metadata_directory
return
dynamic_file_source_references = self.__realized_dynamic_file_source_references()
# Fetch remaining working directory outputs of interest.
for name in contents:
collect = False
if name in self.downloaded_working_directory_files:
continue
if self.client_outputs.dynamic_match(name):
collect = True
elif name in dynamic_file_source_references:
collect = True
if collect:
log.debug("collecting dynamic {} file {}".format(output_type, name))
output_file = join(directory, self.pulsar_outputs.path_helper.local_name(name))
if self._attempt_collect_output(output_type=output_type, path=output_file, name=name):
self.downloaded_working_directory_files.append(name)
def _attempt_collect_output(self, output_type, path, name=None):
# path is final path on galaxy server (client)
# name is the 'name' of the file on the Pulsar server (possibly a relative
# path).
collected = False
with self.exception_tracker():
action = self.action_mapper.action({"path": path}, output_type)
if self._collect_output(output_type, action, name):
collected = True
return collected
def _collect_output(self, output_type, action, name):
log.info("collecting output {} with action {}".format(name, action))
try:
return self.output_collector.collect_output(self, output_type, action, name)
except Exception as e:
if _allow_collect_failure(output_type):
log.warning(
"Allowed failure in postprocessing, will not force job failure but generally indicates a tool"
f" failure: {e}")
else:
raise
class DownloadExceptionTracker:
def __init__(self):
self.collection_failure_exceptions = []
@contextmanager
def __call__(self):
try:
yield
except Exception as e:
self.collection_failure_exceptions.append(e)
def _clean(collection_failure_exceptions, cleanup_job, client):
failed = (len(collection_failure_exceptions) > 0)
do_clean = (not failed and cleanup_job != "never") or cleanup_job == "always"
if do_clean:
message = "Cleaning up job (failed [%s], cleanup_job [%s])"
else:
message = "Skipping job cleanup (failed [%s], cleanup_job [%s])"
log.debug(message % (failed, cleanup_job))
if do_clean:
try:
client.clean()
except Exception:
log.warn("Failed to cleanup remote Pulsar job")
def _allow_collect_failure(output_type):
return output_type in ['output_workdir']
__all__ = ('finish_job',)
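# A minimal usage sketch (not part of the original module): how calling code
# might drive finish_job() once Pulsar reports a job as complete. The client,
# client_outputs and pulsar_outputs objects are hypothetical stand-ins that the
# wider pulsar.client setup code would normally construct; "onsuccess" is one of
# the Galaxy-style cleanup_job values that _clean() compares against.
def _example_finish(client, client_outputs, pulsar_outputs):
    failures = finish_job(
        client=client,
        cleanup_job="onsuccess",
        job_completed_normally=True,
        client_outputs=client_outputs,
        pulsar_outputs=pulsar_outputs,
    )
    if failures:
        # Each entry is an exception captured by DownloadExceptionTracker.
        log.warning("%d output(s) failed to download", len(failures))
    return failures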
|
|
from __future__ import unicode_literals
import collections
from django.contrib import auth
from django.contrib.auth import models as auth_models
from django.core import exceptions
from django.utils import six
from .. import core
__all__ = (
"LoginRequired", "ActiveRequired", "StaffRequired", "SuperuserRequired",
"GroupsRequired", "PermissionsRequired", "ObjectPermissionsRequired",
"TestRequired",)
class LoginRequired(core.behaviors.Denial):
"""
A view behavior that tests whether the user is authenticated.
If the login_required attribute is falsy, the login requirement testing
will be disabled. Its initial value is True.
Set the login_required_* attributes to configure the behavior when a login
is required in order for the user to proceed. See
daydreamer.views.core.behaviors.Denial for the attributes' documentation.
"""
login_required = True
login_required_raise = False
login_required_exception = None
login_required_message = None
login_required_message_level = None
login_required_message_tags = None
login_required_redirect_url = None
login_required_redirect_next_url = None
login_required_redirect_next_name = auth.REDIRECT_FIELD_NAME
def get_login_required(self):
"""
A hook to override the login_required value.
The default implementation returns self.login_required.
"""
return self.login_required
def login_required_test(self):
"""
A hook to override the way that the login requirement test is performed.
"""
return (
not self.get_login_required() or
self.request.user.is_authenticated())
def login_required_denied(self, request, *args, **kwargs):
"""
The handler called upon login requirement test failure.
"""
return self.deny("login_required")
def get_deny_handler(self):
"""
Returns self.login_required_denied when the login requirement
test fails, falling back to super().
"""
return (
not self.login_required_test() and
self.login_required_denied or
super(LoginRequired, self).get_deny_handler())
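# Note on the idiom used by get_deny_handler() above (and in the classes below):
# "not self.login_required_test() and self.login_required_denied or super(...)"
# relies on the denied handler being a truthy bound method, so it reads as
# "login_required_denied if the test fails, else whatever super() returns".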
class ActiveRequired(core.behaviors.Denial):
"""
A view behavior that tests whether the user is active.
If the active_required attribute is falsy, the active requirement testing
will be disabled. Its initial value is True.
Set the active_required_* attributes to configure the behavior when an
active user is required in order for the user to proceed. See
daydreamer.views.core.behaviors.Denial for the attributes' documentation.
"""
active_required = True
active_required_raise = False
active_required_exception = None
active_required_message = None
active_required_message_level = None
active_required_message_tags = None
active_required_redirect_url = None
active_required_redirect_next_url = None
active_required_redirect_next_name = auth.REDIRECT_FIELD_NAME
def get_active_required(self):
"""
A hook to override the active_required value.
The default implementation returns self.active_required.
"""
return self.active_required
def active_required_test(self):
"""
A hook to override the way that the active requirement test
is performed.
"""
return (
not self.get_active_required() or
self.request.user.is_active)
def active_required_denied(self, request, *args, **kwargs):
"""
The handler called upon active requirement test failure.
"""
return self.deny("active_required")
def get_deny_handler(self):
"""
Returns self.active_required_denied when the active requirement
test fails, falling back to super().
"""
return (
not self.active_required_test() and
self.active_required_denied or
super(ActiveRequired, self).get_deny_handler())
class StaffRequired(core.behaviors.Denial):
"""
A view behavior that tests whether the user is a staff member.
If the staff_required attribute is falsy, the staff requirement testing
will be disabled. Its initial value is True.
Set the staff_required_* attributes to configure the behavior when a
staff user is required in order for the user to proceed. See
daydreamer.views.core.behaviors.Denial for the attributes' documentation.
"""
staff_required = True
staff_required_raise = False
staff_required_exception = None
staff_required_message = None
staff_required_message_level = None
staff_required_message_tags = None
staff_required_redirect_url = None
staff_required_redirect_next_url = None
staff_required_redirect_next_name = auth.REDIRECT_FIELD_NAME
def get_staff_required(self):
"""
A hook to override the staff_required value.
The default implementation returns self.staff_required.
"""
return self.staff_required
def staff_required_test(self):
"""
A hook to override the way that the staff requirement test
is performed.
"""
return (
not self.get_staff_required() or
self.request.user.is_staff)
def staff_required_denied(self, request, *args, **kwargs):
"""
The handler called upon staff requirement test failure.
"""
return self.deny("staff_required")
def get_deny_handler(self):
"""
Returns self.staff_required_denied when the staff requirement
test fails, falling back to super().
"""
return (
not self.staff_required_test() and
self.staff_required_denied or
super(StaffRequired, self).get_deny_handler())
class SuperuserRequired(core.behaviors.Denial):
"""
A view behavior that tests whether the user is a superuser.
If the superuser_required attribute is falsy, the superuser requirement
testing will be disabled. Its initial value is True.
Set the superuser_required_* attributes to configure the behavior when
a superuser is required in order for the user to proceed. See
daydreamer.views.core.behaviors.Denial for the attributes' documentation.
"""
superuser_required = True
superuser_required_raise = False
superuser_required_exception = None
superuser_required_message = None
superuser_required_message_level = None
superuser_required_message_tags = None
superuser_required_redirect_url = None
superuser_required_redirect_next_url = None
superuser_required_redirect_next_name = auth.REDIRECT_FIELD_NAME
def get_superuser_required(self):
"""
A hook to override the superuser_required value.
The default implementation returns self.superuser_required.
"""
return self.superuser_required
def superuser_required_test(self):
"""
A hook to override the way that the superuser requirement test
is performed.
"""
return (
not self.get_superuser_required() or
self.request.user.is_superuser)
def superuser_required_denied(self, request, *args, **kwargs):
"""
The handler called upon superuser requirement test failure.
"""
return self.deny("superuser_required")
def get_deny_handler(self):
"""
Returns self.superuser_required_denied when the superuser
requirement test fails, falling back to super().
"""
return (
not self.superuser_required_test() and
self.superuser_required_denied or
super(SuperuserRequired, self).get_deny_handler())
class GroupsRequired(core.behaviors.Denial):
"""
A view behavior that tests whether the user is in a set of groups.
If the groups_required attribute is falsy, the groups requirement testing
will be disabled. Its initial value is None. The groups_required attribute
can be either a single value or an iterable of values. Each value should be
a group name or a django.contrib.auth.models.Group object. When named
groups are specified, the corresponding Groups must exist, or a
django.core.exceptions.ImproperlyConfigured exception will be raised.
If any value is not a group name or a Group object, a ValueError will
be raised.
Set the groups_required_* attributes to configure the behavior when a set
of groups is required in order for the user to proceed. See
daydreamer.views.core.behaviors.Denial for the attributes' documentation.
"""
groups_required = None
groups_required_raise = False
groups_required_exception = None
groups_required_message = None
groups_required_message_level = None
groups_required_message_tags = None
groups_required_redirect_url = None
groups_required_redirect_next_url = None
groups_required_redirect_next_name = auth.REDIRECT_FIELD_NAME
def get_groups_required(self):
"""
A hook to override the groups_required value.
The default implementation normalizes the groups into a set
of primary keys. If the groups_required attribute includes a
value that is not a group name or a
django.contrib.auth.models.Group object, a ValueError will be raised.
If any group names do not exist in the database, a
django.core.exceptions.ImproperlyConfigured exception will be raised.
"""
# Normalize single instances to tuples.
groups = self.groups_required or set()
if isinstance(groups, six.string_types + (auth_models.Group,)):
groups = (groups,)
elif not isinstance(groups, collections.Iterable):
raise ValueError(
"The value {value!r} specified for groups_required is not a "
"group name, nor a Group nor an iterable of groups.".format(
value=groups))
# Resolve the group names and Group objects into existing Groups'
# primary keys.
if groups:
# Filter the groups into buckets by type.
named_groups = set()
actual_groups = set()
for group in groups:
if isinstance(group, six.string_types):
named_groups.add(group)
elif isinstance(group, auth_models.Group):
actual_groups.add(group)
else:
raise ValueError(
"A value {value!r} specified in groups_required "
"is not a group name or a Group.".format(value=group))
# Resolve the named groups and perform the sanity check.
resolved_groups = set(
auth_models.Group.objects
.filter(name__in=named_groups)
.values_list("pk", flat=True))
if len(named_groups) != len(resolved_groups):
raise exceptions.ImproperlyConfigured(
"One or more group names specified in groups_required "
"does not exist.")
# Gather all the groups' primary keys.
groups = resolved_groups | set(group.pk for group in actual_groups)
return groups
def groups_required_test(self):
"""
A hook to override the way that the groups requirement test
is performed.
"""
groups = self.get_groups_required()
return (
not groups or
self.request.user.groups.filter(
pk__in=groups).count() == len(groups))
def groups_required_denied(self, request, *args, **kwargs):
"""
The handler called upon groups requirement test failure.
"""
return self.deny("groups_required")
def get_deny_handler(self):
"""
Returns self.groups_required_denied when the groups
requirement test fails, falling back to super().
"""
return (
not self.groups_required_test() and
self.groups_required_denied or
super(GroupsRequired, self).get_deny_handler())
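# Illustrative note (assumption about intended usage): groups_required may be a
# single group name, a Group instance, or an iterable mixing both, e.g.
#   groups_required = "editors"
#   groups_required = ("editors", some_group_instance)
# and get_groups_required() resolves every entry to a set of Group primary keys.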
class PermissionsRequired(core.behaviors.Denial):
"""
A view behavior that tests whether the user has a set
of permissions.
If the permissions_required attribute is falsy, the permissions requirement
testing will be disabled. Its initial value is None. The permissions_required
attribute can be either a single permission name or an iterable of
permission names.
Set the permissions_required_* attributes to configure the behavior when
permissions are required in order for the user to proceed.
See daydreamer.views.core.behaviors.Denial for the attributes' documentation.
"""
permissions_required = None
permissions_required_raise = False
permissions_required_exception = None
permissions_required_message = None
permissions_required_message_level = None
permissions_required_message_tags = None
permissions_required_redirect_url = None
permissions_required_redirect_next_url = None
permissions_required_redirect_next_name = auth.REDIRECT_FIELD_NAME
def get_permissions_required(self):
"""
A hook to override the permissions_required value.
The default implementation returns the value of
self.permissions_required, where a single value is normalized as a
tuple. If any value in permissions_required is not a permission name, a
ValueError will be raised.
"""
# Normalize single values to a tuple.
permissions = self.permissions_required or ()
if isinstance(permissions, six.string_types):
permissions = (permissions,)
elif not isinstance(permissions, collections.Iterable):
raise ValueError(
"The permssions_required value is neither a permission name "
"nor an iterable of permission names.")
# Sanity check.
if (permissions and
any(not isinstance(permission, six.string_types)
for permission in permissions)):
raise ValueError(
"One or more values in permissions_required is not a "
"permission name.")
return permissions
def permissions_required_test(self):
"""
A hook to override the way that the permissions requirement test
is performed.
"""
permissions = self.get_permissions_required()
return (
not permissions or
self.request.user.has_perms(permissions))
def permissions_required_denied(self, request, *args, **kwargs):
"""
The handler called upon permissions requirement test failure.
"""
return self.deny("permissions_required")
def get_deny_handler(self):
"""
Returns self.permissions_required_denied when the permissions
requirement test fails, falling back to super().
"""
return (
not self.permissions_required_test() and
self.permissions_required_denied or
super(PermissionsRequired, self).get_deny_handler())
class ObjectPermissionsRequired(core.behaviors.Denial):
"""
A view behavior that tests whether the user has a set of permissions
for a particular object.
If either of the object_permissions_required attribute or the
object_permissions_required_object attributes is falsy, the permissions
requirement testing will be disabled. Initial values for these attributes
are None. The object_permissions_required attribute can be either a single
permission name or an iterable of permission names. The
object_permissions_required_object attribute will typically be implemented
as a property that returns some object retrieved from the database.
Set the object_permissions_required_* attributes to configure the behavior
when permissions are required for an object in order for the user to
proceed. See daydreamer.views.core.behaviors.Denial for the
attributes' documentation.
"""
object_permissions_required = None
object_permissions_required_object = None
object_permissions_required_raise = False
object_permissions_required_exception = None
object_permissions_required_message = None
object_permissions_required_message_level = None
object_permissions_required_message_tags = None
object_permissions_required_redirect_url = None
object_permissions_required_redirect_next_url = None
object_permissions_required_redirect_next_name = auth.REDIRECT_FIELD_NAME
def get_object_permissions_required(self):
"""
A hook to override the object_permissions_required value.
The default implementation returns the value of
self.object_permissions_required, where a single value is normalized as
a tuple. If any value in object_permissions_required is not a
permission name, a ValueError will be raised.
"""
# Normalize single values to a tuple.
permissions = self.object_permissions_required or ()
if isinstance(permissions, six.string_types):
permissions = (permissions,)
elif not isinstance(permissions, collections.Iterable):
raise ValueError(
"The object_permssions_required value is neither a "
"permission name nor an iterable of permission names.")
# Sanity check.
if (permissions and
any(not isinstance(permission, six.string_types)
for permission in permissions)):
raise ValueError(
"One or more values in object_permissions_required is not a "
"permission name.")
return permissions
def get_object_permissions_required_object(self):
"""
A hook to override the object_permissions_required_object value.
The default implementation returns
self.object_permissions_required_object.
"""
return self.object_permissions_required_object
def object_permissions_required_test(self):
"""
A hook to override the way that the object permissions requirement test
is performed.
"""
permissions = self.get_object_permissions_required()
obj = self.get_object_permissions_required_object()
return (
(not all((permissions, obj,))) or
self.request.user.has_perms(permissions, obj=obj))
def object_permissions_required_denied(self, request, *args, **kwargs):
"""
The handler called upon object permissions requirement test failure.
"""
return self.deny("object_permissions_required")
def get_deny_handler(self):
"""
Returns self.object_permissions_required_denied when the object
permissions test fails, falling back to super().
"""
return (
not self.object_permissions_required_test() and
self.object_permissions_required_denied or
super(ObjectPermissionsRequired, self).get_deny_handler())
class TestRequired(core.behaviors.Denial):
"""
A view behavior that performs a test against the current request,
typically a predicate for self.request.user.
If the test_required attribute is not a callable, the test requirement
will be disabled. Its initial value is None.
Set the test_required_* attributes to configure the behavior when a
test must be passed in order for the user to proceed. See
daydreamer.views.core.behaviors.Denial for the attributes' documentation.
"""
test_required = None
test_required_raise = False
test_required_exception = None
test_required_message = None
test_required_message_level = None
test_required_message_tags = None
test_required_redirect_url = None
test_required_redirect_next_url = None
test_required_redirect_next_name = auth.REDIRECT_FIELD_NAME
def get_test_required(self):
"""
A hook to override the test_required value.
The default implementation returns self.test_required.
"""
return self.test_required
def test_required_test(self):
"""
A hook to override the way that the required test is performed.
"""
test = self.get_test_required()
return (
not isinstance(test, collections.Callable) or
test())
def test_required_denied(self, request, *args, **kwargs):
"""
The handler called upon test failure.
"""
return self.deny("test_required")
def get_deny_handler(self):
"""
Returns self.test_required_denied when the test requirement fails,
falling back to super().
"""
return (
not self.test_required_test() and
self.test_required_denied or
super(TestRequired, self).get_deny_handler())
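# A minimal sketch (an assumption, not from the original package) of how these
# behaviors are meant to be stacked on a view class. The base machinery that
# provides deny() and dispatching lives in daydreamer.views.core; here the
# mixins are simply combined and configured through their class attributes.
class _ExampleProtectedView(LoginRequired, StaffRequired, PermissionsRequired):
    # "myapp.change_widget" is a hypothetical permission name used for illustration.
    permissions_required = ("myapp.change_widget",)
    # Deny by raising the configured exception rather than redirecting.
    permissions_required_raise = True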
|
|
#!/usr/bin/env python2
"""
Copyright (C) 2014, Michael Trunner
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of {{ project }} nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import argparse
import datetime
import wiringpi2
wiringpi2.wiringPiSetup()
class TevionCode(object):
_delta_long = 1200
_delta_short = 600
WIRINGPI_OUTPUT_MODE = 1
COMMAND_CODES = {
0: {
'on': (170, 85),
'off': (169, 86),
'brighter': (169, 154),
'darker': (170, 153)
},
1: {
'on': (86, 86),
'off': (85, 85),
'brighter': (85, 153),
'darker': (86, 154)
},
2: {
'on': (150, 90),
'off': (149, 89),
'brighter': (149, 149),
'darker': (150, 150)
},
3: {
'on': (166, 89),
'off': (165, 90),
'brighter': (165, 150),
'darker': (166, 149)
},
4: {
'on': (102, 85),
'off': (101, 86),
'brighter': (101, 154),
'darker': (102, 153)
}
}
def __init__(self, house_code, pin, adj=1):
self.set_house_code(house_code)
self.pin = pin
self.pin_value = 0
self._init_wiringpi()
self.adj = adj
self.toggles = 0
self.duration = 0
def _init_wiringpi(self):
"""
Initializes the wiringpi pin of the 433 module
"""
wiringpi2.pinMode(self.pin, self.WIRINGPI_OUTPUT_MODE)
def get_controll_code(self, outlet_no, command):
"""
Returns the Tevion control code of the given command for
the given remote outlet.
:return: command
:rtype: tuple
"""
return self.COMMAND_CODES[outlet_no][command]
def _toggle_pin_value(self):
"""
Toggles the internal pin state
"""
if self.pin_value == 1:
self.pin_value = 0
else:
self.pin_value = 1
return self.pin_value
def _get_long_delta(self):
"""
Returns the adjusted delta for a long signal (logical one)
"""
return int(self._delta_long * self.adj)
def _get_short_delta(self):
"""
Returns the adjusted delta for a short signal (logical zero)
"""
return int(self._delta_short * self.adj)
def _send_bit(self, value):
"""
Sends the given logical bit
"""
wiringpi2.digitalWrite(self.pin, self._toggle_pin_value())
if value:
wiringpi2.delayMicroseconds(self._get_long_delta())
self.duration += self._delta_long
else:
wiringpi2.delayMicroseconds(self._get_short_delta())
self.duration += self._delta_short
self.toggles += 1
def set_house_code(self, house_code):
"""
Calculates and sets the internal representation of
the tevion house code.
"""
h = []
for n in house_code:
h.extend(self._bitfield(n))
h.append(1) # Parity hack!?!
self._house_code = h
def _bitfield(self, n):
return [1 if digit == '1' else 0 for digit in '{0:08b}'.format(n)]
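# Worked example: _bitfield(170) returns [1, 0, 1, 0, 1, 0, 1, 0], the MSB-first
# 8-bit pattern of 0xAA, so send_code() streams the house code bits followed by
# the two command bytes one long/short pulse at a time.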
def _send_house_code(self):
for h in self._house_code:
self._send_bit(h)
def send_code(self, code):
"""
Sends the given code (tuple)
"""
self._send_house_code()
for c in code:
for bit in self._bitfield(c):
self._send_bit(bit)
def send_command(self, outlet_no, command):
"""
Sends the given command code for the given remote outlet.
"""
self.send_code(self.get_controll_code(outlet_no, command))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Sends 433 MHz codes for the Tevion remote control outlets')
parser.add_argument(
'-p', '--pin', type=int, default=1,
help='Number of the wiringpi pin that should be used',
)
parser.add_argument(
'-hc', '--housecode', type=int, nargs=3, default=[77, 42, 170],
help='The Tevion house code of the outlets.\nDefault is 77, 42, 170.')
parser.add_argument(
'command', type=str, choices=['on', 'off', 'brighter', 'darker'])
parser.add_argument(
'outlet', type=int, nargs='?', default=0, choices=range(1, 5),
help='Number of the power outlet, or all if omitted')
parser.add_argument(
'-r', '--repeat', type=int, default=5,
help='Number of times the given code should be sent.\nDefault is 5.')
parser.add_argument('-d', '--debug', action="store_true",
help='Activates debug output')
parser.add_argument('--adj', type=float, default=1,
help='Adjust the sending speed.')
args = parser.parse_args()
start_time = datetime.datetime.now()
tevion = TevionCode(args.housecode, args.pin, args.adj)
for _i in range(args.repeat):
tevion.send_command(args.outlet, args.command)
if args.debug:
print (datetime.datetime.now() - start_time).total_seconds() * 1000000
print tevion.duration
print tevion.toggles
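# Example invocation (illustrative; the script file name is assumed, and running
# as root for GPIO access is an assumption about the target setup):
#   sudo python2 tevion.py -p 1 on 2
# sends the Tevion "on" code for outlet 2 five times (the default repeat count)
# on wiringpi pin 1, using the default house code 77, 42, 170.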
|
|
#!/usr/bin/python3
#
# Copyright (C) 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.utils.process"""
import os
import select
import shutil
import signal
import stat
import subprocess
import tempfile
import time
import unittest
from ganeti import constants
from ganeti import utils
from ganeti import errors
import testutils
class TestIsProcessAlive(unittest.TestCase):
"""Testing case for IsProcessAlive"""
def testExists(self):
mypid = os.getpid()
self.assertTrue(utils.IsProcessAlive(mypid), "can't find myself running")
def testNotExisting(self):
pid_non_existing = os.fork()
if pid_non_existing == 0:
os._exit(0)
elif pid_non_existing < 0:
raise SystemError("can't fork")
os.waitpid(pid_non_existing, 0)
self.assertFalse(utils.IsProcessAlive(pid_non_existing),
"nonexisting process detected")
class TestGetProcStatusPath(unittest.TestCase):
def test(self):
self.assertTrue("/1234/" in utils.process._GetProcStatusPath(1234))
self.assertNotEqual(utils.process._GetProcStatusPath(1),
utils.process._GetProcStatusPath(2))
class TestIsProcessHandlingSignal(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testParseSigsetT(self):
parse_sigset_t_fn = utils.process._ParseSigsetT
self.assertEqual(len(parse_sigset_t_fn("0")), 0)
self.assertEqual(parse_sigset_t_fn("1"), set([1]))
self.assertEqual(parse_sigset_t_fn("1000a"), set([2, 4, 17]))
self.assertEqual(parse_sigset_t_fn("810002"), set([2, 17, 24, ]))
self.assertEqual(parse_sigset_t_fn("0000000180000202"),
set([2, 10, 32, 33]))
self.assertEqual(parse_sigset_t_fn("0000000180000002"),
set([2, 32, 33]))
self.assertEqual(parse_sigset_t_fn("0000000188000002"),
set([2, 28, 32, 33]))
self.assertEqual(parse_sigset_t_fn("000000004b813efb"),
set([1, 2, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 17,
24, 25, 26, 28, 31]))
self.assertEqual(parse_sigset_t_fn("ffffff"), set(range(1, 25)))
def testGetProcStatusField(self):
for field in ["SigCgt", "Name", "FDSize"]:
for value in ["", "0", "cat", " 1234 KB"]:
pstatus = "\n".join([
"VmPeak: 999 kB",
"%s: %s" % (field, value),
"TracerPid: 0",
])
result = utils.process._GetProcStatusField(pstatus, field)
self.assertEqual(result, value.strip())
def test(self):
sp = utils.PathJoin(self.tmpdir, "status")
utils.WriteFile(sp, data="\n".join([
"Name: bash",
"State: S (sleeping)",
"SleepAVG: 98%",
"Pid: 22250",
"PPid: 10858",
"TracerPid: 0",
"SigBlk: 0000000000010000",
"SigIgn: 0000000000384004",
"SigCgt: 000000004b813efb",
"CapEff: 0000000000000000",
]))
self.assertTrue(utils.IsProcessHandlingSignal(1234, 10, status_path=sp))
def testNoSigCgt(self):
sp = utils.PathJoin(self.tmpdir, "status")
utils.WriteFile(sp, data="\n".join([
"Name: bash",
]))
self.assertRaises(RuntimeError, utils.IsProcessHandlingSignal,
1234, 10, status_path=sp)
def testNoSuchFile(self):
sp = utils.PathJoin(self.tmpdir, "notexist")
self.assertFalse(utils.IsProcessHandlingSignal(1234, 10, status_path=sp))
@staticmethod
def _TestRealProcess():
signal.signal(signal.SIGUSR1, signal.SIG_DFL)
if utils.IsProcessHandlingSignal(os.getpid(), signal.SIGUSR1):
raise Exception("SIGUSR1 is handled when it should not be")
signal.signal(signal.SIGUSR1, lambda signum, frame: None)
if not utils.IsProcessHandlingSignal(os.getpid(), signal.SIGUSR1):
raise Exception("SIGUSR1 is not handled when it should be")
signal.signal(signal.SIGUSR1, signal.SIG_IGN)
if utils.IsProcessHandlingSignal(os.getpid(), signal.SIGUSR1):
raise Exception("SIGUSR1 is not handled when it should be")
signal.signal(signal.SIGUSR1, signal.SIG_DFL)
if utils.IsProcessHandlingSignal(os.getpid(), signal.SIGUSR1):
raise Exception("SIGUSR1 is handled when it should not be")
return True
def testRealProcess(self):
self.assertTrue(utils.RunInSeparateProcess(self._TestRealProcess))
class _PostforkProcessReadyHelper:
"""A helper to use with C{postfork_fn} in RunCmd.
It makes sure a process has reached a certain state by reading from a fifo.
@ivar write_fd: The fd number to write to
"""
def __init__(self, timeout):
"""Initialize the helper.
@param timeout: The time in seconds to wait before giving up
"""
self.timeout = timeout
(self.read_fd, self.write_fd) = os.pipe()
def Ready(self, pid):
"""Waits until the process is ready.
@param pid: The pid of the process
"""
(read_ready, _, _) = select.select([self.read_fd], [], [], self.timeout)
if not read_ready:
# We hit the timeout
raise AssertionError("Timeout %d reached while waiting for process %d"
" to become ready" % (self.timeout, pid))
def Cleanup(self):
"""Cleans up the helper.
"""
os.close(self.read_fd)
os.close(self.write_fd)
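# Usage note: in the tests below the helper's write_fd is kept open in the child
# via RunCmd's noclose_fds argument and Ready() is passed as postfork_fn, so the
# parent blocks until the child shell signals readiness by writing to the pipe.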
class TestRunCmd(testutils.GanetiTestCase):
"""Testing case for the RunCmd function"""
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.magic = time.ctime() + " ganeti test"
self.fname = self._CreateTempFile()
self.fifo_tmpdir = tempfile.mkdtemp()
self.fifo_file = os.path.join(self.fifo_tmpdir, "ganeti_test_fifo")
os.mkfifo(self.fifo_file)
# If the process is not ready after 20 seconds we have bigger issues
self.proc_ready_helper = _PostforkProcessReadyHelper(20)
def tearDown(self):
self.proc_ready_helper.Cleanup()
shutil.rmtree(self.fifo_tmpdir)
testutils.GanetiTestCase.tearDown(self)
def testOk(self):
"""Test successful exit code"""
result = utils.RunCmd("/bin/sh -c 'exit 0'")
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.output, "")
def testFail(self):
"""Test fail exit code"""
result = utils.RunCmd("/bin/sh -c 'exit 1'")
self.assertEqual(result.exit_code, 1)
self.assertEqual(result.output, "")
def testStdout(self):
"""Test standard output"""
cmd = 'echo -n "%s"' % self.magic
result = utils.RunCmd("/bin/sh -c '%s'" % cmd)
self.assertEqual(result.stdout, self.magic)
result = utils.RunCmd("/bin/sh -c '%s'" % cmd, output=self.fname)
self.assertEqual(result.output, "")
self.assertFileContent(self.fname, self.magic)
def testStderr(self):
"""Test standard error"""
cmd = 'echo -n "%s"' % self.magic
result = utils.RunCmd("/bin/sh -c '%s' 1>&2" % cmd)
self.assertEqual(result.stderr, self.magic)
result = utils.RunCmd("/bin/sh -c '%s' 1>&2" % cmd, output=self.fname)
self.assertEqual(result.output, "")
self.assertFileContent(self.fname, self.magic)
def testCombined(self):
"""Test combined output"""
cmd = 'echo -n "A%s"; echo -n "B%s" 1>&2' % (self.magic, self.magic)
expected = "A" + self.magic + "B" + self.magic
result = utils.RunCmd("/bin/sh -c '%s'" % cmd)
self.assertEqual(result.output, expected)
result = utils.RunCmd("/bin/sh -c '%s'" % cmd, output=self.fname)
self.assertEqual(result.output, "")
self.assertFileContent(self.fname, expected)
def testSignal(self):
"""Test signal"""
result = utils.RunCmd(["python3", "-c",
"import os; os.kill(os.getpid(), 15)"])
self.assertEqual(result.signal, 15)
self.assertEqual(result.output, "")
def testTimeoutFlagTrue(self):
result = utils.RunCmd(["sleep", "2"], timeout=0.1)
self.assertTrue(result.failed)
self.assertTrue(result.failed_by_timeout)
def testTimeoutFlagFalse(self):
result = utils.RunCmd(["false"], timeout=5)
self.assertTrue(result.failed)
self.assertFalse(result.failed_by_timeout)
def testTimeoutClean(self):
cmd = ("trap 'exit 0' TERM; echo >&%d; read < %s" %
(self.proc_ready_helper.write_fd, self.fifo_file))
result = utils.RunCmd(["/bin/sh", "-c", cmd], timeout=0.2,
noclose_fds=[self.proc_ready_helper.write_fd],
postfork_fn=self.proc_ready_helper.Ready)
self.assertEqual(result.exit_code, 0)
def testTimeoutKill(self):
cmd = ["/bin/sh", "-c", "trap '' TERM; echo >&%d; read < %s" %
(self.proc_ready_helper.write_fd, self.fifo_file)]
timeout = 0.2
(out, err, status, ta) = \
utils.process._RunCmdPipe(cmd, {}, False, "/", False,
timeout, [self.proc_ready_helper.write_fd],
None,
_linger_timeout=0.2,
postfork_fn=self.proc_ready_helper.Ready)
self.assertTrue(status < 0)
self.assertEqual(-status, signal.SIGKILL)
def testTimeoutOutputAfterTerm(self):
cmd = ("trap 'echo sigtermed; exit 1' TERM; echo >&%d; read < %s" %
(self.proc_ready_helper.write_fd, self.fifo_file))
result = utils.RunCmd(["/bin/sh", "-c", cmd], timeout=0.2,
noclose_fds=[self.proc_ready_helper.write_fd],
postfork_fn=self.proc_ready_helper.Ready)
self.assertTrue(result.failed)
self.assertEqual(result.stdout, "sigtermed\n")
def testListRun(self):
"""Test list runs"""
result = utils.RunCmd(["true"])
self.assertEqual(result.signal, None)
self.assertEqual(result.exit_code, 0)
result = utils.RunCmd(["/bin/sh", "-c", "exit 1"])
self.assertEqual(result.signal, None)
self.assertEqual(result.exit_code, 1)
result = utils.RunCmd(["echo", "-n", self.magic])
self.assertEqual(result.signal, None)
self.assertEqual(result.exit_code, 0)
self.assertEqual(result.stdout, self.magic)
def testFileEmptyOutput(self):
"""Test file output"""
result = utils.RunCmd(["true"], output=self.fname)
self.assertEqual(result.signal, None)
self.assertEqual(result.exit_code, 0)
self.assertFileContent(self.fname, "")
def testLang(self):
"""Test locale environment"""
old_env = os.environ.copy()
try:
os.environ["LANG"] = "en_US.UTF-8"
os.environ["LC_ALL"] = "en_US.UTF-8"
result = utils.RunCmd(["locale"])
for line in result.output.splitlines():
key, value = line.split("=", 1)
# Ignore these variables, they're overridden by LC_ALL
if key == "LANG" or key == "LANGUAGE":
continue
self.assertFalse(value and value != "C" and value != '"C"',
"Variable %s is set to the invalid value '%s'" % (key, value))
finally:
os.environ = old_env
def testDefaultCwd(self):
"""Test default working directory"""
self.assertEqual(utils.RunCmd(["pwd"]).stdout.strip(), "/")
def testCwd(self):
"""Test default working directory"""
self.assertEqual(utils.RunCmd(["pwd"], cwd="/").stdout.strip(), "/")
self.assertEqual(utils.RunCmd(["pwd"], cwd="/tmp").stdout.strip(),
"/tmp")
cwd = os.getcwd()
self.assertEqual(utils.RunCmd(["pwd"], cwd=cwd).stdout.strip(), cwd)
def testResetEnv(self):
"""Test environment reset functionality"""
self.assertEqual(utils.RunCmd(["env"], reset_env=True).stdout.strip(),
"")
self.assertEqual(utils.RunCmd(["env"], reset_env=True,
env={"FOO": "bar",}).stdout.strip(),
"FOO=bar")
def testNoFork(self):
"""Test that nofork raise an error"""
self.assertFalse(utils.process._no_fork)
utils.DisableFork()
try:
self.assertTrue(utils.process._no_fork)
self.assertRaises(errors.ProgrammerError, utils.RunCmd, ["true"])
finally:
utils.process._no_fork = False
self.assertFalse(utils.process._no_fork)
def testWrongParams(self):
"""Test wrong parameters"""
self.assertRaises(errors.ProgrammerError, utils.RunCmd, ["true"],
output="/dev/null", interactive=True)
def testNocloseFds(self):
"""Test selective fd retention (noclose_fds)"""
temp = open(self.fname, "r+")
try:
temp.write("test")
temp.seek(0)
cmd = "read -u %d; echo $REPLY" % temp.fileno()
result = utils.RunCmd(["/bin/bash", "-c", cmd])
self.assertEqual(result.stdout.strip(), "")
temp.seek(0)
result = utils.RunCmd(["/bin/bash", "-c", cmd],
noclose_fds=[temp.fileno()])
self.assertEqual(result.stdout.strip(), "test")
finally:
temp.close()
def testNoInputRead(self):
testfile = testutils.TestDataFilename("cert1.pem")
result = utils.RunCmd(["cat"], timeout=10.0)
self.assertFalse(result.failed)
self.assertEqual(result.stderr, "")
self.assertEqual(result.stdout, "")
def testInputFileHandle(self):
testfile = testutils.TestDataFilename("cert1.pem")
with open(testfile, "r") as input_file:
result = utils.RunCmd(["cat"], input_fd=input_file)
self.assertFalse(result.failed)
self.assertEqual(result.stdout, utils.ReadFile(testfile))
self.assertEqual(result.stderr, "")
def testInputNumericFileDescriptor(self):
testfile = testutils.TestDataFilename("cert2.pem")
fh = open(testfile, "r")
try:
result = utils.RunCmd(["cat"], input_fd=fh.fileno())
finally:
fh.close()
self.assertFalse(result.failed)
self.assertEqual(result.stdout, utils.ReadFile(testfile))
self.assertEqual(result.stderr, "")
def testInputWithCloseFds(self):
testfile = testutils.TestDataFilename("cert1.pem")
temp = open(self.fname, "r+")
try:
temp.write("test283523367")
temp.seek(0)
with open(testfile, "r") as input_file:
result = utils.RunCmd(["/bin/bash", "-c",
("cat && read -u %s; echo $REPLY" %
temp.fileno())],
input_fd=input_file,
noclose_fds=[temp.fileno()])
self.assertFalse(result.failed)
self.assertEqual(result.stdout.strip(),
utils.ReadFile(testfile) + "test283523367")
self.assertEqual(result.stderr, "")
finally:
temp.close()
def testOutputAndInteractive(self):
self.assertRaises(errors.ProgrammerError, utils.RunCmd,
[], output=self.fname, interactive=True)
def testOutputAndInput(self):
with open(self.fname) as input_file:
self.assertRaises(errors.ProgrammerError, utils.RunCmd,
[], output=self.fname, input_fd=input_file)
class TestRunParts(testutils.GanetiTestCase):
"""Testing case for the RunParts function"""
def setUp(self):
self.rundir = tempfile.mkdtemp(prefix="ganeti-test", suffix=".tmp")
def tearDown(self):
shutil.rmtree(self.rundir)
def testEmpty(self):
"""Test on an empty dir"""
self.assertEqual(utils.RunParts(self.rundir, reset_env=True), [])
def testSkipWrongName(self):
"""Test that wrong files are skipped"""
fname = os.path.join(self.rundir, "00test.dot")
utils.WriteFile(fname, data="")
os.chmod(fname, stat.S_IREAD | stat.S_IEXEC)
relname = os.path.basename(fname)
self.assertEqual(utils.RunParts(self.rundir, reset_env=True),
[(relname, constants.RUNPARTS_SKIP, None)])
def testSkipNonExec(self):
"""Test that non executable files are skipped"""
fname = os.path.join(self.rundir, "00test")
utils.WriteFile(fname, data="")
relname = os.path.basename(fname)
self.assertEqual(utils.RunParts(self.rundir, reset_env=True),
[(relname, constants.RUNPARTS_SKIP, None)])
def testError(self):
"""Test error on a broken executable"""
fname = os.path.join(self.rundir, "00test")
utils.WriteFile(fname, data="")
os.chmod(fname, stat.S_IREAD | stat.S_IEXEC)
(relname, status, error) = utils.RunParts(self.rundir, reset_env=True)[0]
self.assertEqual(relname, os.path.basename(fname))
self.assertEqual(status, constants.RUNPARTS_ERR)
self.assertTrue(error)
def testSorted(self):
"""Test executions are sorted"""
files = []
files.append(os.path.join(self.rundir, "64test"))
files.append(os.path.join(self.rundir, "00test"))
files.append(os.path.join(self.rundir, "42test"))
for fname in files:
utils.WriteFile(fname, data="")
results = utils.RunParts(self.rundir, reset_env=True)
for fname in sorted(files):
self.assertEqual(os.path.basename(fname), results.pop(0)[0])
def testOk(self):
"""Test correct execution"""
fname = os.path.join(self.rundir, "00test")
utils.WriteFile(fname, data="#!/bin/sh\n\necho -n ciao")
os.chmod(fname, stat.S_IREAD | stat.S_IEXEC)
(relname, status, runresult) = \
utils.RunParts(self.rundir, reset_env=True)[0]
self.assertEqual(relname, os.path.basename(fname))
self.assertEqual(status, constants.RUNPARTS_RUN)
self.assertEqual(runresult.stdout, "ciao")
def testRunFail(self):
"""Test correct execution, with run failure"""
fname = os.path.join(self.rundir, "00test")
utils.WriteFile(fname, data="#!/bin/sh\n\nexit 1")
os.chmod(fname, stat.S_IREAD | stat.S_IEXEC)
(relname, status, runresult) = \
utils.RunParts(self.rundir, reset_env=True)[0]
self.assertEqual(relname, os.path.basename(fname))
self.assertEqual(status, constants.RUNPARTS_RUN)
self.assertEqual(runresult.exit_code, 1)
self.assertTrue(runresult.failed)
def testRunMix(self):
files = []
files.append(os.path.join(self.rundir, "00test"))
files.append(os.path.join(self.rundir, "42test"))
files.append(os.path.join(self.rundir, "64test"))
files.append(os.path.join(self.rundir, "99test"))
files.sort()
# 1st has errors in execution
utils.WriteFile(files[0], data="#!/bin/sh\n\nexit 1")
os.chmod(files[0], stat.S_IREAD | stat.S_IEXEC)
# 2nd is skipped
utils.WriteFile(files[1], data="")
# 3rd cannot execute properly
utils.WriteFile(files[2], data="")
os.chmod(files[2], stat.S_IREAD | stat.S_IEXEC)
# 4th execs
utils.WriteFile(files[3], data="#!/bin/sh\n\necho -n ciao")
os.chmod(files[3], stat.S_IREAD | stat.S_IEXEC)
results = utils.RunParts(self.rundir, reset_env=True)
(relname, status, runresult) = results[0]
self.assertEqual(relname, os.path.basename(files[0]))
self.assertEqual(status, constants.RUNPARTS_RUN)
self.assertEqual(runresult.exit_code, 1)
self.assertTrue(runresult.failed)
(relname, status, runresult) = results[1]
self.assertEqual(relname, os.path.basename(files[1]))
self.assertEqual(status, constants.RUNPARTS_SKIP)
self.assertEqual(runresult, None)
(relname, status, runresult) = results[2]
self.assertEqual(relname, os.path.basename(files[2]))
self.assertEqual(status, constants.RUNPARTS_ERR)
self.assertTrue(runresult)
(relname, status, runresult) = results[3]
self.assertEqual(relname, os.path.basename(files[3]))
self.assertEqual(status, constants.RUNPARTS_RUN)
self.assertEqual(runresult.output, "ciao")
self.assertEqual(runresult.exit_code, 0)
self.assertTrue(not runresult.failed)
def testMissingDirectory(self):
nosuchdir = utils.PathJoin(self.rundir, "no/such/directory")
self.assertEqual(utils.RunParts(nosuchdir), [])
class TestStartDaemon(testutils.GanetiTestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix="ganeti-test")
self.tmpfile = os.path.join(self.tmpdir, "test")
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testShell(self):
utils.StartDaemon("echo Hello World > %s" % self.tmpfile)
self._wait(self.tmpfile, 60.0, "Hello World")
def testShellOutput(self):
utils.StartDaemon("echo Hello World", output=self.tmpfile)
self._wait(self.tmpfile, 60.0, "Hello World")
def testNoShellNoOutput(self):
utils.StartDaemon(["pwd"])
def testNoShellNoOutputTouch(self):
testfile = os.path.join(self.tmpdir, "check")
self.assertFalse(os.path.exists(testfile))
utils.StartDaemon(["touch", testfile])
self._wait(testfile, 60.0, "")
def testNoShellOutput(self):
utils.StartDaemon(["pwd"], output=self.tmpfile)
self._wait(self.tmpfile, 60.0, "/")
def testNoShellOutputCwd(self):
utils.StartDaemon(["pwd"], output=self.tmpfile, cwd=os.getcwd())
self._wait(self.tmpfile, 60.0, os.getcwd())
def testShellEnv(self):
utils.StartDaemon("echo \"$GNT_TEST_VAR\"", output=self.tmpfile,
env={ "GNT_TEST_VAR": "Hello World", })
self._wait(self.tmpfile, 60.0, "Hello World")
def testNoShellEnv(self):
utils.StartDaemon(["printenv", "GNT_TEST_VAR"], output=self.tmpfile,
env={ "GNT_TEST_VAR": "Hello World", })
self._wait(self.tmpfile, 60.0, "Hello World")
def testOutputFd(self):
fd = os.open(self.tmpfile, os.O_WRONLY | os.O_CREAT)
try:
utils.StartDaemon(["pwd"], output_fd=fd, cwd=os.getcwd())
finally:
os.close(fd)
self._wait(self.tmpfile, 60.0, os.getcwd())
def testPid(self):
pid = utils.StartDaemon("echo $$ > %s" % self.tmpfile)
self._wait(self.tmpfile, 60.0, str(pid))
def testPidFile(self):
pidfile = os.path.join(self.tmpdir, "pid")
checkfile = os.path.join(self.tmpdir, "abort")
pid = utils.StartDaemon("while sleep 5; do :; done", pidfile=pidfile,
output=self.tmpfile)
try:
fd = os.open(pidfile, os.O_RDONLY)
try:
# Check file is locked
self.assertRaises(errors.LockError, utils.LockFile, fd)
pidtext = os.read(fd, 100)
finally:
os.close(fd)
self.assertEqual(int(pidtext.strip()), pid)
self.assertTrue(utils.IsProcessAlive(pid))
finally:
# No matter what happens, kill daemon
utils.KillProcess(pid, timeout=5.0, waitpid=False)
self.assertFalse(utils.IsProcessAlive(pid))
self.assertEqual(utils.ReadFile(self.tmpfile), "")
def _wait(self, path, timeout, expected):
# Due to the asynchronous nature of daemon processes, polling is necessary.
# A timeout makes sure the test doesn't hang forever.
def _CheckFile():
if not (os.path.isfile(path) and
utils.ReadFile(path).strip() == expected):
raise utils.RetryAgain()
try:
utils.Retry(_CheckFile, (0.01, 1.5, 1.0), timeout)
except utils.RetryTimeout:
self.fail("Apparently the daemon didn't run in %s seconds and/or"
" didn't write the correct output" % timeout)
def testError(self):
self.assertRaises(errors.OpExecError, utils.StartDaemon,
["./does-NOT-EXIST/here/0123456789"])
self.assertRaises(errors.OpExecError, utils.StartDaemon,
["./does-NOT-EXIST/here/0123456789"],
output=os.path.join(self.tmpdir, "DIR/NOT/EXIST"))
self.assertRaises(errors.OpExecError, utils.StartDaemon,
["./does-NOT-EXIST/here/0123456789"],
cwd=os.path.join(self.tmpdir, "DIR/NOT/EXIST"))
self.assertRaises(errors.OpExecError, utils.StartDaemon,
["./does-NOT-EXIST/here/0123456789"],
output=os.path.join(self.tmpdir, "DIR/NOT/EXIST"))
fd = os.open(self.tmpfile, os.O_WRONLY | os.O_CREAT)
try:
self.assertRaises(errors.ProgrammerError, utils.StartDaemon,
["./does-NOT-EXIST/here/0123456789"],
output=self.tmpfile, output_fd=fd)
finally:
os.close(fd)
class RunInSeparateProcess(unittest.TestCase):
def test(self):
for exp in [True, False]:
def _child():
return exp
self.assertEqual(exp, utils.RunInSeparateProcess(_child))
def testArgs(self):
for arg in [0, 1, 999, "Hello World", (1, 2, 3)]:
def _child(carg1, carg2):
return carg1 == "Foo" and carg2 == arg
self.assertTrue(utils.RunInSeparateProcess(_child, "Foo", arg))
def testPid(self):
parent_pid = os.getpid()
def _check():
return os.getpid() == parent_pid
self.assertFalse(utils.RunInSeparateProcess(_check))
def testSignal(self):
def _kill():
os.kill(os.getpid(), signal.SIGTERM)
self.assertRaises(errors.GenericError,
utils.RunInSeparateProcess, _kill)
def testException(self):
def _exc():
raise errors.GenericError("This is a test")
self.assertRaises(errors.GenericError,
utils.RunInSeparateProcess, _exc)
class GetCmdline(unittest.TestCase):
def test(self):
sample_cmd = "sleep 20; true"
child = subprocess.Popen(sample_cmd, shell=True)
pid = child.pid
cmdline = utils.GetProcCmdline(pid)
# As the popen will quote and pass on the sample_cmd, it should be returned
# by the function as an element in the list of arguments
self.assertTrue(sample_cmd in cmdline)
child.kill()
child.wait()
if __name__ == "__main__":
testutils.GanetiTestProgram()
|
|
from numpy.testing import *
from scipy.interpolate import KroghInterpolator, krogh_interpolate, \
BarycentricInterpolator, barycentric_interpolate, \
PiecewisePolynomial, piecewise_polynomial_interpolate, \
approximate_taylor_polynomial
import scipy
import numpy as np
from scipy.interpolate import splrep, splev
class CheckKrogh(TestCase):
def setUp(self):
self.true_poly = scipy.poly1d([-2,3,1,5,-4])
self.test_xs = np.linspace(-1,1,100)
self.xs = np.linspace(-1,1,5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_scalar(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(7),P(7))
def test_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs)
for i in xrange(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_low_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs,len(self.xs)+2)
for i in xrange(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
m = 10
r = P.derivatives(self.test_xs,m)
for i in xrange(m):
assert_almost_equal(P.derivative(self.test_xs,i),r[i])
def test_high_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
for i in xrange(len(self.xs),2*len(self.xs)):
assert_almost_equal(P.derivative(self.test_xs,i),
np.zeros(len(self.test_xs)))
def test_hermite(self):
xs = [0,0,0,1,1,1,2]
ys = [self.true_poly(0),
self.true_poly.deriv(1)(0),
self.true_poly.deriv(2)(0),
self.true_poly(1),
self.true_poly.deriv(1)(1),
self.true_poly.deriv(2)(1),
self.true_poly(2)]
# Use the locally built Hermite data (repeated abscissae encode derivatives).
P = KroghInterpolator(xs,ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0,1],[1,0],[2,1]])
P = KroghInterpolator(xs,ys)
Pi = [KroghInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
assert_almost_equal(P.derivatives(test_xs),
np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
(1,2,0)))
def test_empty(self):
P = KroghInterpolator(self.xs,self.ys)
assert_array_equal(P([]), [])
def test_shapes_scalarvalue(self):
P = KroghInterpolator(self.xs,self.ys)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_scalarvalue_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
n = P.n
assert_array_equal(np.shape(P.derivatives(0)), (n,))
assert_array_equal(np.shape(P.derivatives([0])), (n,1))
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
def test_shapes_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])), (1,3))
assert_array_equal(np.shape(P([0,1])), (2,3))
def test_shapes_1d_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
assert_array_equal(np.shape(P(0)), (1,))
assert_array_equal(np.shape(P([0])), (1,1))
assert_array_equal(np.shape(P([0,1])), (2,1))
def test_shapes_vectorvalue_derivative(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
n = P.n
assert_array_equal(np.shape(P.derivatives(0)), (n,3))
assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
def test_wrapper(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(P(self.test_xs),krogh_interpolate(self.xs,self.ys,self.test_xs))
assert_almost_equal(P.derivative(self.test_xs,2),krogh_interpolate(self.xs,self.ys,self.test_xs,der=2))
assert_almost_equal(P.derivatives(self.test_xs,2),krogh_interpolate(self.xs,self.ys,self.test_xs,der=[0,1]))
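# Illustrative sketch (not part of the original suite): KroghInterpolator
# treats repeated abscissae as derivative specifications, which is what
# test_hermite above relies on.  Assuming scipy's documented behaviour,
# the values below are what one would expect:
#
#   xs = [0, 0, 1]              # f(0), f'(0), f(1)
#   ys = [1.0, 2.0, 3.0]
#   P = KroghInterpolator(xs, ys)
#   P(0), P.derivative(0, 1)    # -> approximately (1.0, 2.0)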
class CheckTaylor(TestCase):
def test_exponential(self):
degree = 5
p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
for i in xrange(degree+1):
assert_almost_equal(p(0),1)
p = p.deriv()
assert_almost_equal(p(0),0)
class CheckBarycentric(TestCase):
def setUp(self):
self.true_poly = scipy.poly1d([-2,3,1,5,-4])
self.test_xs = np.linspace(-1,1,100)
self.xs = np.linspace(-1,1,5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_scalar(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(7),P(7))
def test_delayed(self):
P = BarycentricInterpolator(self.xs)
P.set_yi(self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_append(self):
P = BarycentricInterpolator(self.xs[:3],self.ys[:3])
P.add_xi(self.xs[3:],self.ys[3:])
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0,1],[1,0],[2,1]])
P = BarycentricInterpolator(xs,ys)
Pi = [BarycentricInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
def test_shapes_scalarvalue(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_vectorvalue(self):
P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])), (1,3))
assert_array_equal(np.shape(P([0,1])), (2,3))
def test_shapes_1d_vectorvalue(self):
P = BarycentricInterpolator(self.xs,np.outer(self.ys,[1]))
assert_array_equal(np.shape(P(0)), (1,))
assert_array_equal(np.shape(P([0])), (1,1))
assert_array_equal(np.shape(P([0,1])), (2,1))
def test_wrapper(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(P(self.test_xs),barycentric_interpolate(self.xs,self.ys,self.test_xs))
class CheckPiecewise(TestCase):
def setUp(self):
self.tck = splrep([0,1,2,3,4,5],[0,10,-1,3,7,2],s=0)
self.test_xs = np.linspace(-1,6,100)
self.spline_ys = splev(self.test_xs, self.tck)
self.spline_yps = splev(self.test_xs, self.tck, der=1)
self.xi = np.unique(self.tck[0])
self.yi = [[splev(x,self.tck,der=j) for j in xrange(3)] for x in self.xi]
def test_construction(self):
P = PiecewisePolynomial(self.xi,self.yi,3)
assert_almost_equal(P(self.test_xs),self.spline_ys)
def test_scalar(self):
P = PiecewisePolynomial(self.xi,self.yi,3)
assert_almost_equal(P(self.test_xs[0]),self.spline_ys[0])
assert_almost_equal(P.derivative(self.test_xs[0],1),self.spline_yps[0])
def test_derivative(self):
P = PiecewisePolynomial(self.xi,self.yi,3)
assert_almost_equal(P.derivative(self.test_xs,1),self.spline_yps)
def test_derivatives(self):
P = PiecewisePolynomial(self.xi,self.yi,3)
m = 4
r = P.derivatives(self.test_xs,m)
#print r.shape, r
for i in xrange(m):
assert_almost_equal(P.derivative(self.test_xs,i),r[i])
def test_vector(self):
xs = [0, 1, 2]
ys = [[[0,1]],[[1,0],[-1,-1]],[[2,1]]]
P = PiecewisePolynomial(xs,ys)
Pi = [PiecewisePolynomial(xs,[[yd[i] for yd in y] for y in ys])
for i in xrange(len(ys[0][0]))]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
assert_almost_equal(P.derivative(test_xs,1),
np.transpose(np.asarray([p.derivative(test_xs,1) for p in Pi]),
(1,0)))
def test_incremental(self):
P = PiecewisePolynomial([self.xi[0]], [self.yi[0]], 3)
for i in xrange(1,len(self.xi)):
P.append(self.xi[i],self.yi[i],3)
assert_almost_equal(P(self.test_xs),self.spline_ys)
def test_shapes_scalarvalue(self):
P = PiecewisePolynomial(self.xi,self.yi,4)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_scalarvalue_derivative(self):
P = PiecewisePolynomial(self.xi,self.yi,4)
n = 4
assert_array_equal(np.shape(P.derivative(0,1)), ())
assert_array_equal(np.shape(P.derivative([0],1)), (1,))
assert_array_equal(np.shape(P.derivative([0,1],1)), (2,))
def test_shapes_vectorvalue(self):
yi = np.multiply.outer(np.asarray(self.yi),np.arange(3))
P = PiecewisePolynomial(self.xi,yi,4)
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])), (1,3))
assert_array_equal(np.shape(P([0,1])), (2,3))
def test_shapes_vectorvalue_1d(self):
yi = np.multiply.outer(np.asarray(self.yi),np.arange(1))
P = PiecewisePolynomial(self.xi,yi,4)
assert_array_equal(np.shape(P(0)), (1,))
assert_array_equal(np.shape(P([0])), (1,1))
assert_array_equal(np.shape(P([0,1])), (2,1))
def test_shapes_vectorvalue_derivative(self):
P = PiecewisePolynomial(self.xi,np.multiply.outer(self.yi,np.arange(3)),4)
n = 4
assert_array_equal(np.shape(P.derivative(0,1)), (3,))
assert_array_equal(np.shape(P.derivative([0],1)), (1,3))
assert_array_equal(np.shape(P.derivative([0,1],1)), (2,3))
def test_wrapper(self):
P = PiecewisePolynomial(self.xi,self.yi)
assert_almost_equal(P(self.test_xs),piecewise_polynomial_interpolate(self.xi,self.yi,self.test_xs))
assert_almost_equal(P.derivative(self.test_xs,2),piecewise_polynomial_interpolate(self.xi,self.yi,self.test_xs,der=2))
assert_almost_equal(P.derivatives(self.test_xs,2),piecewise_polynomial_interpolate(self.xi,self.yi,self.test_xs,der=[0,1]))
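# Illustrative sketch (not part of the original suite): PiecewisePolynomial
# expects, for every breakpoint, the value followed by successive derivatives,
# exactly how setUp builds self.yi from the spline.  A hypothetical minimal
# input would look like:
#
#   xi = [0.0, 1.0]
#   yi = [[0.0, 1.0],           # f(0) = 0,  f'(0) = 1
#         [1.0, -1.0]]          # f(1) = 1,  f'(1) = -1
#   P = PiecewisePolynomial(xi, yi, 3)
#   P(0.5)                      # cubic Hermite value between the breakpoints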
if __name__=='__main__':
run_module_suite()
|
|
# Copyright 2014, Doug Wiegley (dougwig), A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import a10_neutron_lbaas.a10_exceptions as a10_ex
from a10_neutron_lbaas.tests.unit.v2 import fake_objs
from a10_neutron_lbaas.tests.unit.v2 import test_base
class TestLB(test_base.HandlerTestBase):
def test_create(self):
m = fake_objs.FakeLoadBalancer()
self.a.lb.create(None, m)
s = str(self.a.last_client.mock_calls)
self.assertTrue('call.slb.virtual_server.create' in s)
self.assertTrue('fake-lb-id-001' in s)
self.assertTrue('5.5.5.5' in s)
self.assertTrue('UP' in s)
def test_create_default_vrid_none_v21(self):
self._test_create_default_vrid("2.1", None)
def test_create_default_vrid_set_v21(self):
self._test_create_default_vrid("2.1", 7)
def test_create_default_vrid_none_v30(self):
self._test_create_default_vrid("3.0", None)
def test_create_default_vrid_set_v30(self):
self._test_create_default_vrid("3.0", 7)
def _test_create_default_vrid(self, api_ver=None, default_vrid=None):
"""
Due to how the config is pulled in, we override the config
for all of the devices.
"""
for k, v in self.a.config.get_devices().items():
v['api_version'] = api_ver
v['default_virtual_server_vrid'] = default_vrid
lb = fake_objs.FakeLoadBalancer()
self.a.lb.create(None, lb)
create = self.a.last_client.slb.virtual_server.create
create.assert_has_calls([mock.ANY])
calls = create.call_args_list
if default_vrid is not None:
self.assertIn('vrid=%s' % default_vrid, str(calls))
if default_vrid is None:
foundVrid = any(
'vrid' in x.get('axapi_args', {}).get('virtual_server', {})
for (_, x) in calls)
self.assertFalse(
foundVrid,
'Expected to find no vrid in {0}'.format(str(calls)))
def _test_create_template_virtual_server(self, api_ver=None, template_virtual_server=None):
"""
Due to how the config is pulled in, we override the config
for all of the devices.
"""
for k, v in self.a.config.get_devices().items():
v['api_version'] = api_ver
v['template-virtual-server'] = template_virtual_server
lb = fake_objs.FakeLoadBalancer()
self.a.lb.create(None, lb)
create = self.a.last_client.slb.virtual_server.create
create.assert_has_calls([mock.ANY])
calls = create.call_args_list
if template_virtual_server is not None:
self.assertIn("template_virtual_server='%s'" % template_virtual_server, str(calls))
if template_virtual_server is None:
foundVrid = any(
'template_virtual_server' in x.get('axapi_args', {}).get('virtual_server', {})
for (_, x) in calls)
self.assertFalse(
foundVrid,
'Expected to find no template-virtual-server in {0}'.format(str(calls)))
def test_create_template_virtual_server(self):
self._test_create_template_virtual_server("2.1", None)
def test_create_default_vrid_set_v21_with_template(self):
self._test_create_template_virtual_server("2.1", "testTemplate")
def test_create_default_vrid_none_v30_with_template(self):
self._test_create_template_virtual_server("3.0", None)
def test_create_default_vrid_set_v30_with_template(self):
self._test_create_template_virtual_server("3.0", "testTemplate")
def _test_create_virtual_server_templates(self, api_ver="3.0",
virtual_server_templates=None, update=False):
for k, v in self.a.config.get_devices().items():
v['api_version'] = api_ver
v['templates'] = virtual_server_templates
lb = fake_objs.FakeLoadBalancer()
if update:
self.a.lb.update(None, lb, lb)
create = self.a.last_client.slb.virtual_server.update
else:
self.a.lb.create(None, lb)
create = self.a.last_client.slb.virtual_server.create
create.assert_has_calls([mock.ANY])
calls = create.call_args_list
if virtual_server_templates is not None:
self.assertIn('test-template-virtual-server', str(calls))
self.assertIn('test-template-logging', str(calls))
self.assertIn('test-policy', str(calls))
if virtual_server_templates is None:
foundVrid = any(
'virtual_server_templates' in x.get('axapi_args', {}).get('virtual_server', {})
for (_, x) in calls)
self.assertFalse(
foundVrid,
'Expected to find no virtual_server_templates in {0}'.format(str(calls)))
def test_create_with_template_virtual_server(self):
template = {
"virtual-server": {
"template-virtual-server": "test-template-virtual-server",
"template-logging": "test-template-logging",
"template-policy": "test-policy",
"template-scaleout": "test-scaleout",
}
}
self._test_create_virtual_server_templates("3.0", virtual_server_templates=template)
def test_update_with_template_virtual_server(self):
template = {
"virtual-server": {
"template-virtual-server": "test-template-virtual-server",
"template-logging": "test-template-logging",
"template-policy": "test-policy",
"template-scaleout": "test-scaleout",
}
}
self._test_create_virtual_server_templates("3.0",
virtual_server_templates=template,
update=True)
# There's no code that causes listeners to be added
# if they are present when the pool is created.
# We'd use unittest.skip if it worked with cursed 2.6
# def test_create_with_listeners(self):
# pool = test_base.FakePool('HTTP', 'ROUND_ROBIN', None)
# m = test_base.FakeLoadBalancer()
# for x in [1, 2, 3]:
# z = test_base.FakeListener('TCP', 2222+x, pool=pool,
# loadbalancer=m)
# m.listeners.append(z)
# self.a.lb.create(None, m)
# s = str(self.a.last_client.mock_calls)
# print ("LAST CALLS {0}".format(s))
# self.assertTrue('call.slb.virtual_server.create' in s)
# self.assertTrue('fake-lb-id-001' in s)
# self.assertTrue('5.5.5.5' in s)
# self.assertTrue('UP' in s)
# self.assertTrue('vport.create' in s)
# for x in [1, 2, 3]:
# self.assertTrue(str(2222+x) in s)
def test_update_down(self):
m = fake_objs.FakeLoadBalancer()
m.admin_state_up = False
self.a.lb.update(None, m, m)
s = str(self.a.last_client.mock_calls)
self.assertTrue('call.slb.virtual_server.update' in s)
self.assertTrue('fake-lb-id-001' in s)
self.assertTrue('5.5.5.5' in s)
self.assertTrue('DOWN' in s)
def test_delete(self):
m = fake_objs.FakeLoadBalancer()
self.a.lb.delete(None, m)
s = str(self.a.last_client.mock_calls)
self.assertTrue('call.slb.virtual_server.delete' in s)
self.assertTrue(m.id in s)
def test_delete_removes_slb(self):
m = fake_objs.FakeLoadBalancer()
self.a.lb.delete(None, m)
def test_refresh(self):
try:
self.a.lb.refresh(None, fake_objs.FakeLoadBalancer())
except a10_ex.UnsupportedFeature:
pass
def test_stats_v30(self):
test_lb = fake_objs.FakeLoadBalancer()
test_lb.stats_v30()
c = mock.MagicMock()
c.client.slb.virtual_server.get = mock.Mock(return_value=test_lb.virt_server)
c.client.slb.service_group.stats = mock.Mock(return_value=test_lb.service_group)
c.client.slb.service_group.get = mock.Mock(return_value=test_lb.members)
ret_val = self.a.lb._stats_v30(c, test_lb.port_list, None)
self.print_mocks()
self.assertEqual(ret_val, test_lb.ret_stats_v30)
def test_stats_v21(self):
test_lb = fake_objs.FakeLoadBalancer()
test_lb.stats_v21()
c = mock.MagicMock()
c.client.slb.virtual_service.get = mock.Mock(return_value=test_lb.virt_service)
c.client.slb.service_group.stats = mock.Mock(return_value=test_lb.serv_group)
ret_val = self.a.lb._stats_v21(c, test_lb.virt_server)
self.print_mocks()
self.assertEqual(ret_val, test_lb.ret_stats)
def test_stats(self):
test_lb = fake_objs.FakeLoadBalancer()
self.a.lb.stats(None, test_lb)
self.print_mocks()
s = str(self.a.last_client.mock_calls)
self.assertTrue('call.slb.virtual_server.stats' in s)
def do_raise_exception(self, e, msg="mock raised exception"):
def raise_exception(e, msg="acos broke!"):
raise e(msg)
return lambda *args, **kwargs: raise_exception(e, msg)
def _test_create_expressions(self, os_name, pattern, expressions=None):
self.a.config.get_virtual_server_expressions = self._get_expressions_mock
expressions = expressions or self.a.config.get_virtual_server_expressions()
expected = expressions.get(pattern, {}).get("json", None) or ""
m = fake_objs.FakeLoadBalancer()
m.name = os_name
handler = self.a.lb
handler.create(None, m)
s = str(self.a.last_client.mock_calls)
self.assertIn("virtual_server.create", s)
self.assertIn(str(expected), s)
def test_create_expressions_none(self):
self._test_create_expressions("mylb", None, {})
def test_create_expressions_match_beginning(self):
self._test_create_expressions("securelb", self.EXPR_BEGIN)
def test_create_expressions_match_end(self):
self._test_create_expressions("lbweb", self.EXPR_END)
def test_create_expressions_match_charclass(self):
self._test_create_expressions("lbwwlb", self.EXPR_CLASS)
def test_create_expressions_nomatch(self):
self.a.config.get_virtual_server_expressions = self._get_expressions_mock
expressions = self.a.config.get_virtual_server_expressions()
expected = expressions.get(self.EXPR_BEGIN, {}).get("json", None) or ""
m = fake_objs.FakeLoadBalancer()
m.name = "mylb"
handler = self.a.lb
handler.create(None, m)
s = str(self.a.last_client.mock_calls)
self.assertIn("virtual_server.create", s)
self.assertNotIn(str(expected), s)
def test_create_noname_noexception(self):
self.a.config.get_virtual_server_expressions = self._get_expressions_mock
expressions = self.a.config.get_virtual_server_expressions()
expected = expressions.get(self.EXPR_BEGIN, {}).get("json", None) or ""
m = fake_objs.FakeLoadBalancer()
m.name = None
handler = self.a.lb
handler.create(None, m)
s = str(self.a.last_client.mock_calls)
self.assertIn("virtual_server.create", s)
self.assertNotIn(str(expected), s)
|
|
import datetime
import json
import re
import sys
import time
from email.header import Header
from http.client import responses
from urllib.parse import urlparse
from django.conf import settings
from django.core import signals, signing
from django.core.exceptions import DisallowedRedirect
from django.core.serializers.json import DjangoJSONEncoder
from django.http.cookie import SimpleCookie
from django.utils import timezone
from django.utils.encoding import force_bytes, force_text, iri_to_uri
from django.utils.http import cookie_date
_charset_from_content_type_re = re.compile(r';\s*charset=(?P<charset>[^\s;]+)', re.I)
class BadHeaderError(ValueError):
pass
class HttpResponseBase:
"""
An HTTP response base class with dictionary-accessed headers.
This class doesn't handle content. It should not be used directly.
Use the HttpResponse and StreamingHttpResponse subclasses instead.
"""
status_code = 200
def __init__(self, content_type=None, status=None, reason=None, charset=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._closable_objects = []
# This parameter is set by the handler. It's necessary to preserve the
# historical behavior of request_finished.
self._handler_class = None
self.cookies = SimpleCookie()
self.closed = False
if status is not None:
try:
self.status_code = int(status)
except (ValueError, TypeError):
raise TypeError('HTTP status code must be an integer.')
if not 100 <= self.status_code <= 599:
raise ValueError('HTTP status code must be an integer from 100 to 599.')
self._reason_phrase = reason
self._charset = charset
if content_type is None:
content_type = '%s; charset=%s' % (settings.DEFAULT_CONTENT_TYPE,
self.charset)
self['Content-Type'] = content_type
@property
def reason_phrase(self):
if self._reason_phrase is not None:
return self._reason_phrase
# Leave self._reason_phrase unset in order to use the default
# reason phrase for status code.
return responses.get(self.status_code, 'Unknown Status Code')
@reason_phrase.setter
def reason_phrase(self, value):
self._reason_phrase = value
@property
def charset(self):
if self._charset is not None:
return self._charset
content_type = self.get('Content-Type', '')
matched = _charset_from_content_type_re.search(content_type)
if matched:
# Extract the charset and strip its double quotes
return matched.group('charset').replace('"', '')
return settings.DEFAULT_CHARSET
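# For example, a Content-Type of 'text/html; charset="utf-8"' yields 'utf-8';
# without a charset parameter the getter falls back to settings.DEFAULT_CHARSET.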
@charset.setter
def charset(self, value):
self._charset = value
def serialize_headers(self):
"""HTTP headers as a bytestring."""
def to_bytes(val, encoding):
return val if isinstance(val, bytes) else val.encode(encoding)
headers = [
(b': '.join([to_bytes(key, 'ascii'), to_bytes(value, 'latin-1')]))
for key, value in self._headers.values()
]
return b'\r\n'.join(headers)
__bytes__ = serialize_headers
@property
def _content_type_for_repr(self):
return ', "%s"' % self['Content-Type'] if 'Content-Type' in self else ''
def _convert_to_charset(self, value, charset, mime_encode=False):
"""Converts headers key/value to ascii/latin-1 native strings.
`charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
`value` can't be represented in the given charset, MIME-encoding
is applied.
"""
if not isinstance(value, (bytes, str)):
value = str(value)
if ((isinstance(value, bytes) and (b'\n' in value or b'\r' in value)) or
isinstance(value, str) and ('\n' in value or '\r' in value)):
raise BadHeaderError("Header values can't contain newlines (got %r)" % value)
try:
if isinstance(value, str):
# Ensure string is valid in given charset
value.encode(charset)
else:
# Convert bytestring using given charset
value = value.decode(charset)
except UnicodeError as e:
if mime_encode:
value = Header(value, 'utf-8', maxlinelen=sys.maxsize).encode()
else:
e.reason += ', HTTP response headers must be in %s format' % charset
raise
return value
def __setitem__(self, header, value):
header = self._convert_to_charset(header, 'ascii')
value = self._convert_to_charset(value, 'latin-1', mime_encode=True)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass
def __getitem__(self, header):
return self._headers[header.lower()][1]
def has_header(self, header):
"""Case-insensitive check for a header."""
return header.lower() in self._headers
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate=None):
return self._headers.get(header.lower(), (None, alternate))[1]
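# Headers behave like a case-insensitive dict, e.g. (illustrative only):
#
#   response['X-Frame-Options'] = 'DENY'
#   'x-frame-options' in response       # -> True
#   response.get('X-FRAME-OPTIONS')     # -> 'DENY'
#   del response['X-Frame-Options']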
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
"""
Sets a cookie.
``expires`` can be:
- a string in the correct format,
- a naive ``datetime.datetime`` object in UTC,
- an aware ``datetime.datetime`` object in any time zone.
If it is a ``datetime.datetime`` object then ``max_age`` will be calculated.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
if timezone.is_aware(expires):
expires = timezone.make_naive(expires, timezone.utc)
delta = expires - datetime.datetime.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
else:
self.cookies[key]['expires'] = ''
if max_age is not None:
self.cookies[key]['max-age'] = max_age
# IE requires expires, so set it if it hasn't been set already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
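# Illustrative usage sketch (not part of this module; the cookie names are
# hypothetical):
#
#   response = HttpResponse("ok")
#   response.set_cookie('sessionid', 'abc123', max_age=3600,
#                       secure=True, httponly=True)
#   # or, with an aware datetime from which max_age is derived:
#   response.set_cookie('sessionid', 'abc123',
#                       expires=timezone.now() + datetime.timedelta(hours=1))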
def setdefault(self, key, value):
"""Sets a header unless it has already been set."""
if key not in self:
self[key] = value
def set_signed_cookie(self, key, value, salt='', **kwargs):
value = signing.get_cookie_signer(salt=key + salt).sign(value)
return self.set_cookie(key, value, **kwargs)
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
# Common methods used by subclasses
def make_bytes(self, value):
"""Turn a value into a bytestring encoded in the output charset."""
# Per PEP 3333, this response body must be bytes. To avoid returning
# an instance of a subclass, this function returns `bytes(value)`.
# This doesn't make a copy when `value` already contains bytes.
# Handle string types -- we can't rely on force_bytes here because:
# - Python attempts str conversion first
# - when self._charset != 'utf-8' it re-encodes the content
if isinstance(value, bytes):
return bytes(value)
if isinstance(value, str):
return bytes(value.encode(self.charset))
# Handle non-string types (#16494)
return force_bytes(value, self.charset)
# These methods partially implement the file-like object interface.
# See https://docs.python.org/3/library/io.html#io.IOBase
# The WSGI server must call this method upon completion of the request.
# See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
def close(self):
for closable in self._closable_objects:
try:
closable.close()
except Exception:
pass
self.closed = True
signals.request_finished.send(sender=self._handler_class)
def write(self, content):
raise IOError("This %s instance is not writable" % self.__class__.__name__)
def flush(self):
pass
def tell(self):
raise IOError("This %s instance cannot tell its position" % self.__class__.__name__)
# These methods partially implement a stream-like object interface.
# See https://docs.python.org/library/io.html#io.IOBase
def readable(self):
return False
def seekable(self):
return False
def writable(self):
return False
def writelines(self, lines):
raise IOError("This %s instance is not writable" % self.__class__.__name__)
class HttpResponse(HttpResponseBase):
"""
An HTTP response class with a string as content.
This content can be read, appended to or replaced.
"""
streaming = False
def __init__(self, content=b'', *args, **kwargs):
super(HttpResponse, self).__init__(*args, **kwargs)
# Content is a bytestring. See the `content` property methods.
self.content = content
def __repr__(self):
return '<%(cls)s status_code=%(status_code)d%(content_type)s>' % {
'cls': self.__class__.__name__,
'status_code': self.status_code,
'content_type': self._content_type_for_repr,
}
def serialize(self):
"""Full HTTP message, including headers, as a bytestring."""
return self.serialize_headers() + b'\r\n\r\n' + self.content
__bytes__ = serialize
@property
def content(self):
return b''.join(self._container)
@content.setter
def content(self, value):
# Consume iterators upon assignment to allow repeated iteration.
if hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):
content = b''.join(self.make_bytes(chunk) for chunk in value)
if hasattr(value, 'close'):
try:
value.close()
except Exception:
pass
else:
content = self.make_bytes(value)
# Create a list of properly encoded bytestrings to support write().
self._container = [content]
def __iter__(self):
return iter(self._container)
def write(self, content):
self._container.append(self.make_bytes(content))
def tell(self):
return len(self.content)
def getvalue(self):
return self.content
def writable(self):
return True
def writelines(self, lines):
for line in lines:
self.write(line)
class StreamingHttpResponse(HttpResponseBase):
"""
A streaming HTTP response class with an iterator as content.
This should only be iterated once, when the response is streamed to the
client. However, it can be appended to or replaced with a new iterator
that wraps the original content (or yields entirely new content).
"""
streaming = True
def __init__(self, streaming_content=(), *args, **kwargs):
super(StreamingHttpResponse, self).__init__(*args, **kwargs)
# `streaming_content` should be an iterable of bytestrings.
# See the `streaming_content` property methods.
self.streaming_content = streaming_content
@property
def content(self):
raise AttributeError(
"This %s instance has no `content` attribute. Use "
"`streaming_content` instead." % self.__class__.__name__
)
@property
def streaming_content(self):
return map(self.make_bytes, self._iterator)
@streaming_content.setter
def streaming_content(self, value):
self._set_streaming_content(value)
def _set_streaming_content(self, value):
# Ensure we can never iterate on "value" more than once.
self._iterator = iter(value)
if hasattr(value, 'close'):
self._closable_objects.append(value)
def __iter__(self):
return self.streaming_content
def getvalue(self):
return b''.join(self.streaming_content)
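# Illustrative usage sketch (not part of this module): streaming_content is
# any iterable of bytestrings and is consumed lazily, once, when the response
# is iterated by the WSGI server.
#
#   def chunks():
#       for i in range(3):
#           yield ("chunk %d\n" % i).encode("ascii")
#
#   response = StreamingHttpResponse(chunks())
#   b"".join(response.streaming_content)   # -> b"chunk 0\nchunk 1\nchunk 2\n"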
class FileResponse(StreamingHttpResponse):
"""
A streaming HTTP response class optimized for files.
"""
block_size = 4096
def _set_streaming_content(self, value):
if hasattr(value, 'read'):
self.file_to_stream = value
filelike = value
if hasattr(filelike, 'close'):
self._closable_objects.append(filelike)
value = iter(lambda: filelike.read(self.block_size), b'')
else:
self.file_to_stream = None
super(FileResponse, self)._set_streaming_content(value)
class HttpResponseRedirectBase(HttpResponse):
allowed_schemes = ['http', 'https', 'ftp']
def __init__(self, redirect_to, *args, **kwargs):
super(HttpResponseRedirectBase, self).__init__(*args, **kwargs)
self['Location'] = iri_to_uri(redirect_to)
parsed = urlparse(force_text(redirect_to))
if parsed.scheme and parsed.scheme not in self.allowed_schemes:
raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % parsed.scheme)
url = property(lambda self: self['Location'])
def __repr__(self):
return '<%(cls)s status_code=%(status_code)d%(content_type)s, url="%(url)s">' % {
'cls': self.__class__.__name__,
'status_code': self.status_code,
'content_type': self._content_type_for_repr,
'url': self.url,
}
class HttpResponseRedirect(HttpResponseRedirectBase):
status_code = 302
class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
status_code = 301
class HttpResponseNotModified(HttpResponse):
status_code = 304
def __init__(self, *args, **kwargs):
super(HttpResponseNotModified, self).__init__(*args, **kwargs)
del self['content-type']
@HttpResponse.content.setter
def content(self, value):
if value:
raise AttributeError("You cannot set content to a 304 (Not Modified) response")
self._container = []
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods, *args, **kwargs):
super(HttpResponseNotAllowed, self).__init__(*args, **kwargs)
self['Allow'] = ', '.join(permitted_methods)
def __repr__(self):
return '<%(cls)s [%(methods)s] status_code=%(status_code)d%(content_type)s>' % {
'cls': self.__class__.__name__,
'status_code': self.status_code,
'content_type': self._content_type_for_repr,
'methods': self['Allow'],
}
class HttpResponseGone(HttpResponse):
status_code = 410
class HttpResponseServerError(HttpResponse):
status_code = 500
class Http404(Exception):
pass
class JsonResponse(HttpResponse):
"""
An HTTP response class that consumes data to be serialized to JSON.
:param data: Data to be dumped into json. By default only ``dict`` objects
are allowed to be passed due to a security flaw before EcmaScript 5. See
the ``safe`` parameter for more information.
:param encoder: Should be a json encoder class. Defaults to
``django.core.serializers.json.DjangoJSONEncoder``.
:param safe: Controls if only ``dict`` objects may be serialized. Defaults
to ``True``.
:param json_dumps_params: A dictionary of kwargs passed to json.dumps().
"""
def __init__(self, data, encoder=DjangoJSONEncoder, safe=True,
json_dumps_params=None, **kwargs):
if safe and not isinstance(data, dict):
raise TypeError(
'In order to allow non-dict objects to be serialized set the '
'safe parameter to False.'
)
if json_dumps_params is None:
json_dumps_params = {}
kwargs.setdefault('content_type', 'application/json')
data = json.dumps(data, cls=encoder, **json_dumps_params)
super(JsonResponse, self).__init__(content=data, **kwargs)
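# Illustrative usage sketch (not part of this module):
#
#   JsonResponse({"name": "value"})                   # dict payload by default
#   JsonResponse([1, 2, 3], safe=False)               # non-dict needs safe=False
#   JsonResponse({"x": 1}, json_dumps_params={"indent": 2})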
|
|
'''
Created on 9 Mar 2015
@author: Tan Ming Sheng
'''
import json, collections, Queue, time, re
from scrapy.spider import Spider
from scrapy import signals, Request, Selector
from scrapy.contrib.loader import ItemLoader
from scrapy.item import Item, Field
from scrapy.exceptions import CloseSpider
from bs4 import BeautifulSoup
from urlparse import urljoin
from HTMLParser import HTMLParser
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
LINK_NUMBER = 50
class FocraSpider(Spider):
name = 'focras'
'''
Gives access to scrapy's core API; essentially anything can be modified
through the 'crawler' object.
'''
@classmethod
def from_crawler(cls, crawler, **kwargs):
print "focras - from crawler"
spider = cls(stats=crawler.stats, settings=crawler.settings, **kwargs)
crawler.signals.connect(spider.stopped, signals.engine_stopped)
crawler.signals.connect(spider.idle, signals.spider_idle)
return spider
def __init__(self, stats=None, settings=None, **kwargs):
super(FocraSpider, self).__init__(**kwargs)
try:
self.start_time = time.time()
print 'focras init(' + self.cname + ') kwargs seeds ' + kwargs.get('seeds')
print 'focras init(' + self.cname + ') kwargs template '+ self.template
self.queue = Queue.Queue()
self.queue_counter = 0
self.queue_reload_counter = 0
# to save the state of the pagination
self.next_page_link = None
self.end_of_data = False
self.template = json.loads(self.template, object_pairs_hook=collections.OrderedDict)
self.item = Item()
self.pager = HTMLParser().unescape(self.pager)
self.base_url = kwargs.get('seeds').split(',')
self.crawled_pages = 0
self.status = None
self.lcam = None
# A non-chained crawler doesn't have a queue, so only check for a pager.
# A chained crawler's url does not start with http.
if self.base_url[0].startswith('http'):
# for request_url of chain crawler
self.parentname = None
if self.runtype == 'resume' and self.pager != 'null':
db = client['FocraDB']
collection = db['crawler']
cursor_focra = collection.find_one({'_id':self.cname})
self.base_url = [cursor_focra.get('next_page_link')]
self.crawled_pages = cursor_focra.get('crawled_pages')
self.start_time = self.start_time - cursor_focra.get('time_executed')
client.close()
print self.cname + " - Resume page is: " + self.base_url[0]
self.start_urls = self.base_url
else:
print self.cname + " - Start page is: " + self.base_url[0]
self.start_urls = self.base_url
else:
# chain crawler
# get parent and field info from seeds
self.parentname = self.base_url.pop()
self.fieldname = self.base_url.pop()
# connect using the parent name and fetch the first batch (LINK_NUMBER) of links from the field
self.crawler_db = settings['CRAWLER_DB']
db = client[self.crawler_db]
collection = db[self.parentname]
if self.runtype == 'resume':
db_focra = client['FocraDB']
cursor_focra = db_focra['crawler'].find_one({'_id': self.cname})
self.queue_counter = cursor_focra.get('queue_counter')
self.next_page_link = cursor_focra.get('next_page_link')
self.crawled_pages = cursor_focra.get('crawled_pages')
self.start_time = self.start_time - cursor_focra.get('time_executed')
print self.cname + " - Loading Queue from " + str(self.queue_counter)
cursor = collection.find({}, {self.fieldname: 1}).skip(self.queue_counter).limit(LINK_NUMBER)
self.queue_reload_counter = self.queue_reload_counter + LINK_NUMBER + self.queue_counter
else:
cursor = collection.find({}, {self.fieldname: 1}).limit(LINK_NUMBER)
# set the queue reload counter
self.queue_reload_counter += LINK_NUMBER
client.close()
if cursor.count() <= self.queue_reload_counter:
print self.cname + '- No more links to load'
self.end_of_data = True
# put it into queue
for link in cursor:
if link.get(self.fieldname):
soup = BeautifulSoup(link.get(self.fieldname))
# to see the links added to queue
#print soup.a['href']
self.queue.put(soup.a['href'])
# if resume
if self.next_page_link:
self.base_url = [self.next_page_link]
print self.cname + " - Resume page is: " + self.base_url[0]
self.start_urls = self.base_url
else:
self.base_url = [self.queue.get()]
if self.queue_counter == 0:
self.queue_counter += 1
print self.cname + " - Start page is: " + self.base_url[0]
else:
print self.cname + " - Resume page is: " + self.base_url[0]
self.start_urls = self.base_url
except Exception as error:
print error
# Interrupted state: whether the crawler ends up stopped or paused is
# determined by views.py
def stopped(self):
try:
if self.runtype != 'complete':
print self.cname + " - Stopped"
db = client['FocraDB']
collection = db['crawler']
# chain crawler queue from parent crawler
if self.queue_counter != 0:
collection.update({"_id": self.cname}, {"$set":{'queue_counter': self.queue_counter,
'crawled_pages': self.crawled_pages,
'time_executed': time.time() - self.start_time}})
print self.cname + " - Saved queue counter is: " + str(self.queue_counter)
# main or chained crawler pager state
if self.pager != 'null' and self.next_page_link:
collection.update({"_id": self.cname}, {"$set":{'next_page_link': self.next_page_link,
'crawled_pages': self.crawled_pages,
'time_executed': time.time() - self.start_time}})
print self.cname + " - Saved Page link is: " + str(self.next_page_link)
client.close()
except Exception as err:
print err
# closed gracefully, crawler status complete
def idle(self):
try:
# crawl completed
if self.status == 'running':
db = client['FocraDB']
collection = db['crawler']
collection.update({"_id": self.cname}, {"$set":{'crawlerAddr': '',
'crawlerStatus': 'completed',
'crawled_pages': self.crawled_pages,
'time_executed': time.time() - self.start_time}})
print self.cname + " - Crawl completed, closing gracefully"
self.runtype = 'complete'
client.close()
except Exception as err:
print err
def parse(self, response):
try:
self.crawled_pages += 1
db = client['FocraDB']
db['crawler'].update({"_id": self.cname}, {"$set":{'crawled_pages': self.crawled_pages,
'time_executed': time.time()-self.start_time}})
print self.cname + " - Parsing items"
body = BeautifulSoup(response.body)
for tag in body.find_all('a', href=True):
if 'http' not in tag['href']:
tag['href'] = urljoin(self.base_url[0], tag['href'])
for tag in body.find_all('img', src=True):
if 'http' not in tag['src']:
tag['src'] = urljoin(self.base_url[0], tag['src'])
for t in body.find_all('tbody'):
t.unwrap()
response = response.replace(body=body.prettify(encoding='ascii'))
dynamicItemLoader = ItemLoader(item=self.item, response=response)
if self.parentname is not None:
self.item.clear()
self.item.fields['request_url'] = Field()
dynamicItemLoader.add_value("request_url", response.url)
'''
Group the template xpaths by their longest common ancestor so that repeated
data regions can be extracted row by row.
'''
r = None
d = {}
for k, v in self.template.iteritems():
d[k] = v.split('/')
lca = None
if self.lcam:
lca = self.lcam
else:
lca = self.longest_common_ancestor(d)
self.lcam = lca
print lca
if lca:
r = response.xpath(lca).extract()
if r:
if len(r) <= 1:
for key, value in self.template.iteritems():
self.item.fields[key] = Field()
dynamicItemLoader.add_xpath(key, value)
else:
for i in range(len(r)):
# data region
#print r[i].encode('ascii', 'ignore')
sel = Selector(text=r[i])
for key, value in self.template.iteritems():
self.item.fields[key] = Field()
#print self.get_xpath_tail(lca, value)
x = sel.xpath(self.get_xpath_tail(lca, value)).extract()
x = ''.join(x)
if x.startswith('<a') or x.startswith('<img'):
dynamicItemLoader.add_value(key, x)
else:
sb = ""
for string in BeautifulSoup(x).stripped_strings:
sb += "\n" + string
dynamicItemLoader.add_value(key, sb)
else:
for key, value in self.template.iteritems():
#print value
self.item.fields[key] = Field()
dynamicItemLoader.add_xpath(key, value)
print "yielded dynamic loader"
yield dynamicItemLoader.load_item()
# after scraping the page, check status to see whether we should stop
self.status = db['crawler'].find_one({"_id":self.cname}).get('crawlerStatus')
if self.status == 'stopped' or self.status == 'paused':
raise CloseSpider('stopped')
# check for pagination
if self.pager != 'null':
next_link = None
# if the pager is in html format
if bool(BeautifulSoup(self.pager, "html.parser").find()):
# remove the \r for 'end of line' diff
self.pager = self.pager.replace('\r', '')
a_tags = response.xpath('//a').extract()
for tag in a_tags:
if self.pager in tag:
tag = BeautifulSoup(tag)
next_link = tag.a.get('href')
break
# if the pager is in text format
else:
if response.xpath('//a[text()[normalize-space()="'+ self.pager +'"]]/@href').extract():
next_link = response.xpath('//a[text()[normalize-space()="'+ self.pager +'"]]/@href').extract()[0]
if next_link:
self.next_page_link = next_link
print self.cname + ' - Next page is: ' + self.next_page_link
print "yielded request top"
yield Request(self.next_page_link, callback=self.parse, dont_filter=True)
else:
# chained crawler WITH pagination
# check for more links from parent column
if not self.queue.empty():
k = self.queue.get()
print "yielded request middle ---"+k
yield Request(k, callback=self.parse, dont_filter=True)
self.queue_counter += 1
if self.queue.qsize() <= LINK_NUMBER and not self.end_of_data:
self.check_queue()
else:
# chained crawler WITHOUT pagination
# check for more links from parent column
if not self.queue.empty():
l = self.queue.get()
print "yielded request btm ---"+l
yield Request(l, callback=self.parse, dont_filter=True)
self.queue_counter += 1
if self.queue.qsize() <= LINK_NUMBER and not self.end_of_data:
self.check_queue()
except Exception as err:
print err
def check_queue(self):
try:
print self.cname + '- Reload counter ' + str(self.queue_reload_counter)
print self.cname + '- Queue less than ' + str(LINK_NUMBER) + ', querying for more links'
db = client[self.crawler_db]
collection = db[self.parentname]
cursor = collection.find({}, {self.fieldname: 1}).skip(self.queue_reload_counter).limit(LINK_NUMBER)
client.close()
self.queue_reload_counter += LINK_NUMBER
# cursor.count() returns the total number of matching rows
if cursor.count() <= self.queue_reload_counter:
print self.cname + '- No more links to load'
self.end_of_data = True
# put it into queue
for link in cursor:
if link.get(self.fieldname):
soup = BeautifulSoup(link.get(self.fieldname))
# uncomment below to see queue links
#print soup.a['href']
self.queue.put(soup.a['href'])
except Exception as err:
print err
'''
find the longest common ancestor (the shared xpath prefix) of the template xpaths
'''
def longest_common_ancestor(self, d):
if len(d) < 1:
return None
p = None
for l in d.values():
if p is None or len(l) < p:
p = len(l)
diff_index = None
for i in range(p):
check = None
for v in d.itervalues():
if check is None or check == v[i]:
check = v[i]
elif check != v[i]:
diff_index = i
break
if diff_index:
break
if diff_index:
# return None if the root node is '/body', which is index 2
# return None if the root node is '/html', which is index 1
# return None if the root node is '/', which is index 0
if diff_index < 3:
return None
sb = ""
for i in range(diff_index):
if i != 0:
sb += "/" + d.values()[0][i]
return sb
return None
def get_xpath_tail(self, lca, value):
last = lca.split("/")
return '//' + re.sub('[^A-Za-z]+', '', last[len(last)-1]) + value.replace(lca, "", 1)
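# Illustrative worked example (comment only; the paths are hypothetical):
# given the template xpaths
#   '/html/body/div/table/tr[1]/td[1]' and '/html/body/div/table/tr[2]/td[2]',
# longest_common_ancestor() compares the split components until they differ
# and returns '/html/body/div/table'.  get_xpath_tail() then rewrites each
# template value relative to that ancestor, e.g.
#   get_xpath_tail('/html/body/div/table', '/html/body/div/table/tr[1]/td[1]')
#   -> '//table/tr[1]/td[1]'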
|
|
# Copyright (c) 2014 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import pkg_resources as pkg
import six
from sahara import context
from sahara import exceptions as exc
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.plugins import exceptions as ex
from sahara.plugins.hdp import clusterspec as cs
from sahara.plugins.hdp import configprovider as cfgprov
from sahara.plugins.hdp.versions import abstractversionhandler as avm
from sahara.plugins.hdp.versions.version_2_0_6 import edp_engine
from sahara.plugins.hdp.versions.version_2_0_6 import services
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import poll_utils
from sahara import version
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VersionHandler(avm.AbstractVersionHandler):
config_provider = None
version = None
client = None
def _set_version(self, version):
self.version = version
def _get_config_provider(self):
if self.config_provider is None:
self.config_provider = cfgprov.ConfigurationProvider(
json.load(pkg.resource_stream(
version.version_info.package,
'plugins/hdp/versions/version_2_0_6/resources/'
'ambari-config-resource.json')),
hadoop_version='2.0.6')
return self.config_provider
def get_version(self):
return self.version
def get_ambari_client(self):
if not self.client:
self.client = AmbariClient(self)
return self.client
def get_config_items(self):
return self._get_config_provider().get_config_items()
def get_applicable_target(self, name):
return self._get_config_provider().get_applicable_target(name)
def get_cluster_spec(self, cluster, user_inputs,
scaled_groups=None, cluster_template=None):
if cluster_template:
cluster_spec = cs.ClusterSpec(cluster_template, '2.0.6')
else:
cluster_spec = self.get_default_cluster_configuration()
cluster_spec.create_operational_config(
cluster, user_inputs, scaled_groups)
cs.validate_number_of_datanodes(
cluster, scaled_groups, self.get_config_items())
return cluster_spec
def get_default_cluster_configuration(self):
return cs.ClusterSpec(self._get_default_cluster_template(), '2.0.6')
def _get_default_cluster_template(self):
return pkg.resource_string(
version.version_info.package,
'plugins/hdp/versions/version_2_0_6/resources/'
'default-cluster.template')
def get_node_processes(self):
node_processes = {}
for service in self.get_default_cluster_configuration().services:
components = []
for component in service.components:
components.append(component.name)
node_processes[service.name] = components
return node_processes
def install_swift_integration(self, servers):
if servers:
cpo.add_provisioning_step(
servers[0].cluster_id, _("Install swift integration"),
len(servers))
for server in servers:
with context.set_current_instance_id(
server.instance['instance_id']):
server.install_swift_integration()
def get_services_processor(self):
return services
def get_edp_engine(self, cluster, job_type):
if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
return edp_engine.EdpOozieEngine(cluster)
return None
def get_edp_job_types(self):
return edp_engine.EdpOozieEngine.get_supported_job_types()
def get_edp_config_hints(self, job_type):
return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
def get_open_ports(self, node_group):
ports = [8660] # for Ganglia
ports_map = {
'AMBARI_SERVER': [8080, 8440, 8441],
'NAMENODE': [50070, 50470, 8020, 9000],
'DATANODE': [50075, 50475, 50010, 8010],
'SECONDARY_NAMENODE': [50090],
'HISTORYSERVER': [19888],
'RESOURCEMANAGER': [8025, 8041, 8050, 8088],
'NODEMANAGER': [45454],
'HIVE_SERVER': [10000],
'HIVE_METASTORE': [9083],
'HBASE_MASTER': [60000, 60010],
'HBASE_REGIONSERVER': [60020, 60030],
'WEBHCAT_SERVER': [50111],
'GANGLIA_SERVER': [8661, 8662, 8663, 8651],
'MYSQL_SERVER': [3306],
'OOZIE_SERVER': [11000, 11001],
'ZOOKEEPER_SERVER': [2181, 2888, 3888],
'NAGIOS_SERVER': [80]
}
for process in node_group.node_processes:
if process in ports_map:
ports.extend(ports_map[process])
return ports
class AmbariClient(object):
def __init__(self, handler):
# add an argument for neutron discovery
self.handler = handler
def _get_http_session(self, host, port):
return host.remote().get_http_client(port)
def _get_standard_headers(self):
return {"X-Requested-By": "sahara"}
def _post(self, url, ambari_info, data=None):
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.post(url, data=data,
auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
def _delete(self, url, ambari_info):
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.delete(url,
auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
def _put(self, url, ambari_info, data=None):
session = self._get_http_session(ambari_info.host, ambari_info.port)
auth = (ambari_info.user, ambari_info.password)
return session.put(url, data=data, auth=auth,
headers=self._get_standard_headers())
def _get(self, url, ambari_info):
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.get(url, auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
def _add_cluster(self, ambari_info, name):
add_cluster_url = 'http://{0}/api/v1/clusters/{1}'.format(
ambari_info.get_address(), name)
result = self._post(add_cluster_url, ambari_info,
data='{"Clusters": {"version" : "HDP-' +
self.handler.get_version() + '"}}')
if result.status_code != 201:
LOG.error(_LE('Create cluster command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add cluster: %s') % result.text)
@cpo.event_wrapper(True, step=_("Add configurations to cluster"),
param=('ambari_info', 2))
def _add_configurations_to_cluster(
self, cluster_spec, ambari_info, name):
existing_config_url = ('http://{0}/api/v1/clusters/{1}?fields='
'Clusters/desired_configs'.format(
ambari_info.get_address(), name))
result = self._get(existing_config_url, ambari_info)
json_result = json.loads(result.text)
existing_configs = json_result['Clusters']['desired_configs']
configs = cluster_spec.get_deployed_configurations()
if 'ambari' in configs:
configs.remove('ambari')
if len(configs) == len(existing_configs):
# nothing to do
return
config_url = 'http://{0}/api/v1/clusters/{1}'.format(
ambari_info.get_address(), name)
body = {}
clusters = {}
version = 1
body['Clusters'] = clusters
for config_name in configs:
if config_name in existing_configs:
if config_name == 'core-site' or config_name == 'global':
existing_version = (existing_configs[config_name]['tag']
.lstrip('v'))
version = int(existing_version) + 1
else:
continue
config_body = {}
clusters['desired_config'] = config_body
config_body['type'] = config_name
config_body['tag'] = 'v%s' % version
config_body['properties'] = (
cluster_spec.configurations[config_name])
result = self._put(config_url, ambari_info, data=json.dumps(body))
if result.status_code != 200:
LOG.error(
_LE('Set configuration command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to set configurations on cluster: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Add services to cluster"), param=('ambari_info', 2))
def _add_services_to_cluster(self, cluster_spec, ambari_info, name):
services = cluster_spec.services
add_service_url = 'http://{0}/api/v1/clusters/{1}/services/{2}'
for service in services:
# Make sure the service is deployed and is managed by Ambari
if service.deployed and service.ambari_managed:
result = self._post(add_service_url.format(
ambari_info.get_address(), name, service.name),
ambari_info)
if result.status_code not in [201, 409]:
LOG.error(
_LE('Create service command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add services to cluster: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Add components to services"), param=('ambari_info', 2))
def _add_components_to_services(self, cluster_spec, ambari_info, name):
add_component_url = ('http://{0}/api/v1/clusters/{1}/services/{'
'2}/components/{3}')
for service in cluster_spec.services:
# Make sure the service is deployed and is managed by Ambari
if service.deployed and service.ambari_managed:
for component in service.components:
result = self._post(add_component_url.format(
ambari_info.get_address(), name, service.name,
component.name),
ambari_info)
if result.status_code not in [201, 409]:
LOG.error(
_LE('Create component command failed. {result}')
.format(result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add components to services: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Add host and components"), param=('ambari_info', 3))
def _add_hosts_and_components(
self, cluster_spec, servers, ambari_info, name):
add_host_url = 'http://{0}/api/v1/clusters/{1}/hosts/{2}'
add_host_component_url = ('http://{0}/api/v1/clusters/{1}'
'/hosts/{2}/host_components/{3}')
for host in servers:
with context.set_current_instance_id(host.instance['instance_id']):
hostname = host.instance.fqdn().lower()
result = self._post(
add_host_url.format(ambari_info.get_address(), name,
hostname), ambari_info)
if result.status_code != 201:
LOG.error(
_LE('Create host command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add host: %s') % result.text)
node_group_name = host.node_group.name
# TODO(jspeidel): ensure that node group exists
node_group = cluster_spec.node_groups[node_group_name]
for component in node_group.components:
# Don't add any AMBARI or HUE components
# TODO(rlevas): Programmatically determine if component is
# managed by Ambari
if (component.find('AMBARI') != 0
and component.find('HUE') != 0):
result = self._post(add_host_component_url.format(
ambari_info.get_address(), name, hostname,
component), ambari_info)
if result.status_code != 201:
LOG.error(
_LE('Create host_component command failed. '
'{result}').format(result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add host component: %s')
% result.text)
@cpo.event_wrapper(
True, step=_("Install services"), param=('ambari_info', 2))
def _install_services(self, cluster_name, ambari_info):
ambari_address = ambari_info.get_address()
install_url = ('http://{0}/api/v1/clusters/{'
'1}/services?ServiceInfo/state=INIT'.format(
ambari_address, cluster_name))
body = ('{"RequestInfo" : { "context" : "Install all services" },'
'"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}')
result = self._put(install_url, ambari_info, data=body)
if result.status_code == 202:
json_result = json.loads(result.text)
request_id = json_result['Requests']['id']
success = self._wait_for_async_request(self._get_async_request_uri(
ambari_info, cluster_name, request_id),
ambari_info)
if success:
LOG.info(_LI("Install of Hadoop stack successful."))
self._finalize_ambari_state(ambari_info)
else:
LOG.error(_LE('Install command failed.'))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Install command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
def _get_async_request_uri(self, ambari_info, cluster_name, request_id):
return ('http://{0}/api/v1/clusters/{1}/requests/{'
'2}/tasks?fields=Tasks/status'.format(
ambari_info.get_address(), cluster_name,
request_id))
# Returns the top-level requests API URI
def _get_command_request_uri(self, ambari_info, cluster_name):
return ('http://{0}/api/v1/clusters/{1}/requests'.format(
ambari_info.get_address(), cluster_name))
def _wait_for_async_request(self, request_url, ambari_info):
started = False
while not started:
result = self._get(request_url, ambari_info)
LOG.debug('Async request url: {url} response:\n{response}'.format(
url=request_url, response=result.text))
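            # Illustrative shape of the tasks response parsed below (an
            # assumption based on the fields this loop reads):
            #   {"items": [{"Tasks": {"status": "COMPLETED"}},
            #              {"Tasks": {"status": "PENDING"}}]}
            # Any FAILED/ABORTED/TIMEDOUT task aborts the wait; polling
            # continues until every task reports COMPLETED.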
json_result = json.loads(result.text)
started = True
for items in json_result['items']:
status = items['Tasks']['status']
if (status == 'FAILED' or status == 'ABORTED' or
status == 'TIMEDOUT'):
return False
else:
if status != 'COMPLETED':
started = False
context.sleep(5)
return started
def _finalize_ambari_state(self, ambari_info):
persist_state_uri = 'http://{0}/api/v1/persist'.format(
ambari_info.get_address())
# this post data has non-standard format because persist
# resource doesn't comply with Ambari API standards
persist_data = ('{ "CLUSTER_CURRENT_STATUS":'
'"{\\"clusterState\\":\\"CLUSTER_STARTED_5\\"}" }')
result = self._post(persist_state_uri, ambari_info, data=persist_data)
if result.status_code != 201 and result.status_code != 202:
LOG.warning(_LW('Ambari cluster state not finalized. {result}').
format(result=result.text))
raise ex.HadoopProvisionError(
_('Unable to finalize Ambari state.'))
LOG.info(_LI('Ambari cluster state finalized.'))
@cpo.event_wrapper(
True, step=_("Start services"), param=('ambari_info', 3))
def start_services(self, cluster_name, cluster_spec, ambari_info):
start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
'state=INSTALLED'.format(
ambari_info.get_address(), cluster_name))
body = ('{"RequestInfo" : { "context" : "Start all services" },'
'"Body" : {"ServiceInfo": {"state" : "STARTED"}}}')
self._fire_service_start_notifications(
cluster_name, cluster_spec, ambari_info)
result = self._put(start_url, ambari_info, data=body)
if result.status_code == 202:
json_result = json.loads(result.text)
request_id = json_result['Requests']['id']
success = self._wait_for_async_request(
self._get_async_request_uri(ambari_info, cluster_name,
request_id), ambari_info)
if success:
LOG.info(_LI("Successfully started Hadoop cluster"))
LOG.info(_LI('Ambari server address: {server_address}').format(
server_address=ambari_info.get_address()))
else:
LOG.error(_LE('Failed to start Hadoop cluster.'))
raise ex.HadoopProvisionError(
_('Start of Hadoop services failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Start command failed. Status: {status}, response: '
'{response}').format(status=result.status_code,
response=result.text))
raise ex.HadoopProvisionError(
_('Start of Hadoop services failed.'))
def _exec_ambari_command(self, ambari_info, body, cmd_uri):
LOG.debug('PUT URI: {uri}'.format(uri=cmd_uri))
result = self._put(cmd_uri, ambari_info, data=body)
if result.status_code == 202:
LOG.debug(
'PUT response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
href = json_result['href'] + '/tasks?fields=Tasks/status'
success = self._wait_for_async_request(href, ambari_info)
if success:
LOG.info(
_LI("Successfully changed state of Hadoop components "))
else:
LOG.error(_LE('Failed to change state of Hadoop components'))
raise ex.HadoopProvisionError(
_('Failed to change state of Hadoop components'))
else:
LOG.error(
_LE('Command failed. Status: {status}, response: '
'{response}').format(status=result.status_code,
response=result.text))
raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.'))
def _get_host_list(self, servers):
host_list = [server.instance.fqdn().lower() for server in servers]
return ",".join(host_list)
def _install_and_start_components(self, cluster_name, servers,
ambari_info, cluster_spec):
auth = (ambari_info.user, ambari_info.password)
self._install_components(ambari_info, auth, cluster_name, servers)
self.handler.install_swift_integration(servers)
self._start_components(ambari_info, auth, cluster_name,
servers, cluster_spec)
def _install_components(self, ambari_info, auth, cluster_name, servers):
# query for the host components on the given hosts that are in the
# INIT state
# TODO(jspeidel): provide request context
body = '{"HostRoles": {"state" : "INSTALLED"}}'
install_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INIT&'
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
self._exec_ambari_command(ambari_info, body, install_uri)
        LOG.info(_LI('Installed Hadoop components while scaling up'))
LOG.info(_LI('Ambari server ip {ip}').format(
ip=ambari_info.get_address()))
def _start_components(self, ambari_info, auth, cluster_name, servers,
cluster_spec):
# query for all the host components in the INSTALLED state,
# then get a list of the client services in the list
installed_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INSTALLED&'
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
result = self._get(installed_uri, ambari_info)
if result.status_code == 200:
LOG.debug(
'GET response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
items = json_result['items']
client_set = cluster_spec.get_components_for_type('CLIENT')
inclusion_list = list(set([x['HostRoles']['component_name']
for x in items
if x['HostRoles']['component_name']
not in client_set]))
# query and start all non-client components on the given set of
# hosts
# TODO(jspeidel): Provide request context
body = '{"HostRoles": {"state" : "STARTED"}}'
start_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INSTALLED&'
'HostRoles/host_name.in({2})'
'&HostRoles/component_name.in({3})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers),
",".join(inclusion_list)))
self._exec_ambari_command(ambari_info, body, start_uri)
else:
raise ex.HadoopProvisionError(
_('Unable to determine installed service '
'components in scaled instances. status'
                    ' code returned = {0}').format(result.status_code))
def _check_host_registrations(self, num_hosts, ambari_info):
url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
try:
result = self._get(url, ambari_info)
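            # Assumed shape of the /api/v1/hosts response, based on the
            # fields accessed below:
            #   {"items": [{"Hosts": {"host_name": "host1.example.com"}}]}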
json_result = json.loads(result.text)
LOG.debug('Registered Hosts: {current_number} '
'of {final_number}'.format(
current_number=len(json_result['items']),
final_number=num_hosts))
for hosts in json_result['items']:
LOG.debug('Registered Host: {host}'.format(
host=hosts['Hosts']['host_name']))
return result and len(json_result['items']) >= num_hosts
except Exception:
LOG.debug('Waiting to connect to ambari server')
return False
@cpo.event_wrapper(True, step=_("Wait for all Ambari agents to register"),
param=('ambari_info', 2))
def wait_for_host_registrations(self, num_hosts, ambari_info):
cluster = ambari_info.get_cluster()
poll_utils.plugin_option_poll(
cluster, self._check_host_registrations,
cfgprov.HOST_REGISTRATIONS_TIMEOUT,
_("Wait for host registrations"), 5, {
'num_hosts': num_hosts, 'ambari_info': ambari_info})
def update_ambari_admin_user(self, password, ambari_info):
old_pwd = ambari_info.password
user_url = 'http://{0}/api/v1/users/admin'.format(
ambari_info.get_address())
update_body = ('{{"Users":{{"roles":"admin","password":"{0}",'
'"old_password":"{1}"}} }}'.format(password, old_pwd))
result = self._put(user_url, ambari_info, data=update_body)
if result.status_code != 200:
raise ex.HadoopProvisionError(_('Unable to update Ambari admin '
'user credentials: {0}').format(
result.text))
def add_ambari_user(self, user, ambari_info):
user_url = 'http://{0}/api/v1/users/{1}'.format(
ambari_info.get_address(), user.name)
create_body = ('{{"Users":{{"password":"{0}","roles":"{1}"}} }}'.
format(user.password, '%s' % ','.
join(map(str, user.groups))))
result = self._post(user_url, ambari_info, data=create_body)
if result.status_code != 201:
raise ex.HadoopProvisionError(
_('Unable to create Ambari user: {0}').format(result.text))
def delete_ambari_user(self, user_name, ambari_info):
user_url = 'http://{0}/api/v1/users/{1}'.format(
ambari_info.get_address(), user_name)
result = self._delete(user_url, ambari_info)
if result.status_code != 200:
raise ex.HadoopProvisionError(
_('Unable to delete Ambari user: %(user_name)s'
' : %(text)s') %
{'user_name': user_name, 'text': result.text})
def configure_scaled_cluster_instances(self, name, cluster_spec,
num_hosts, ambari_info):
self.wait_for_host_registrations(num_hosts, ambari_info)
self._add_configurations_to_cluster(
cluster_spec, ambari_info, name)
self._add_services_to_cluster(
cluster_spec, ambari_info, name)
self._add_components_to_services(
cluster_spec, ambari_info, name)
self._install_services(name, ambari_info)
def start_scaled_cluster_instances(self, name, cluster_spec, servers,
ambari_info):
self.start_services(name, cluster_spec, ambari_info)
self._add_hosts_and_components(
cluster_spec, servers, ambari_info, name)
self._install_and_start_components(
name, servers, ambari_info, cluster_spec)
@cpo.event_wrapper(
True, step=_("Decommission nodes"), param=('cluster', 1))
def decommission_cluster_instances(self, cluster, clusterspec, instances,
ambari_info):
request_uri = self._get_command_request_uri(ambari_info, cluster.name)
hosts_to_decommission = []
# Decommission HDFS datanodes to avoid loss of data
# during decommissioning process
for instance in instances:
ng_name = instance.node_group.name
if "DATANODE" in clusterspec.node_groups[ng_name].components:
# determine the instances that include HDFS support
hosts_to_decommission.append(instance.fqdn())
LOG.debug('AmbariClient: hosts_to_decommission = {hosts}'.format(
hosts=str(hosts_to_decommission)))
# template for request body
body_header = ('{"RequestInfo" : { "context": "Decommission DataNode",'
' "command" : "DECOMMISSION", "service_name" : "HDFS",'
' "component_name" : "NAMENODE", '
' "parameters" : { "slave_type" : "DATANODE", ')
excluded_hosts_request = '"excluded_hosts" : "{0}"'
# generate comma-separated list of hosts to de-commission
list_of_hosts = ",".join(hosts_to_decommission)
LOG.debug('AmbariClient: list_of_hosts = {hosts}'.format(
hosts=list_of_hosts))
# create the request body
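        # For illustration, with hosts_to_decommission = ['dn1.example.com']
        # (a placeholder host), the assembled request_body reads:
        #   {"RequestInfo": {"context": "Decommission DataNode",
        #                    "command": "DECOMMISSION",
        #                    "service_name": "HDFS",
        #                    "component_name": "NAMENODE",
        #                    "parameters": {"slave_type": "DATANODE",
        #                                   "excluded_hosts": "dn1.example.com"}},
        #    "Requests/resource_filters": [{"service_name": "HDFS",
        #                                   "component_name": "NAMENODE"}]}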
request_body = (
body_header +
excluded_hosts_request.format(list_of_hosts)
+ '}}'
+ ', "Requests/resource_filters":[{"service_name":"HDFS",'
'"component_name":"NAMENODE"}]}')
LOG.debug('AmbariClient: about to make decommission request, uri = '
'{uri}'.format(uri=request_uri))
LOG.debug('AmbariClient: about to make decommission request, '
'request body = {body}'.format(body=request_body))
# ask Ambari to decommission the datanodes
result = self._post(request_uri, ambari_info, request_body)
if result.status_code != 202:
LOG.error(_LE('AmbariClient: error while making decommission post '
'request. Error is = {result}').format(
result=result.text))
raise ex.DecommissionError(
_('An error occurred while trying to '
'decommission the DataNode instances that are '
'being shut down. '
'Please consult the Ambari server logs on the '
'master node for '
'more information about the failure.'))
else:
LOG.info(_LI('AmbariClient: decommission post request succeeded!'))
status_template = ('http://{0}/api/v1/clusters/{1}/hosts/{2}/'
'host_components/{3}')
# find the host that the NameNode is deployed on
name_node_host = clusterspec.determine_component_hosts(
'NAMENODE').pop()
status_request = status_template.format(
ambari_info.get_address(),
cluster.name, name_node_host.fqdn(),
'NAMENODE')
LOG.debug('AmbariClient: about to make decommission status request,'
'uri = {uri}'.format(uri=status_request))
poll_utils.plugin_option_poll(
ambari_info.get_cluster(),
self.process_decommission,
cfgprov.DECOMMISSIONING_TIMEOUT, _("Decommission nodes"), 5,
{'status_request': status_request, 'ambari_info': ambari_info,
'hosts_to_decommission': hosts_to_decommission})
def process_decommission(self, status_request, ambari_info,
hosts_to_decommission):
if len(hosts_to_decommission) == 0:
# Nothing for decommissioning
return True
LOG.debug('AmbariClient: number of hosts waiting for '
'decommissioning to complete = {count}'.format(
count=str(len(hosts_to_decommission))))
result = self._get(status_request, ambari_info)
if result.status_code != 200:
LOG.error(_LE('AmbariClient: error in making decommission '
'status request, error = {result}').format(
result=result.text))
else:
LOG.info(_LI('AmbariClient: decommission status request ok, '
'result = {result}').format(result=result.text))
json_result = json.loads(result.text)
live_nodes = (
json_result['metrics']['dfs']['namenode']['LiveNodes'])
# parse out the map of live hosts associated with the NameNode
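            # LiveNodes is itself a JSON-encoded string; an assumed example,
            # based on the fields read below:
            #   '{"dn1.example.com": {"adminState": "Decommissioned", ...},
            #     "dn2.example.com": {"adminState": "In Service", ...}}'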
json_result_nodes = json.loads(live_nodes)
for node, val in six.iteritems(json_result_nodes):
admin_state = val['adminState']
if admin_state == 'Decommissioned':
LOG.debug('AmbariClient: node = {node} is '
'now in adminState = {admin_state}'.format(
node=node, admin_state=admin_state))
# remove from list, to track which nodes
# are now in Decommissioned state
hosts_to_decommission.remove(node)
return False
def provision_cluster(self, cluster_spec, servers, ambari_info, name):
self._add_cluster(ambari_info, name)
self._add_configurations_to_cluster(cluster_spec, ambari_info, name)
self._add_services_to_cluster(cluster_spec, ambari_info, name)
self._add_components_to_services(cluster_spec, ambari_info, name)
self._add_hosts_and_components(
cluster_spec, servers, ambari_info, name)
self._install_services(name, ambari_info)
self.handler.install_swift_integration(servers)
def cleanup(self, ambari_info):
try:
ambari_info.host.remote().close_http_session(ambari_info.port)
except exc.NotFoundException:
LOG.debug("HTTP session is not cached")
def _get_services_in_state(self, cluster_name, ambari_info, state):
services_url = ('http://{0}/api/v1/clusters/{1}/services?'
'ServiceInfo/state.in({2})'.format(
ambari_info.get_address(), cluster_name, state))
result = self._get(services_url, ambari_info)
json_result = json.loads(result.text)
services = []
for service in json_result['items']:
services.append(service['ServiceInfo']['service_name'])
return services
def _fire_service_start_notifications(self, cluster_name,
cluster_spec, ambari_info):
started_services = self._get_services_in_state(
cluster_name, ambari_info, 'STARTED')
for service in cluster_spec.services:
if service.deployed and service.name not in started_services:
service.pre_service_start(cluster_spec, ambari_info,
started_services)
def setup_hdfs_ha(self, cluster_spec, servers, ambari_info, name):
# Get HA cluster map
hac = self._hdfs_ha_cluster_map(cluster_spec, servers,
ambari_info, name)
        # start active namenode in order to format and save namespace
self._hdfs_ha_update_host_component(hac, hac['nn_active'],
'NAMENODE', 'STARTED')
hac['server_active'].set_namenode_safemode(hac['java_home'])
hac['server_active'].save_namenode_namespace(hac['java_home'])
# shutdown active namenode
self._hdfs_ha_update_host_component(hac, hac['nn_active'],
'NAMENODE', 'INSTALLED')
# Install HDFS_CLIENT on namenodes, to be used later for updating
# HDFS configs
if hac['nn_active'] not in hac['hdfsc_hosts']:
self._hdfs_ha_add_host_component(hac, hac['nn_active'],
'HDFS_CLIENT')
if hac['nn_standby'] not in hac['hdfsc_hosts']:
self._hdfs_ha_add_host_component(hac, hac['nn_standby'],
'HDFS_CLIENT')
# start the journal_nodes
for jn in hac['jn_hosts']:
self._hdfs_ha_update_host_component(hac, jn,
'JOURNALNODE', 'STARTED')
        # disable any secondary namenodes
for snn in hac['snn_hosts']:
self._hdfs_ha_update_host_component(hac, snn,
'SECONDARY_NAMENODE',
'DISABLED')
# get hdfs-site config tag
hdfs_site_tag = self._hdfs_ha_get_config_tag(hac, 'hdfs-site')
# get hdfs-site config
hdfs_site = self._hdfs_ha_get_config(hac, 'hdfs-site', hdfs_site_tag)
# update hdfs-site with HDFS HA properties
hdfs_site_ha = self._hdfs_ha_update_hdfs_site(hac, hdfs_site)
# put new hdfs-site config
self._hdfs_ha_put_config(hac, 'hdfs-site', hac['config_ver'],
hdfs_site_ha)
# get core-site tag
core_site_tag = self._hdfs_ha_get_config_tag(hac, 'core-site')
# get core-site config
core_site = self._hdfs_ha_get_config(hac, 'core-site', core_site_tag)
# update core-site with HDFS HA properties
core_site_ha = self._hdfs_ha_update_core_site(hac, core_site)
# put new HA core-site config
self._hdfs_ha_put_config(hac, 'core-site', hac['config_ver'],
core_site_ha)
# update hbase-site if Hbase is installed
if hac['hbase_hosts']:
hbase_site_tag = self._hdfs_ha_get_config_tag(hac, 'hbase-site')
hbase_site = self._hdfs_ha_get_config(hac, 'hbase-site',
hbase_site_tag)
hbase_site_ha = self._hdfs_ha_update_hbase_site(hac, hbase_site)
            self._hdfs_ha_put_config(hac, 'hbase-site', hac['config_ver'],
hbase_site_ha)
# force the deployment of HDFS HA configs on namenodes by re-installing
# hdfs-client
self._hdfs_ha_update_host_component(hac, hac['nn_active'],
'HDFS_CLIENT', 'INSTALLED')
self._hdfs_ha_update_host_component(hac, hac['nn_standby'],
'HDFS_CLIENT', 'INSTALLED')
# initialize shared edits on the active namenode
hac['server_active'].initialize_shared_edits(hac['java_home'])
# start zookeeper servers
for zk in hac['zk_hosts']:
self._hdfs_ha_update_host_component(hac, zk,
'ZOOKEEPER_SERVER', 'STARTED')
# start active namenode
self._hdfs_ha_update_host_component(hac, hac['nn_active'],
'NAMENODE', 'STARTED')
# setup active namenode automatic failover
hac['server_active'].format_zookeeper_fc(hac['java_home'])
# format standby namenode
hac['server_standby'].bootstrap_standby_namenode(hac['java_home'])
# start namenode process on standby namenode
self._hdfs_ha_update_host_component(hac, hac['nn_standby'],
'NAMENODE', 'STARTED')
# add, install and start ZKFC on namenodes for automatic fail-over
for nn in hac['nn_hosts']:
self._hdfs_ha_add_host_component(hac, nn, 'ZKFC')
self._hdfs_ha_update_host_component(hac, nn, 'ZKFC', 'INSTALLED')
self._hdfs_ha_update_host_component(hac, nn, 'ZKFC', 'STARTED')
# delete any secondary namenodes
for snn in hac['snn_hosts']:
self._hdfs_ha_delete_host_component(hac, snn, 'SECONDARY_NAMENODE')
# stop journalnodes and namenodes before terminating
# not doing so causes warnings in Ambari for stale config
for jn in hac['jn_hosts']:
self._hdfs_ha_update_host_component(hac, jn, 'JOURNALNODE',
'INSTALLED')
for nn in hac['nn_hosts']:
self._hdfs_ha_update_host_component(hac, nn, 'NAMENODE',
'INSTALLED')
# install httpfs and write temp file if HUE is installed
if hac['hue_host']:
self._hdfs_ha_setup_hue(hac)
def _hdfs_ha_cluster_map(self, cluster_spec, servers, ambari_info, name):
hacluster = {}
hacluster['name'] = name
hacluster['config_ver'] = 'v2'
# set JAVA_HOME
global_config = cluster_spec.configurations.get('global', None)
global_config_jh = (global_config.get('java64_home', None) or
global_config.get('java_home', None) if
global_config else None)
hacluster['java_home'] = global_config_jh or '/opt/jdk1.6.0_31'
        # set namenode ports
hacluster['nn_rpc'] = '8020'
hacluster['nn_ui'] = '50070'
hacluster['ambari_info'] = ambari_info
# get host lists
hacluster['nn_hosts'] = [x.fqdn().lower() for x in
cluster_spec.determine_component_hosts(
'NAMENODE')]
hacluster['snn_hosts'] = [x.fqdn().lower() for x in
cluster_spec.determine_component_hosts(
'SECONDARY_NAMENODE')]
hacluster['jn_hosts'] = [x.fqdn().lower() for x in
cluster_spec.determine_component_hosts(
'JOURNALNODE')]
hacluster['zk_hosts'] = [x.fqdn().lower() for x in
cluster_spec.determine_component_hosts(
'ZOOKEEPER_SERVER')]
hacluster['hdfsc_hosts'] = [x.fqdn().lower() for x in
cluster_spec.determine_component_hosts(
'HDFS_CLIENT')]
hacluster['hbase_hosts'] = [x.fqdn().lower() for x in
cluster_spec.determine_component_hosts(
'HBASE_MASTER')]
hacluster['hue_host'] = [x.fqdn().lower() for x in
cluster_spec.determine_component_hosts('HUE')]
# get servers for remote command execution
# consider hacluster['nn_hosts'][0] as active namenode
hacluster['nn_active'] = hacluster['nn_hosts'][0]
hacluster['nn_standby'] = hacluster['nn_hosts'][1]
# get the 2 namenode servers and hue server
for server in servers:
if server.instance.fqdn().lower() == hacluster['nn_active']:
hacluster['server_active'] = server
if server.instance.fqdn().lower() == hacluster['nn_standby']:
hacluster['server_standby'] = server
if hacluster['hue_host']:
if server.instance.fqdn().lower() == hacluster['hue_host'][0]:
hacluster['server_hue'] = server
return hacluster
def _hdfs_ha_delete_host_component(self, hac, host, component):
delete_service_component_url = ('http://{0}/api/v1/clusters/{1}/hosts'
'/{2}/host_components/{3}').format(
hac['ambari_info'].get_address(),
hac['name'], host, component)
result = self._delete(delete_service_component_url, hac['ambari_info'])
if result.status_code != 200:
LOG.error(_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
_('Configuring HDFS HA failed. %s') % result.text)
def _hdfs_ha_add_host_component(self, hac, host, component):
add_host_component_url = ('http://{0}/api/v1/clusters/{1}'
'/hosts/{2}/host_components/{3}').format(
hac['ambari_info'].get_address(),
hac['name'], host, component)
result = self._post(add_host_component_url, hac['ambari_info'])
if result.status_code != 201:
LOG.error(_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
_('Configuring HDFS HA failed. %s') % result.text)
def _hdfs_ha_update_host_component(self, hac, host, component, state):
update_host_component_url = ('http://{0}/api/v1/clusters/{1}'
'/hosts/{2}/host_components/{3}').format(
hac['ambari_info'].get_address(),
hac['name'], host, component)
component_state = {"HostRoles": {"state": state}}
body = json.dumps(component_state)
result = self._put(update_host_component_url,
hac['ambari_info'], data=body)
if result.status_code == 202:
json_result = json.loads(result.text)
request_id = json_result['Requests']['id']
success = self._wait_for_async_request(self._get_async_request_uri(
hac['ambari_info'], hac['name'], request_id),
hac['ambari_info'])
if success:
LOG.info(_LI("HDFS-HA: Host component updated successfully: "
"{host} {component}").format(host=host,
component=component))
else:
LOG.error(_LE("HDFS-HA: Host component update failed: "
"{host} {component}").format(
host=host, component=component))
raise ex.NameNodeHAConfigurationError(
_('Configuring HDFS HA failed. %s') % result.text)
elif result.status_code != 200:
LOG.error(
_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
_('Configuring HDFS HA failed. %s') % result.text)
def _hdfs_ha_get_config_tag(self, hac, config_name):
config_url = ('http://{0}/api/v1/clusters/{1}'
'/configurations?type={2}').format(
hac['ambari_info'].get_address(), hac['name'],
config_name)
result = self._get(config_url, hac['ambari_info'])
if result.status_code == 200:
json_result = json.loads(result.text)
items = json_result['items']
return items[0]['tag']
else:
LOG.error(
_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
_('Configuring HDFS HA failed. %s') % result.text)
def _hdfs_ha_get_config(self, hac, config_name, tag):
config_url = ('http://{0}/api/v1/clusters/{1}'
'/configurations?type={2}&tag={3}').format(
hac['ambari_info'].get_address(), hac['name'],
config_name, tag)
result = self._get(config_url, hac['ambari_info'])
if result.status_code == 200:
json_result = json.loads(result.text)
items = json_result['items']
return items[0]['properties']
else:
LOG.error(
_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
_('Configuring HDFS HA failed. %s') % result.text)
def _hdfs_ha_put_config(self, hac, config_name, tag, properties):
config_url = ('http://{0}/api/v1/clusters/{1}').format(
hac['ambari_info'].get_address(), hac['name'])
        body = {
            'Clusters': {
                'desired_config': {
                    'type': config_name,
                    'tag': tag,
                    'properties': properties
                }
            }
        }
LOG.debug("body: {body}".format(body=body))
result = self._put(config_url, hac['ambari_info'],
data=json.dumps(body))
if result.status_code != 200:
LOG.error(
_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
_('Configuring HDFS HA failed. %s') % result.text)
def _hdfs_ha_update_hdfs_site(self, hac, hdfs_site):
hdfs_site['dfs.nameservices'] = hac['name']
hdfs_site['dfs.ha.namenodes.{0}'.format(
hac['name'])] = hac['nn_active'] + ',' + hac['nn_standby']
hdfs_site['dfs.namenode.rpc-address.{0}.{1}'.format(
hac['name'], hac['nn_active'])] = '{0}:{1}'.format(
hac['nn_active'], hac['nn_rpc'])
hdfs_site['dfs.namenode.rpc-address.{0}.{1}'.format(
hac['name'], hac['nn_standby'])] = '{0}:{1}'.format(
hac['nn_standby'], hac['nn_rpc'])
hdfs_site['dfs.namenode.http-address.{0}.{1}'.format(
hac['name'], hac['nn_active'])] = '{0}:{1}'.format(
hac['nn_active'], hac['nn_ui'])
hdfs_site['dfs.namenode.http-address.{0}.{1}'.format(
hac['name'], hac['nn_standby'])] = '{0}:{1}'.format(
hac['nn_standby'], hac['nn_ui'])
qjournal = ';'.join([x+':8485' for x in hac['jn_hosts']])
hdfs_site['dfs.namenode.shared.edits.dir'] = ('qjournal://{0}/{1}'.
format(qjournal,
hac['name']))
hdfs_site['dfs.client.failover.proxy.provider.{0}'.format(
hac['name'])] = ("org.apache.hadoop.hdfs.server.namenode.ha."
"ConfiguredFailoverProxyProvider")
hdfs_site['dfs.ha.fencing.methods'] = 'shell(/bin/true)'
hdfs_site['dfs.ha.automatic-failover.enabled'] = 'true'
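        # Illustration: for a nameservice "cl1" with namenodes "nn1"/"nn2"
        # (placeholder values) this produces keys such as
        #   dfs.ha.namenodes.cl1 = "nn1,nn2"
        #   dfs.namenode.rpc-address.cl1.nn1 = "nn1:8020"
        #   dfs.namenode.shared.edits.dir = "qjournal://jn1:8485;jn2:8485/cl1"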
return hdfs_site
def _hdfs_ha_update_core_site(self, hac, core_site):
core_site['fs.defaultFS'] = 'hdfs://{0}'.format(hac['name'])
core_site['ha.zookeeper.quorum'] = '{0}'.format(
','.join([x+':2181' for x in hac['zk_hosts']]))
# if HUE is installed add some httpfs configs
if hac['hue_host']:
core_site['hadoop.proxyuser.httpfs.groups'] = '*'
core_site['hadoop.proxyuser.httpfs.hosts'] = '*'
return core_site
def _hdfs_ha_update_hbase_site(self, hac, hbase_site):
hbase_site['hbase.rootdir'] = 'hdfs://{0}/apps/hbase/data'.format(
hac['name'])
return hbase_site
def _hdfs_ha_setup_hue(self, hac):
hac['server_hue'].install_httpfs()
# write a temp file and
# use it when starting HUE with HDFS HA enabled
hac['server_hue'].write_hue_temp_file('/tmp/hueini-hdfsha',
hac['name'])
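# -----------------------------------------------------------------------------
# Minimal standalone sketch of the request/poll pattern used by the client
# above: issue a command against the Ambari REST API, then poll the returned
# request's tasks until they all complete. This is illustrative only and not
# part of the plugin; the address, cluster name and credentials are
# placeholders, and the `requests` library stands in for the client's
# _put/_get helpers.
# -----------------------------------------------------------------------------
import json
import time

import requests


def install_all_services(address, cluster_name, auth):
    install_url = ('http://{0}/api/v1/clusters/{1}/services'
                   '?ServiceInfo/state=INIT'.format(address, cluster_name))
    body = ('{"RequestInfo" : { "context" : "Install all services" },'
            '"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}')
    result = requests.put(install_url, data=body, auth=auth)
    if result.status_code != 202:
        raise RuntimeError('Install command failed: %s' % result.text)
    request_id = json.loads(result.text)['Requests']['id']
    tasks_url = ('http://{0}/api/v1/clusters/{1}/requests/{2}/tasks'
                 '?fields=Tasks/status'.format(address, cluster_name,
                                               request_id))
    while True:
        tasks = requests.get(tasks_url, auth=auth).json()['items']
        statuses = [task['Tasks']['status'] for task in tasks]
        if any(s in ('FAILED', 'ABORTED', 'TIMEDOUT') for s in statuses):
            raise RuntimeError('Request %s failed: %s' % (request_id, statuses))
        if tasks and all(s == 'COMPLETED' for s in statuses):
            return
        time.sleep(5)


# Example call (placeholder endpoint and credentials):
# install_all_services('ambari.example.com:8080', 'cluster-1',
#                      ('admin', 'admin'))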
# Copyright 2010-2011, Sikuli.org
# Released under the MIT License.
##
# This module provides a Jython interface of Sikuli Script to automate GUI
# interactions.
##
from __future__ import with_statement
import java.io.File
import time
from org.sikuli.script import SikuliScript
from org.sikuli.script import Match
from org.sikuli.script import Pattern
from org.sikuli.script import FindFailed
from org.sikuli.script import SikuliEvent
import __builtin__
import __main__
import types
import sys
from org.sikuli.script import Region as JRegion
from org.sikuli.script import UnionScreen
from org.sikuli.script import Finder
from org.sikuli.script import Location
from org.sikuli.script import Settings
from org.sikuli.script import OS
from org.sikuli.script import App
from org.sikuli.script import ScreenHighlighter
from org.sikuli.script import ImageLocator
from org.sikuli.script import Key
from org.sikuli.script import KeyModifier
from org.sikuli.script.KeyModifier import KEY_CTRL, KEY_SHIFT, KEY_META, KEY_CMD, KEY_WIN, KEY_ALT
from org.sikuli.script import Button
from java.awt import Rectangle
from Region import *
from Screen import *
from VDict import *
from Helper import *
from Env import *
import SikuliImporter
_si = SikuliScript()
##
# Loads a Sikuli extension (.jar), searching in order:
# 1. the given path
# 2. the bundle path
# 3. the user's extension path
#
def load(jar):
import os
from org.sikuli.script import ExtensionManager
def _load(abspath):
if os.path.exists(abspath):
if not abspath in sys.path:
sys.path.append(abspath)
return True
return False
if _load(jar):
return True
path = getBundlePath()
if path:
jarInBundle = path + java.io.File.separator + jar
if _load(jarInBundle):
return True
path = ExtensionManager.getInstance().getUserExtPath()
jarInExtPath = path + java.io.File.separator + jar
if _load(jarInExtPath):
return True
return False
def addModPath(path):
import os
    if path[-1:] == os.sep:
path = path[:-1]
if not path in sys.path:
sys.path.append(path)
def addImagePath(path):
ImageLocator.addImagePath(path)
def getImagePath():
return ImageLocator.getImagePath()
def removeImagePath(path):
ImageLocator.removeImagePath(path)
##
# Sets the path for searching images in all Sikuli Script methods. <br/>
# Sikuli IDE sets this to the path of the bundle of source code (.sikuli)
# automatically. If you write Sikuli scripts with the Sikuli IDE, you should
# not call this method.
#
def setBundlePath(path):
if path[-1:] == java.io.File.separator:
path = path[:-1]
Settings.BundlePath = path
def getBundlePath():
return Settings.BundlePath
##
# Sikuli shows actions (click, dragDrop, ... etc.) if this flag is set to <i>True</i>.
# The default setting is <i>False</i>.
#
def setShowActions(flag):
_si.setShowActions(flag)
##
# Shows a question-message dialog requesting input from the user.
# @param msg The message to display.
# @param default The preset text of the input field.
# @return The user's input string.
#
def input(msg="", default=""):
return _si.input(msg, default)
def capture(*args):
scr = UnionScreen()
if len(args) == 0:
simg = scr.userCapture()
if simg:
return simg.getFilename()
else:
return None
elif len(args) == 1:
if __builtin__.type(args[0]) is types.StringType or __builtin__.type(args[0]) is types.UnicodeType:
simg = scr.userCapture(args[0])
if simg:
return simg.getFilename()
else:
return None
else:
return scr.capture(args[0]).getFilename()
elif len(args) == 4:
return scr.capture(args[0], args[1], args[2], args[3]).getFilename()
else:
return None
def selectRegion(msg=None):
if msg:
r = UnionScreen().selectRegion(msg)
else:
r = UnionScreen().selectRegion()
if r:
return Region(r)
else:
return None
##
# Switches the frontmost application to the given application.
# If the given application is not running, it will be launched by openApp()
# automatically. <br/>
# Note: On Windows, Sikuli searches the text on the title bar
# instead of the application name.
# @param app The name of the application. (case-insensitive)
#
def switchApp(app):
return _si.switchApp(app)
##
# Opens the given application. <br/>
# @param app The name of an application if it is in the environment variable PATH, or the full path to an application.
#
def openApp(app):
return _si.openApp(app)
##
# Closes the given application. <br/>
# @param app The name of the application. (case-insensitive)
#
def closeApp(app):
return _si.closeApp(app)
##
# Sleeps until the given amount of time in seconds has elapsed.
# @param sec The amount of sleeping time in seconds.
def sleep(sec):
time.sleep(sec)
##
# Shows a message dialog containing the given message.
# @param msg The given message string.
def popup(msg, title="Sikuli"):
_si.popup(msg, title)
def exit(code=0):
ScreenHighlighter.closeAll()
sys.exit(code)
##
# Runs the given string command.
# @param cmd The given string command.
# @return Returns the output from the executed command.
def run(cmd):
return _si.run(cmd)
############### SECRET FUNCTIONS ################
def getSikuliScript():
return _si
def initSikuli():
dict = globals()
dict['SCREEN'] = Screen()
dict['SCREEN']._exposeAllMethods(__name__)
#print "Sikuli is initialized. ", id(dict['SCREEN'])
initSikuli()
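##
# Illustrative usage of the module-level helpers defined above. This is only
# a sketch: it assumes a Jython runtime with the Sikuli jars on the classpath
# and an application named "TextEdit"; the bundle path is a placeholder.
#
def _exampleUsage():
    setBundlePath("/path/to/scripts.sikuli")  # placeholder bundle location
    setShowActions(True)      # visualize actions such as click and dragDrop
    openApp("TextEdit")       # launch the application (or switch to it)
    sleep(2)                  # give the application time to come up
    filename = capture()      # let the user capture a screen region
    if filename:
        popup("Captured image saved to: " + filename)
    closeApp("TextEdit")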
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def metric_accessors():
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
# regression
response_col = "economy"
distribution = "gaussian"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = H2OGradientBoostingEstimator(nfolds=3,
distribution=distribution,
fold_assignment="Random")
gbm.train(x=predictors, y=response_col, training_frame=train, validation_frame=valid)
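    # The accessor behaviour exercised below (as the assertions document):
    # with exactly one of train/valid/xval set to True the accessors return a
    # single value; with two or more flags set they return a dict keyed by
    # "train"/"valid"/"xval"; with all three flags False they fall back to the
    # training metric. A minimal sketch of the same check written generically
    # (illustrative only, not part of the original test):
    #
    #     def check_scalar_accessor(accessor):
    #         single = accessor(train=True, valid=False, xval=False)
    #         combined = accessor(train=True, valid=True, xval=False)
    #         assert isinstance(single, float)
    #         assert set(combined.keys()) == set(["train", "valid"])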
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in list(mse.keys()) and "valid" in list(mse.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in list(mse.keys()) and "xval" in list(mse.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in list(mse.keys()) and "valid" in list(mse.keys()) and "xval" in list(mse.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in list(mse.keys()) and "xval" in list(mse.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
    assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# mean_residual_deviance
mean_residual_deviance1 = gbm.mean_residual_deviance(train=True, valid=False, xval=False)
assert isinstance(mean_residual_deviance1, float)
mean_residual_deviance2 = gbm.mean_residual_deviance(train=False, valid=True, xval=False)
assert isinstance(mean_residual_deviance2, float)
mean_residual_deviance3 = gbm.mean_residual_deviance(train=False, valid=False, xval=True)
assert isinstance(mean_residual_deviance3, float)
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=False)
assert "train" in list(mean_residual_deviance.keys()) and "valid" in list(mean_residual_deviance.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert len(mean_residual_deviance) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]))
assert mean_residual_deviance["valid"] == mean_residual_deviance2
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=False, xval=True)
assert "train" in list(mean_residual_deviance.keys()) and "xval" in list(mean_residual_deviance.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert len(mean_residual_deviance) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["xval"]))
assert mean_residual_deviance["xval"] == mean_residual_deviance3
mean_residual_deviance = gbm.mean_residual_deviance(train=True, valid=True, xval=True)
assert "train" in list(mean_residual_deviance.keys()) and "valid" in list(mean_residual_deviance.keys()) and "xval" in list(mean_residual_deviance.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert len(mean_residual_deviance) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert isinstance(mean_residual_deviance["train"], float) and isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mean_residual_deviance["train"]), type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mean_residual_deviance, float)
assert mean_residual_deviance == mean_residual_deviance1
mean_residual_deviance = gbm.mean_residual_deviance(train=False, valid=True, xval=True)
assert "valid" in list(mean_residual_deviance.keys()) and "xval" in list(mean_residual_deviance.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
assert len(mean_residual_deviance) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(mean_residual_deviance.keys()))
    assert isinstance(mean_residual_deviance["valid"], float) and isinstance(mean_residual_deviance["xval"], float), "expected validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mean_residual_deviance["valid"]), type(mean_residual_deviance["xval"]))
# binomial
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "economy_20mpg"
distribution = "bernoulli"
predictors = ["displacement","power","weight","acceleration","year"]
gbm = H2OGradientBoostingEstimator(nfolds=3, distribution=distribution, fold_assignment="Random")
gbm.train(y=response_col, x=predictors, validation_frame=valid, training_frame=train)
# auc
auc1 = gbm.auc(train=True, valid=False, xval=False)
assert isinstance(auc1, float)
auc2 = gbm.auc(train=False, valid=True, xval=False)
assert isinstance(auc2, float)
auc3 = gbm.auc(train=False, valid=False, xval=True)
assert isinstance(auc3, float)
auc = gbm.auc(train=True, valid=True, xval=False)
assert "train" in list(auc.keys()) and "valid" in list(auc.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert len(auc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["valid"]))
assert auc["valid"] == auc2
auc = gbm.auc(train=True, valid=False, xval=True)
assert "train" in list(auc.keys()) and "xval" in list(auc.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert len(auc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert isinstance(auc["train"], float) and isinstance(auc["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["train"]), type(auc["xval"]))
assert auc["xval"] == auc3
auc = gbm.auc(train=True, valid=True, xval=True)
assert "train" in list(auc.keys()) and "valid" in list(auc.keys()) and "xval" in list(auc.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert len(auc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert isinstance(auc["train"], float) and isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(auc["train"]), type(auc["valid"]), type(auc["xval"]))
auc = gbm.auc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(auc, float)
assert auc == auc1
auc = gbm.auc(train=False, valid=True, xval=True)
assert "valid" in list(auc.keys()) and "xval" in list(auc.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
assert len(auc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(auc.keys()))
    assert isinstance(auc["valid"], float) and isinstance(auc["xval"], float), "expected validation and cross validation metrics to be floats, but got {0} and {1}".format(type(auc["valid"]), type(auc["xval"]))
# roc
(fprs1, tprs1) = gbm.roc(train=True, valid=False, xval=False)
assert isinstance(fprs1, list)
assert isinstance(tprs1, list)
(fprs2, tprs2) = gbm.roc(train=False, valid=True, xval=False)
assert isinstance(fprs2, list)
assert isinstance(tprs2, list)
(fprs3, tprs3) = gbm.roc(train=False, valid=False, xval=True)
assert isinstance(fprs3, list)
assert isinstance(tprs3, list)
roc = gbm.roc(train=True, valid=True, xval=False)
assert "train" in list(roc.keys()) and "valid" in list(roc.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert len(roc) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert isinstance(roc["train"], tuple) and isinstance(roc["valid"], tuple), "expected training and validation metrics to be tuples, but got {0} and {1}".format(type(roc["train"]), type(roc["valid"]))
assert roc["valid"][0] == fprs2
assert roc["valid"][1] == tprs2
roc = gbm.roc(train=True, valid=False, xval=True)
assert "train" in list(roc.keys()) and "xval" in list(roc.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert len(roc) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert isinstance(roc["train"], tuple) and isinstance(roc["xval"], tuple), "expected training and cross validation metrics to be tuples, but got {0} and {1}".format(type(roc["train"]), type(roc["xval"]))
assert roc["xval"][0] == fprs3
assert roc["xval"][1] == tprs3
roc = gbm.roc(train=True, valid=True, xval=True)
assert "train" in list(roc.keys()) and "valid" in list(roc.keys()) and "xval" in list(roc.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert len(roc) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert isinstance(roc["train"], tuple) and isinstance(roc["valid"], tuple) and isinstance(roc["xval"], tuple), "expected training, validation, and cross validation metrics to be tuples, but got {0}, {1}, and {2}".format(type(roc["train"]), type(roc["valid"]), type(roc["xval"]))
(fprs, tprs) = gbm.roc(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(fprs, list)
assert isinstance(tprs, list)
assert fprs == fprs1
assert tprs == tprs1
roc = gbm.roc(train=False, valid=True, xval=True)
assert "valid" in list(roc.keys()) and "xval" in list(roc.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
assert len(roc) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(roc.keys()))
    assert isinstance(roc["valid"], tuple) and isinstance(roc["xval"], tuple), "expected validation and cross validation metrics to be tuples, but got {0} and {1}".format(type(roc["valid"]), type(roc["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in list(logloss.keys()) and "valid" in list(logloss.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in list(logloss.keys()) and "valid" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
    assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# gini
gini1 = gbm.gini(train=True, valid=False, xval=False)
assert isinstance(gini1, float)
gini2 = gbm.gini(train=False, valid=True, xval=False)
assert isinstance(gini2, float)
gini3 = gbm.gini(train=False, valid=False, xval=True)
assert isinstance(gini3, float)
gini = gbm.gini(train=True, valid=True, xval=False)
assert "train" in list(gini.keys()) and "valid" in list(gini.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert len(gini) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert isinstance(gini["train"], float) and isinstance(gini["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(gini["train"]), type(gini["valid"]))
assert gini["valid"] == gini2
gini = gbm.gini(train=True, valid=False, xval=True)
assert "train" in list(gini.keys()) and "xval" in list(gini.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert len(gini) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert isinstance(gini["train"], float) and isinstance(gini["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(gini["train"]), type(gini["xval"]))
assert gini["xval"] == gini3
gini = gbm.gini(train=True, valid=True, xval=True)
assert "train" in list(gini.keys()) and "valid" in list(gini.keys()) and "xval" in list(gini.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert len(gini) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert isinstance(gini["train"], float) and isinstance(gini["valid"], float) and isinstance(gini["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(gini["train"]), type(gini["valid"]), type(gini["xval"]))
gini = gbm.gini(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(gini, float)
assert gini == gini1
gini = gbm.gini(train=False, valid=True, xval=True)
assert "valid" in list(gini.keys()) and "xval" in list(gini.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
assert len(gini) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(gini.keys()))
    assert isinstance(gini["valid"], float) and isinstance(gini["xval"], float), "expected validation and cross validation metrics to be floats, but got {0} and {1}".format(type(gini["valid"]), type(gini["xval"]))
# F1
F11 = gbm.F1(train=True, valid=False, xval=False)
F12 = gbm.F1(train=False, valid=True, xval=False)
F13 = gbm.F1(train=False, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=False)
F1 = gbm.F1(train=True, valid=False, xval=True)
F1 = gbm.F1(train=True, valid=True, xval=True)
F1 = gbm.F1(train=False, valid=False, xval=False) # default: return training metrics
F1 = gbm.F1(train=False, valid=True, xval=True)
# F0point5
F0point51 = gbm.F0point5(train=True, valid=False, xval=False)
F0point52 = gbm.F0point5(train=False, valid=True, xval=False)
F0point53 = gbm.F0point5(train=False, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=False)
F0point5 = gbm.F0point5(train=True, valid=False, xval=True)
F0point5 = gbm.F0point5(train=True, valid=True, xval=True)
F0point5 = gbm.F0point5(train=False, valid=False, xval=False) # default: return training metrics
F0point5 = gbm.F0point5(train=False, valid=True, xval=True)
# F2
F21 = gbm.F2(train=True, valid=False, xval=False)
F22 = gbm.F2(train=False, valid=True, xval=False)
F23 = gbm.F2(train=False, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=False)
F2 = gbm.F2(train=True, valid=False, xval=True)
F2 = gbm.F2(train=True, valid=True, xval=True)
F2 = gbm.F2(train=False, valid=False, xval=False) # default: return training metrics
F2 = gbm.F2(train=False, valid=True, xval=True)
# accuracy
accuracy1 = gbm.accuracy(train=True, valid=False, xval=False)
accuracy2 = gbm.accuracy(train=False, valid=True, xval=False)
accuracy3 = gbm.accuracy(train=False, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=False)
accuracy = gbm.accuracy(train=True, valid=False, xval=True)
accuracy = gbm.accuracy(train=True, valid=True, xval=True)
accuracy = gbm.accuracy(train=False, valid=False, xval=False) # default: return training metrics
accuracy = gbm.accuracy(train=False, valid=True, xval=True)
# error
error1 = gbm.error(train=True, valid=False, xval=False)
error2 = gbm.error(train=False, valid=True, xval=False)
error3 = gbm.error(train=False, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=False)
error = gbm.error(train=True, valid=False, xval=True)
error = gbm.error(train=True, valid=True, xval=True)
error = gbm.error(train=False, valid=False, xval=False) # default: return training metrics
error = gbm.error(train=False, valid=True, xval=True)
# precision
precision1 = gbm.precision(train=True, valid=False, xval=False)
precision2 = gbm.precision(train=False, valid=True, xval=False)
precision3 = gbm.precision(train=False, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=False)
precision = gbm.precision(train=True, valid=False, xval=True)
precision = gbm.precision(train=True, valid=True, xval=True)
precision = gbm.precision(train=False, valid=False, xval=False) # default: return training metrics
precision = gbm.precision(train=False, valid=True, xval=True)
# mcc
mcc1 = gbm.mcc(train=True, valid=False, xval=False)
mcc2 = gbm.mcc(train=False, valid=True, xval=False)
mcc3 = gbm.mcc(train=False, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=False)
mcc = gbm.mcc(train=True, valid=False, xval=True)
mcc = gbm.mcc(train=True, valid=True, xval=True)
mcc = gbm.mcc(train=False, valid=False, xval=False) # default: return training metrics
mcc = gbm.mcc(train=False, valid=True, xval=True)
# max_per_class_error
max_per_class_error1 = gbm.max_per_class_error(train=True, valid=False, xval=False)
max_per_class_error2 = gbm.max_per_class_error(train=False, valid=True, xval=False)
max_per_class_error3 = gbm.max_per_class_error(train=False, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=False)
max_per_class_error = gbm.max_per_class_error(train=True, valid=False, xval=True)
max_per_class_error = gbm.max_per_class_error(train=True, valid=True, xval=True)
max_per_class_error = gbm.max_per_class_error(train=False, valid=False, xval=False) # default: return training metrics
max_per_class_error = gbm.max_per_class_error(train=False, valid=True, xval=True)
# mean_per_class_error
mean_per_class_error1 = gbm.mean_per_class_error(train=True, valid=False, xval=False)
mean_per_class_error2 = gbm.mean_per_class_error(train=False, valid=True, xval=False)
mean_per_class_error3 = gbm.mean_per_class_error(train=False, valid=False, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=True, xval=False)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=False, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=True, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=False, valid=False, xval=False) # default: return training metrics
mean_per_class_error = gbm.mean_per_class_error(train=False, valid=True, xval=True)
# confusion_matrix
confusion_matrix1 = gbm.confusion_matrix(train=True, valid=False, xval=False)
confusion_matrix2 = gbm.confusion_matrix(train=False, valid=True, xval=False)
confusion_matrix3 = gbm.confusion_matrix(train=False, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=False)
confusion_matrix = gbm.confusion_matrix(train=True, valid=False, xval=True)
confusion_matrix = gbm.confusion_matrix(train=True, valid=True, xval=True)
confusion_matrix = gbm.confusion_matrix(train=False, valid=False, xval=False) # default: return training metrics
confusion_matrix = gbm.confusion_matrix(train=False, valid=True, xval=True)
# # plot
# plot1 = gbm.plot(train=True, valid=False, xval=False)
# plot2 = gbm.plot(train=False, valid=True, xval=False)
# plot3 = gbm.plot(train=False, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=False)
# plot = gbm.plot(train=True, valid=False, xval=True)
# plot = gbm.plot(train=True, valid=True, xval=True)
# plot = gbm.plot(train=False, valid=False, xval=False) # default: return training metrics
# plot = gbm.plot(train=False, valid=True, xval=True)
# # tpr
# tpr1 = gbm.tpr(train=True, valid=False, xval=False)
# tpr2 = gbm.tpr(train=False, valid=True, xval=False)
# tpr3 = gbm.tpr(train=False, valid=False, xval=True)
# tpr = gbm.tpr(train=True, valid=True, xval=False)
# tpr = gbm.tpr(train=True, valid=False, xval=True)
# tpr = gbm.tpr(train=True, valid=True, xval=True)
# tpr = gbm.tpr(train=False, valid=False, xval=False) # default: return training metrics
# tpr = gbm.tpr(train=False, valid=True, xval=True)
#
# # tnr
# tnr1 = gbm.tnr(train=True, valid=False, xval=False)
# tnr2 = gbm.tnr(train=False, valid=True, xval=False)
# tnr3 = gbm.tnr(train=False, valid=False, xval=True)
# tnr = gbm.tnr(train=True, valid=True, xval=False)
# tnr = gbm.tnr(train=True, valid=False, xval=True)
# tnr = gbm.tnr(train=True, valid=True, xval=True)
# tnr = gbm.tnr(train=False, valid=False, xval=False) # default: return training metrics
# tnr = gbm.tnr(train=False, valid=True, xval=True)
#
# # fnr
# fnr1 = gbm.fnr(train=True, valid=False, xval=False)
# fnr2 = gbm.fnr(train=False, valid=True, xval=False)
# fnr3 = gbm.fnr(train=False, valid=False, xval=True)
# fnr = gbm.fnr(train=True, valid=True, xval=False)
# fnr = gbm.fnr(train=True, valid=False, xval=True)
# fnr = gbm.fnr(train=True, valid=True, xval=True)
# fnr = gbm.fnr(train=False, valid=False, xval=False) # default: return training metrics
# fnr = gbm.fnr(train=False, valid=True, xval=True)
#
# # fpr
# fpr1 = gbm.fpr(train=True, valid=False, xval=False)
# fpr2 = gbm.fpr(train=False, valid=True, xval=False)
# fpr3 = gbm.fpr(train=False, valid=False, xval=True)
# fpr = gbm.fpr(train=True, valid=True, xval=False)
# fpr = gbm.fpr(train=True, valid=False, xval=True)
# fpr = gbm.fpr(train=True, valid=True, xval=True)
# fpr = gbm.fpr(train=False, valid=False, xval=False) # default: return training metrics
# fpr = gbm.fpr(train=False, valid=True, xval=True)
# multinomial
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
cars["cylinders"] = cars["cylinders"].asfactor()
r = cars[0].runif()
train = cars[r > .2]
valid = cars[r <= .2]
response_col = "cylinders"
distribution = "multinomial"
predictors = ["displacement","power","weight","acceleration","year"]
gbm.distribution="multinomial"
gbm.train(x=predictors,y=response_col, training_frame=train, validation_frame=valid)
# mse
mse1 = gbm.mse(train=True, valid=False, xval=False)
assert isinstance(mse1, float)
mse2 = gbm.mse(train=False, valid=True, xval=False)
assert isinstance(mse2, float)
mse3 = gbm.mse(train=False, valid=False, xval=True)
assert isinstance(mse3, float)
mse = gbm.mse(train=True, valid=True, xval=False)
assert "train" in list(mse.keys()) and "valid" in list(mse.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["valid"]))
assert mse["valid"] == mse2
mse = gbm.mse(train=True, valid=False, xval=True)
assert "train" in list(mse.keys()) and "xval" in list(mse.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["train"]), type(mse["xval"]))
assert mse["xval"] == mse3
mse = gbm.mse(train=True, valid=True, xval=True)
assert "train" in list(mse.keys()) and "valid" in list(mse.keys()) and "xval" in list(mse.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["train"], float) and isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(mse["train"]), type(mse["valid"]), type(mse["xval"]))
mse = gbm.mse(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(mse, float)
assert mse == mse1
mse = gbm.mse(train=False, valid=True, xval=True)
assert "valid" in list(mse.keys()) and "xval" in list(mse.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert len(mse) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(mse.keys()))
assert isinstance(mse["valid"], float) and isinstance(mse["xval"], float), "expected validation and cross validation metrics to be floats, but got {0} and {1}".format(type(mse["valid"]), type(mse["xval"]))
# logloss
logloss1 = gbm.logloss(train=True, valid=False, xval=False)
assert isinstance(logloss1, float)
logloss2 = gbm.logloss(train=False, valid=True, xval=False)
assert isinstance(logloss2, float)
logloss3 = gbm.logloss(train=False, valid=False, xval=True)
assert isinstance(logloss3, float)
logloss = gbm.logloss(train=True, valid=True, xval=False)
assert "train" in list(logloss.keys()) and "valid" in list(logloss.keys()), "expected training and validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected only training and validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float), "expected training and validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["valid"]))
assert logloss["valid"] == logloss2
logloss = gbm.logloss(train=True, valid=False, xval=True)
assert "train" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["train"]), type(logloss["xval"]))
assert logloss["xval"] == logloss3
logloss = gbm.logloss(train=True, valid=True, xval=True)
assert "train" in list(logloss.keys()) and "valid" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected training, validation, and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 3, "expected training, validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["train"], float) and isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected training, validation, and cross validation metrics to be floats, but got {0}, {1}, and {2}".format(type(logloss["train"]), type(logloss["valid"]), type(logloss["xval"]))
logloss = gbm.logloss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(logloss, float)
assert logloss == logloss1
logloss = gbm.logloss(train=False, valid=True, xval=True)
assert "valid" in list(logloss.keys()) and "xval" in list(logloss.keys()), "expected validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert len(logloss) == 2, "expected validation and cross validation metrics to be returned, but got {0}".format(list(logloss.keys()))
assert isinstance(logloss["valid"], float) and isinstance(logloss["xval"], float), "expected validation and cross validation metrics to be floats, but got {0} and {1}".format(type(logloss["valid"]), type(logloss["xval"]))
# hit_ratio_table
hit_ratio_table1 = gbm.hit_ratio_table(train=True, valid=False, xval=False)
hit_ratio_table2 = gbm.hit_ratio_table(train=False, valid=True, xval=False)
hit_ratio_table3 = gbm.hit_ratio_table(train=False, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=False)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=False, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=True, valid=True, xval=True)
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=False, xval=False) # default: return training metrics
hit_ratio_table = gbm.hit_ratio_table(train=False, valid=True, xval=True)
# mean_per_class_error
mean_per_class_error1 = gbm.mean_per_class_error(train=True, valid=False, xval=False)
mean_per_class_error2 = gbm.mean_per_class_error(train=False, valid=True, xval=False)
mean_per_class_error3 = gbm.mean_per_class_error(train=False, valid=False, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=True, xval=False)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=False, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=True, valid=True, xval=True)
mean_per_class_error = gbm.mean_per_class_error(train=False, valid=False, xval=False) # default: return training metrics
mean_per_class_error = gbm.mean_per_class_error(train=False, valid=True, xval=True)
# clustering
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
from h2o.estimators.kmeans import H2OKMeansEstimator
km = H2OKMeansEstimator(k=3, nfolds=3)
km.train(x=list(range(4)), training_frame=iris)
# betweenss
betweenss1 = km.betweenss(train=True, valid=False, xval=False)
assert isinstance(betweenss1, float)
betweenss3 = km.betweenss(train=False, valid=False, xval=True)
assert isinstance(betweenss3, float)
betweenss = km.betweenss(train=True, valid=False, xval=True)
assert "train" in list(betweenss.keys()) and "xval" in list(betweenss.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(betweenss.keys()))
assert len(betweenss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(betweenss.keys()))
assert isinstance(betweenss["train"], float) and isinstance(betweenss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(betweenss["train"]), type(betweenss["xval"]))
assert betweenss["xval"] == betweenss3
betweenss = km.betweenss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(betweenss, float)
assert betweenss == betweenss1
# totss
totss1 = km.totss(train=True, valid=False, xval=False)
assert isinstance(totss1, float)
totss3 = km.totss(train=False, valid=False, xval=True)
assert isinstance(totss3, float)
totss = km.totss(train=True, valid=False, xval=True)
assert "train" in list(totss.keys()) and "xval" in list(totss.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(totss.keys()))
assert len(totss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(totss.keys()))
assert isinstance(totss["train"], float) and isinstance(totss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(totss["train"]), type(totss["xval"]))
assert totss["xval"] == totss3
totss = km.totss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(totss, float)
assert totss == totss1
# tot_withinss
tot_withinss1 = km.tot_withinss(train=True, valid=False, xval=False)
assert isinstance(tot_withinss1, float)
tot_withinss3 = km.tot_withinss(train=False, valid=False, xval=True)
assert isinstance(tot_withinss3, float)
tot_withinss = km.tot_withinss(train=True, valid=False, xval=True)
assert "train" in list(tot_withinss.keys()) and "xval" in list(tot_withinss.keys()), "expected training and cross validation metrics to be returned, but got {0}".format(list(tot_withinss.keys()))
assert len(tot_withinss) == 2, "expected only training and cross validation metrics to be returned, but got {0}".format(list(tot_withinss.keys()))
assert isinstance(tot_withinss["train"], float) and isinstance(tot_withinss["xval"], float), "expected training and cross validation metrics to be floats, but got {0} and {1}".format(type(tot_withinss["train"]), type(tot_withinss["xval"]))
assert tot_withinss["xval"] == tot_withinss3
tot_withinss = km.tot_withinss(train=False, valid=False, xval=False) # default: return training metrics
assert isinstance(tot_withinss, float)
assert tot_withinss == tot_withinss1
# withinss
withinss1 = km.withinss(train=True, valid=False, xval=False)
withinss3 = km.withinss(train=False, valid=False, xval=True)
withinss = km.withinss(train=True, valid=False, xval=True)
withinss = km.withinss(train=False, valid=False, xval=False) # default: return training metrics
# centroid_stats
centroid_stats1 = km.centroid_stats(train=True, valid=False, xval=False)
centroid_stats3 = km.centroid_stats(train=False, valid=False, xval=True)
centroid_stats = km.centroid_stats(train=True, valid=False, xval=True)
centroid_stats = km.centroid_stats(train=False, valid=False, xval=False) # default: return training metrics
# size
size1 = km.size(train=True, valid=False, xval=False)
size3 = km.size(train=False, valid=False, xval=True)
size = km.size(train=True, valid=False, xval=True)
size = km.size(train=False, valid=False, xval=False) # default: return training metrics
if __name__ == "__main__":
pyunit_utils.standalone_test(metric_accessors)
else:
metric_accessors()
|
|
""" path.py - An object representing a path to a file or directory.
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.2 or later.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
Date: 7 Mar 2004
"""
# TODO
# - Tree-walking functions don't avoid symlink loops. Matt Harrison sent me a patch for this.
# - Tree-walking functions can't ignore errors. Matt Harrison asked for this.
#
# - Two people asked for path.chdir(). This just seems wrong to me,
# I dunno. chdir() is moderately evil anyway.
#
# - Bug in write_text(). It doesn't support Universal newline mode.
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
# - Add methods for regex find and replace.
# - guess_content_type() method?
# - Perhaps support arguments to touch().
# - Could add split() and join() methods that generate warnings.
from __future__ import generators
import sys, warnings, os, fnmatch, glob, shutil, codecs, md5
__version__ = '2.1'
__all__ = ['path']
# Platform-specific support for path.owner
if os.name == 'nt':
try:
import win32security
except ImportError:
win32security = None
else:
try:
import pwd
except ImportError:
pwd = None
# Pre-2.3 support. Are unicode filenames supported?
_base = str
_getcwd = os.getcwd
try:
if os.path.supports_unicode_filenames:
_base = unicode
_getcwd = os.getcwdu
except AttributeError:
pass
# Pre-2.3 workaround for booleans
try:
True, False
except NameError:
True, False = 1, 0
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
# Universal newline support
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
class TreeWalkWarning(Warning):
pass
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: #Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def getcwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
getcwd = classmethod(getcwd)
# --- Operations on path strings.
isabs = os.path.isabs
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
basename = os.path.basename
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
r""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, '/' or 'C:\\'). The other items in
the list will be strings.
path.path.joinpath(*result) will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
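# Comment-only sketch of the round trip described in the docstring (POSIX
# paths assumed; kept as a comment so importing this module has no side
# effects):
#   >>> p = path('/usr/local/lib')
#   >>> parts = p.splitall()
#   >>> parts
#   [path('/'), 'usr', 'local', 'lib']
#   >>> parts[0].joinpath(*parts[1:]) == p
#   True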
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
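# Worked example of the divergence logic above (comment only, assumed POSIX
# paths): for origin path('/a/b/c') and dest '/a/d',
#   orig_list = ['/', 'a', 'b', 'c'] and dest_list = ['/', 'a', 'd'];
# the lists agree for i = 2 components, so we need 4 - 2 = 2 os.pardir
# segments followed by the diverging tail ['d'], giving path('../../d').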
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, d.dirs('build-*').
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
d.files('*.pyc').
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional 'pattern' argument, this yields only
directories whose names match the given pattern. For
example, mydir.walkdirs('*test') yields only directories
with names ending in 'test'.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
def fnmatch(self, pattern):
""" Return True if self.name matches the given pattern.
pattern - A filename pattern with wildcards,
for example '*.py'.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return file(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or file(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
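# Comment-only sketch of the newline handling described above ('out.txt' is a
# made-up name; assuming a Windows host where os.linesep == '\r\n'):
#   path('out.txt').write_text('a\nb\r\nc')          # file contains 'a\r\nb\r\nc'
#   path('out.txt').write_text('a\nb', linesep=None)  # newlines written as-is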
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self):
""" Calculate the md5 hash for this file.
This reads through the entire file.
"""
f = self.open('rb')
try:
m = md5.new()
while True:
d = f.read(8192)
if not d:
break
m.update(d)
finally:
f.close()
return m.digest()
# --- Methods for querying the filesystem.
exists = os.path.exists
isdir = os.path.isdir
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
getatime = os.path.getatime
atime = property(
getatime, None, None,
""" Last access time of the file. """)
getmtime = os.path.getmtime
mtime = property(
getmtime, None, None,
""" Last-modified time of the file. """)
if hasattr(os.path, 'getctime'):
getctime = os.path.getctime
ctime = property(
getctime, None, None,
""" Creation time of the file. """)
getsize = os.path.getsize
size = property(
getsize, None, None,
""" Size of the file, in bytes. """)
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def get_owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
owner = property(
get_owner, None, None,
""" Name of the owner of this file or directory. """)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
def chmod(self, mode):
os.chmod(self, mode)
if hasattr(os, 'chown'):
def chown(self, uid, gid):
os.chown(self, uid, gid)
def rename(self, new):
os.rename(self, new)
def renames(self, new):
os.renames(self, new)
# --- Create/delete operations on directories
def mkdir(self, mode=0777):
os.mkdir(self, mode)
def makedirs(self, mode=0777):
os.makedirs(self, mode)
def rmdir(self):
os.rmdir(self)
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
def remove(self):
os.remove(self)
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
# --- High-level functions from shutil
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
move = shutil.move
rmtree = shutil.rmtree
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
|
|
import config
from gmusicapi import Mobileclient
import logging
from gi.repository import Gst, GLib
from collections import deque
from gevent import Greenlet
import gevent
class PlayerStates:
Stopped = "Stopped"
Paused = "Paused"
Playing = "Playing"
class RattleMediaPlayer:
def __init__(self):
self._logger = logging.getLogger('rattlemedia')
Gst.init(None)
self._player = Gst.ElementFactory.make('playbin', None)
if not self._player:
raise Exception('Player is None')
self._player.set_state(Gst.State.NULL)
self._logger.info('Starting to watch for gstreamer signals')
Greenlet.spawn(self.watch_for_message)
def watch_for_message(self):
bus = self._player.get_bus()
if not bus:
raise Exception('Couldn\'t create bus')
# Ideally we'd be using signal_watch on bus to fire on an event basis
# but getting the GLib main loop to work with gevent has proved problematic
# Polling works, but isn't as elegant
while True:
message = bus.pop()
if message:
self._logger.debug('Message received: {0}'.format(message.type))
if message.type == Gst.MessageType.EOS:
self._logger.info('End of stream received')
self.end_of_stream_event_handler()
elif message.type == Gst.MessageType.STATE_CHANGED:
self._logger.debug('State changed {0}'.format(self._player.get_state(100)[1]))
if not message:
gevent.sleep(0.5)
def _set_state(self, state):
try:
if state == PlayerStates.Stopped:
self._player.set_state(Gst.State.NULL)
elif state == PlayerStates.Paused:
self._player.set_state(Gst.State.PAUSED)
elif state == PlayerStates.Playing:
self._player.set_state(Gst.State.PLAYING)
else:
raise Exception('Unknown state')
finally:
self.state_change_event_handler()
def get_state(self):
current_state = self._player.get_state(Gst.CLOCK_TIME_NONE)[1]
if current_state == Gst.State.NULL:
return PlayerStates.Stopped
elif current_state == Gst.State.PAUSED:
return PlayerStates.Paused
elif current_state == Gst.State.PLAYING:
return PlayerStates.Playing
else:
self._logger.error('GStreamer player in unknown state {0}'.format(current_state))
def play_track(self, track_url):
self._player.set_property('uri', track_url)
self._set_state(PlayerStates.Playing)
def stop(self):
self._set_state(PlayerStates.Stopped)
def pause(self):
self._set_state(PlayerStates.Paused)
def play(self):
self._set_state(PlayerStates.Playing)
# Override with function to call on end of stream
def end_of_stream_event_handler(self):
pass
# Override with function to call on state change
def state_change_event_handler(self):
pass
class ControllerState:
def __init__(self, controller, player):
self._player = player
self._controller = controller
self._logger = logging.getLogger('rattlemedia')
def __play_next_track(self):
self._logger.info('Playing')
try:
# This sucks a bit. Should state own the api?
track_url = self._controller._api.get_stream_url(self._controller._queue.popleft(), config.google_device_id)
self._player.play_track(track_url)
except IndexError:
self._logger.info('Queue empty. Stopping.')
self._player.stop()
finally:
self._controller.update_state()
def play(self):
self.__play_next_track()
def stop(self):
self._logger.info('Stopping')
self._player.stop()
def toggle(self):
pass
def next(self):
self.__play_next_track()
class ControllerStatePlaying(ControllerState):
def play(self):
pass
def toggle(self):
self._player.pause()
class ControllerStateStopped(ControllerState):
def stop(self):
pass
def toggle(self):
pass
class ControllerStatePaused(ControllerState):
def play(self):
self._player.play()
def toggle(self):
self.play()
class RattleMediaController:
_states = None
def __init__(self):
api = Mobileclient()
api.login(config.google_username, config.google_password, config.google_device_id)
self._api = api
self._logger = logging.getLogger('rattlemedia')
self._player = RattleMediaPlayer()
self._player.end_of_stream_event_handler = self.end_of_stream_event
self._player.state_change_event_handler = self.update_state
self._queue = deque([])
RattleMediaController._states = {PlayerStates.Paused: ControllerStatePaused(self, self._player),
PlayerStates.Stopped: ControllerStateStopped(self, self._player),
PlayerStates.Playing: ControllerStatePlaying(self, self._player),
'Unknown': ControllerState(self, self._player)}
self.state = ControllerState(self, self._player)
self.update_state()
def end_of_stream_event(self):
self._player.stop()
self.play()
def search(self, search_term):
self._logger.debug('Searching for {0}'.format(search_term))
return self._api.search_all_access(search_term)
def enqueue(self, song_id):
self._logger.info('Enqueuing {0}'.format(song_id))
self._queue.append(song_id)
def play(self):
self.state.play()
def stop(self):
self.state.stop()
self._queue.clear()
def toggle_playback(self):
self.state.toggle()
def next(self):
self.state.next()
def play_album(self, album_id):
self._logger.info('Playing album {0}'.format(album_id))
self.stop()
self.enqueue_album(album_id)
self.play()
def enqueue_album(self, album_id):
album = self._api.get_album_info(album_id)
tracks = album['tracks']
for track in tracks:
self._queue.append(track['nid'])
def update_state(self):
current_state = None
try:
current_state = self._player.get_state()
self._logger.debug('Switching state to {0}'.format(current_state))
self.state = self._states[current_state]
self._logger.info('Switched state to {0}'.format(self.state))
except KeyError:
self._logger.warn('Switching to unknown state {0}'.format(current_state))
self.state = self._states['Unknown']
finally:
self.state_change_callback(current_state)
# Override with callback if required
def state_change_callback(self, new_state):
pass
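# Rough usage sketch (comment only; the logging call is an assumed example):
# callers can hook the controller by replacing the callback attribute, which
# mirrors how RattleMediaController wires itself into RattleMediaPlayer above:
#   controller = RattleMediaController()
#   controller.state_change_callback = lambda new_state: logging.getLogger(
#       'rattlemedia').info('Controller state is now {0}'.format(new_state))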
|
|
#!/usr/bin/python
from math import sqrt
import sys
import argparse
from collections import deque
from struct import pack, unpack
import numpy as np
from osgeo import gdal
gdal.UseExceptions()
gdal.TermProgress = gdal.TermProgress_nocb
#
# NormalVector
#
# Calculate the normal vector of a triangle. (Unit vector perpendicular to
# triangle surface, pointing away from the "outer" face of the surface.)
# Computed using 32-bit float operations for consistency with other tools.
#
# Parameters:
# triangle vertices (nested x y z tuples)
#
# Returns:
# normal vector (x y z tuple)
#
def NormalVector(t):
(ax, ay, az) = t[0]
(bx, by, bz) = t[1]
(cx, cy, cz) = t[2]
# first edge
e1x = np.float32(ax) - np.float32(bx)
e1y = np.float32(ay) - np.float32(by)
e1z = np.float32(az) - np.float32(bz)
# second edge
e2x = np.float32(bx) - np.float32(cx)
e2y = np.float32(by) - np.float32(cy)
e2z = np.float32(bz) - np.float32(cz)
# cross product
cpx = np.float32(e1y * e2z) - np.float32(e1z * e2y)
cpy = np.float32(e1z * e2x) - np.float32(e1x * e2z)
cpz = np.float32(e1x * e2y) - np.float32(e1y * e2x)
# return cross product vector normalized to unit length
mag = np.sqrt(np.power(cpx, 2) + np.power(cpy, 2) + np.power(cpz, 2))
return (cpx/mag, cpy/mag, cpz/mag)
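# Quick sanity check (comment only, assumed example): the right triangle
# ((0, 0, 0), (1, 0, 0), (0, 1, 0)) lies in the XY plane, so
# NormalVector(((0, 0, 0), (1, 0, 0), (0, 1, 0))) returns (0.0, 0.0, 1.0):
# e1 = (-1, 0, 0), e2 = (1, -1, 0), e1 x e2 = (0, 0, 1), magnitude 1.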
# stlwriter is a simple class for writing binary STL meshes.
# Class instances are constructed with a predicted face count.
# The output file header is overwritten upon completion with
# the actual face count.
class stlwriter():
# path: output binary stl file path
# facet_count: predicted number of facets
def __init__(self, path, facet_count=0):
self.f = open(path, 'wb')
# track number of facets actually written
self.written = 0
# write binary stl header with predicted facet count
self.f.write('\0' * 80)
# (facet count is little endian 4 byte unsigned int)
self.f.write(pack('<I', facet_count))
# t: ((ax, ay, az), (bx, by, bz), (cx, cy, cz))
def add_facet(self, t):
# facet normals and vectors are little endian 4 byte float triplets
# strictly speaking, we don't need to compute NormalVector,
# as other tools could be used to update the output mesh.
self.f.write(pack('<3f', *NormalVector(t)))
for vertex in t:
self.f.write(pack('<3f', *vertex))
# facet records conclude with two null bytes (unused "attributes")
self.f.write('\0\0')
self.written += 1
def done(self):
# update final facet count in header before closing file
self.f.seek(80)
self.f.write(pack('<I', self.written))
self.f.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.done()
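# Minimal usage sketch (comment only; 'example.stl' and the vertices are made
# up for illustration). The context-manager form guarantees done() rewrites
# the header with the actual facet count, even if fewer facets are added than
# predicted:
#   with stlwriter('example.stl', facet_count=1) as mesh:
#       mesh.add_facet(((0, 0, 0), (1, 0, 0), (0, 1, 0)))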
def fail(msg):
print >> sys.stderr, msg
exit(1)
def log(msg):
if args.verbose:
print >> sys.stderr, msg
ap = argparse.ArgumentParser(description='Convert a GDAL raster (like a GeoTIFF heightmap) to an STL terrain surface.')
ap.add_argument('-x', action='store', default=0.0, type=float, help='Fit output x to extent (mm)')
ap.add_argument('-y', action='store', default=0.0, type=float, help='Fit output y to extent (mm)')
optg_z = ap.add_mutually_exclusive_group()
optg_z.add_argument('-z', action='store', default=None, type=float, help='Z scale expressed as a vertical scale factor (1)')
optg_z.add_argument('-s', action='store', default=None, type=float, help='Z scale expressed as a ratio of vertical units per horizontal unit (1)')
ap.add_argument('-b', '--base', action='store', default=0.0, type=float, help='Base height (0)')
ap.add_argument('-c', '--clip', action='store_true', default=False, help='Clip z to minimum elevation')
ap.add_argument('-v', '--verbose', action='store_true', default=False, help='Print log messages')
ap.add_argument('--band', action='store', default=1, type=int, help='Raster data band (1)')
ap.add_argument('-m', '--minimum', action='store', default=None, type=float, help='Omit vertices below minimum elevation')
ap.add_argument('-M', '--maximum', action='store', default=None, type=float, help='Omit vertices above maximum elevation')
optg_region = ap.add_mutually_exclusive_group()
optg_region.add_argument('-w', '--window', action='store', default=None, type=float, nargs=4, help='Opposing corner coordinates in geographic CRS')
optg_region.add_argument('-p', '--pixels', action='store', default=None, type=float, nargs=4, help='Opposing corner coordinates in pixel coordinates')
ap.add_argument('RASTER', help='Input heightmap image')
ap.add_argument('STL', help='Output STL path')
args = ap.parse_args()
try:
img = gdal.Open(args.RASTER)
except RuntimeError, e:
fail(str(e).strip())
# input raster dimensions
w = img.RasterXSize
h = img.RasterYSize
log("raster dimensions = (%s, %s)" % (str(w), str(h)))
# get default transformation from image coordinates to world coordinates
# note that since we obtain this transformation before making any region
# selection, and since this transformation is used for output, subset
# window regions will be aligned correctly in output stl coordinates.
t = img.GetGeoTransform()
git = gdal.InvGeoTransform(t)[1]
log("geo->pixel transform: %s" % str(git))
if args.window != None:
# if a geographic window is specified, convert it to a pixel window in input raster coordinates
# apply inverse geo transform to window points
px0, py0 = gdal.ApplyGeoTransform(git, args.window[0], args.window[1])
px1, py1 = gdal.ApplyGeoTransform(git, args.window[2], args.window[3])
# set arg.pixels to obtained pixel points
args.pixels = [px0, py0, px1, py1]
if args.pixels == None:
# if no pixel extent window is specified, use whole input raster.
xmin = 0
ymin = 0
ww = w
wh = h
else:
# if a pixel extent window is specified (either directly with --pixels or
# derived from a geographic --window), clip it to the input raster extent.
xmin = int(round(min(args.pixels[0], args.pixels[2])))
ymin = int(round(min(args.pixels[1], args.pixels[3])))
xmax = int(round(max(args.pixels[0], args.pixels[2])))
ymax = int(round(max(args.pixels[1], args.pixels[3])))
if xmin >= w:
fail("Region of interest lies entirely outside raster (xmin)")
if ymin >= h:
fail("Region of interest lies entirely outside raster (ymin")
if xmax <= 0:
fail("Region of interest lies entirely outside raster (xmax)")
if ymax <= 0:
fail("Region of interest lies entirely outside raster (ymax)")
# if we passed those tests, at least part of the window overlaps the raster,
# so we can safely clip to the raster extent and still have something
if xmin < 0:
xmin = 0
if ymin < 0:
ymin = 0
if xmax > w:
xmax = w
if ymax > h:
ymax = h
ww = xmax - xmin
wh = ymax - ymin
log("xmin, ymin = %d, %d" % (xmin, ymin))
log("ww, wh = %d, %d" % (ww, wh))
# output mesh dimensions are one row and column less than raster window
mw = ww - 1
mh = wh - 1
# save x pixel size if needed for scaling
xyres = abs(t[1])
# Apply z scale factor, if any
if args.z != None:
zscale = args.z
elif args.s != None:
zscale = 1.0 / args.s
else:
zscale = 1.0
# recalculate z scale and xy transform if different dimensions are requested
if args.x != 0.0 or args.y != 0.0:
# recalculate xy scale based on requested x or y dimension
# if both x and y dimension are set, select smaller scale
if args.x != 0.0 and args.y != 0.0:
pixel_scale = min(args.x / mw, args.y / mh)
elif args.x != 0.0:
pixel_scale = args.x / mw
elif args.y != 0.0:
pixel_scale = args.y / mh
# adjust z scale to maintain proportions with new xy scale
zscale *= pixel_scale / xyres
# revise transformation matrix
# image: 0,0 at top left corner of top left pixel (0.5,0.5 at pixel center)
t = (
-pixel_scale * mw / 2.0, # 0 left edge of top left pixel
pixel_scale, # 1 pixel width
0, # 2
pixel_scale * mh / 2.0, # 3 top edge of top left pixel
0, # 4
-pixel_scale # 5 pixel height
)
log("transform = %s" % str(t))
band = img.GetRasterBand(args.band)
nd = band.GetNoDataValue()
# map GDAL pixel data type to corresponding struct format character
typemap = {
gdal.GDT_Byte: 'B',
gdal.GDT_UInt16: 'H',
gdal.GDT_Int16: 'h',
gdal.GDT_UInt32: 'I',
gdal.GDT_Int32: 'i',
gdal.GDT_Float32: 'f',
gdal.GDT_Float64: 'd'
}
typeName = gdal.GetDataTypeName(band.DataType)
if band.DataType not in typemap:
fail('Unsupported data type: %s' % typeName)
# rowformat is used to unpack a row of raw image data to numeric form
rowformat = typemap.get(band.DataType) * ww
log("data type = %s" % typeName)
log("type format = %s" % typemap.get(band.DataType))
# min, max, mean, sd; min used for z clipping
stats = band.GetStatistics(True, True)
log("min, max, mean, sd = %s" % str(stats))
# zmin is subtracted from elevation values
if args.clip == True:
zmin = stats[0]
else:
zmin = 0
log("zmin = %s" % str(zmin))
# Rolling pixel buffer has space for two rows of image data.
# Old data is automatically discarded as new data is loaded.
pixels = deque(maxlen = (2 * ww))
log("buffer size = %s" % str(pixels.maxlen))
# Initialize pixel buffer with first row of data from the image window.
pixels.extend(unpack(rowformat, band.ReadRaster(xmin, ymin, ww, 1, ww, 1, band.DataType)))
# Precalculate output mesh size (STL is 50 bytes/facet + 84 byte header)
# Actual facet count and file size may differ (be less) if pixels are skipped as nodata or out of range.
facetcount = mw * mh * 2
filesize = (facetcount * 50) + 84
log("predicted (max) facet count = %s" % str(facetcount))
log("predicted (max) STL file size = %s bytes" % str(filesize))
# skip(v) tests if elevation value v should be omitted from output.
# It returns True if v matches the nodata value, if v is less than
# the minimum allowed elevation, or if v is greater than the
# maximum allowed elevation. Otherwise it returns False.
def skip(v):
global nd
global args
if v == nd:
return True
if args.minimum != None and v < args.minimum:
return True
if args.maximum != None and v > args.maximum:
return True
return False
with stlwriter(args.STL, facetcount) as mesh:
for y in range(mh):
# Each row, extend pixel buffer with the next row of data from the image window.
pixels.extend(unpack(rowformat, band.ReadRaster(xmin, ymin + y + 1, ww, 1, ww, 1, band.DataType)))
for x in range(mw):
# Elevation values of this pixel (a) and its neighbors (b, c, and d).
av = pixels[x]
bv = pixels[ww + x]
cv = pixels[x + 1]
dv = pixels[ww + x + 1]
# Apply transforms to obtain output mesh coordinates of the
# four corners composed of raster points a (x, y), b, c,
# and d (x + 1, y + 1):
#
# a-c a-c c
# |/| = |/ + /|
# b-d b b-d
# Points b and c are required for both facets, so if either
# are unavailable, we can skip this pixel altogether.
if skip(bv) or skip(cv):
continue
b = (
t[0] + ((xmin + x) * t[1]) + ((ymin + y + 1) * t[2]),
t[3] + ((xmin + x) * t[4]) + ((ymin + y + 1) * t[5]),
(zscale * (float(bv) - zmin)) + args.base
)
c = (
t[0] + ((xmin + x + 1) * t[1]) + ((ymin + y) * t[2]),
t[3] + ((xmin + x + 1) * t[4]) + ((ymin + y) * t[5]),
(zscale * (float(cv) - zmin)) + args.base
)
if not skip(av):
a = (
t[0] + ((xmin + x) * t[1]) + ((ymin + y) * t[2]),
t[3] + ((xmin + x) * t[4]) + ((ymin + y) * t[5]),
(zscale * (float(av) - zmin)) + args.base
)
mesh.add_facet((a, b, c))
if not skip(dv):
d = (
t[0] + ((xmin + x + 1) * t[1]) + ((ymin + y + 1) * t[2]),
t[3] + ((xmin + x + 1) * t[4]) + ((ymin + y + 1) * t[5]),
(zscale * (float(dv) - zmin)) + args.base
)
mesh.add_facet((d, c, b))
# Update progress each row
gdal.TermProgress(float(y + 1) / mh)
log("actual facet count: %s" % str(mesh.written))
|
|
from __future__ import division, unicode_literals
import base64
import io
import itertools
import os
import time
from .fragment import FragmentFD
from ..compat import (
compat_etree_fromstring,
compat_urlparse,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_struct_pack,
compat_struct_unpack,
)
from ..utils import (
encodeFilename,
fix_xml_ampersands,
sanitize_open,
xpath_text,
)
class DataTruncatedError(Exception):
pass
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
def read_bytes(self, n):
data = self.read(n)
if len(data) < n:
raise DataTruncatedError(
'FlvReader error: need %d bytes while only %d bytes got' % (
n, len(data)))
return data
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return compat_struct_unpack('!Q', self.read_bytes(8))[0]
def read_unsigned_int(self):
return compat_struct_unpack('!I', self.read_bytes(4))[0]
def read_unsigned_char(self):
return compat_struct_unpack('!B', self.read_bytes(1))[0]
def read_string(self):
res = b''
while True:
char = self.read_bytes(1)
if char == b'\x00':
break
res += char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read_bytes(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read_bytes(real_size - header_end)
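# Comment-only example of the size handling above: a box whose 32-bit size
# field is 1 stores its real length in the following 64-bit field, so the
# header occupies 16 bytes instead of 8 and the payload read here is
# real_size - 16 bytes long.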
def read_asrt(self):
# version
self.read_unsigned_char()
# flags
self.read_bytes(3)
quality_entry_count = self.read_unsigned_char()
# QualityEntryCount
for i in range(quality_entry_count):
self.read_string()
segment_run_count = self.read_unsigned_int()
segments = []
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
return {
'segment_run': segments,
}
def read_afrt(self):
# version
self.read_unsigned_char()
# flags
self.read_bytes(3)
# time scale
self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers
for i in range(quality_entry_count):
self.read_string()
fragments_count = self.read_unsigned_int()
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({
'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
return {
'fragments': fragments,
}
def read_abst(self):
# version
self.read_unsigned_char()
# flags
self.read_bytes(3)
self.read_unsigned_int() # BootstrapinfoVersion
# Profile,Live,Update,Reserved
flags = self.read_unsigned_char()
live = flags & 0x20 != 0
# time scale
self.read_unsigned_int()
# CurrentMediaTime
self.read_unsigned_long_long()
# SmpteTimeCodeOffset
self.read_unsigned_long_long()
self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char()
# ServerEntryTable
for i in range(server_count):
self.read_string()
quality_count = self.read_unsigned_char()
# QualityEntryTable
for i in range(quality_count):
self.read_string()
# DrmData
self.read_string()
# MetaData
self.read_string()
segments_count = self.read_unsigned_char()
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
fragments = []
for i in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {
'segments': segments,
'fragments': fragments,
'live': live,
}
def read_bootstrap_info(self):
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
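# Illustrative note (not part of the original module): the minimal box layout
# read_box_info() expects is a 4-byte big-endian size covering the whole box,
# a 4-byte type, then the payload (a 64-bit size follows when size == 1), e.g.:
#
#   box = compat_struct_pack('!I', 12) + b'mdat' + b'\x00' * 4
#   FlvReader(box).read_box_info()  # -> (12, b'mdat', b'\x00\x00\x00\x00')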
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
def build_fragments_list(boot_info):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
first_frag_number = fragment_run_entry_table[0]['first']
fragments_counter = itertools.count(first_frag_number)
for segment, fragments_count in segment_run_table['segment_run']:
for _ in range(fragments_count):
res.append((segment, next(fragments_counter)))
if boot_info['live']:
res = res[-2:]
return res
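# Illustrative sketch (not part of the original module): how a parsed bootstrap
# maps to the (segment, fragment) pairs returned by build_fragments_list().
# The boot_info literal below is hypothetical, not taken from a real manifest.
def _example_build_fragments_list():
    boot_info = {
        'live': False,
        'segments': [{'segment_run': [(1, 3)]}],
        'fragments': [{'fragments': [
            {'first': 1, 'ts': 0, 'duration': 4000,
             'discontinuity_indicator': None},
        ]}],
    }
    return build_fragments_list(boot_info)  # -> [(1, 1), (1, 2), (1, 3)]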
def write_unsigned_int(stream, val):
stream.write(compat_struct_pack('!I', val))
def write_unsigned_int_24(stream, val):
stream.write(compat_struct_pack('!I', val)[1:])
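# Illustrative note (not part of the original module): '!I' packs 4 big-endian
# bytes and the leading byte is dropped, so only values below 2**24 round-trip,
# e.g. compat_struct_pack('!I', 0x0A0B0C)[1:] == b'\x0a\x0b\x0c'.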
def write_flv_header(stream):
"""Writes the FLV header to stream"""
    # FLV header
    stream.write(b'FLV\x01')  # signature 'FLV' + version 1
    stream.write(b'\x05')  # TypeFlags: audio (0x04) and video (0x01) present
    stream.write(b'\x00\x00\x00\x09')  # DataOffset: 9-byte header size
    stream.write(b'\x00\x00\x00\x00')  # PreviousTagSize0
def write_metadata_tag(stream, metadata):
"""Writes optional metadata tag to stream"""
    SCRIPT_TAG = b'\x12'  # FLV tag type 18: script data
    FLV_TAG_HEADER_LEN = 11
    if metadata:
        stream.write(SCRIPT_TAG)
        write_unsigned_int_24(stream, len(metadata))  # DataSize
        # Timestamp (3 bytes) + TimestampExtended (1) + StreamID (3), all zero
        stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
        stream.write(metadata)
        # PreviousTagSize = tag header length + payload length
        write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
def remove_encrypted_media(media):
return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
'drmAdditionalHeaderSetId' not in e.attrib,
media))
def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
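# Illustrative note (not part of the original module): _add_ns('media') yields
# '{http://ns.adobe.com/f4m/1.0}media', the Clark notation ElementTree uses for
# namespaced find()/findall() lookups on the parsed manifest.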
class F4mFD(FragmentFD):
"""
A downloader for f4m manifests or AdobeHDS.
"""
FD_NAME = 'f4m'
def _get_unencrypted_media(self, doc):
media = doc.findall(_add_ns('media'))
if not media:
self.report_error('No media found')
for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
doc.findall(_add_ns('drmAdditionalHeaderSet'))):
# If id attribute is missing it's valid for all media nodes
# without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
if 'id' not in e.attrib:
self.report_error('Missing ID in f4m DRM')
media = remove_encrypted_media(media)
if not media:
self.report_error('Unsupported DRM')
return media
def _get_bootstrap_from_url(self, bootstrap_url):
bootstrap = self.ydl.urlopen(bootstrap_url).read()
return read_bootstrap_info(bootstrap)
def _update_live_fragments(self, bootstrap_url, latest_fragment):
fragments_list = []
retries = 30
while (not fragments_list) and (retries > 0):
boot_info = self._get_bootstrap_from_url(bootstrap_url)
fragments_list = build_fragments_list(boot_info)
fragments_list = [f for f in fragments_list if f[1] > latest_fragment]
if not fragments_list:
# Retry after a while
time.sleep(5.0)
retries -= 1
if not fragments_list:
self.report_error('Failed to update fragments')
return fragments_list
def _parse_bootstrap_node(self, node, base_url):
        # Sometimes non-empty inline bootstrap info can be specified along
# with bootstrap url attribute (e.g. dummy inline bootstrap info
# contains whitespace characters in [1]). We will prefer bootstrap
# url over inline bootstrap info when present.
# 1. http://live-1-1.rutube.ru/stream/1024/HDS/SD/C2NKsS85HQNckgn5HdEmOQ/1454167650/S-s604419906/move/four/dirs/upper/1024-576p.f4m
bootstrap_url = node.get('url')
if bootstrap_url:
bootstrap_url = compat_urlparse.urljoin(
base_url, bootstrap_url)
boot_info = self._get_bootstrap_from_url(bootstrap_url)
else:
bootstrap_url = None
bootstrap = base64.b64decode(node.text.encode('ascii'))
boot_info = read_bootstrap_info(bootstrap)
return boot_info, bootstrap_url
def real_download(self, filename, info_dict):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
urlh = self.ydl.urlopen(man_url)
man_url = urlh.geturl()
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244
# and https://github.com/rg3/youtube-dl/issues/7823)
manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip()
doc = compat_etree_fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f)
for f in self._get_unencrypted_media(doc)]
if requested_bitrate is None or len(formats) == 1:
# get the best format
formats = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1]
else:
rate, media = list(filter(
lambda f: int(f[0]) == requested_bitrate, formats))[0]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, base_url)
live = boot_info['live']
metadata_node = media.find(_add_ns('metadata'))
if metadata_node is not None:
metadata = base64.b64decode(metadata_node.text.encode('ascii'))
else:
metadata = None
fragments_list = build_fragments_list(boot_info)
test = self.params.get('test', False)
if test:
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
ctx = {
'filename': filename,
'total_frags': total_frags,
'live': live,
}
self._prepare_frag_download(ctx)
dest_stream = ctx['dest_stream']
write_flv_header(dest_stream)
if not live:
write_metadata_tag(dest_stream, metadata)
base_url_parsed = compat_urllib_parse_urlparse(base_url)
self._start_frag_download(ctx)
frags_filenames = []
while fragments_list:
seg_i, frag_i = fragments_list.pop(0)
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
query = []
if base_url_parsed.query:
query.append(base_url_parsed.query)
if akamai_pv:
query.append(akamai_pv.strip(';'))
if info_dict.get('extra_param_to_segment_url'):
query.append(info_dict['extra_param_to_segment_url'])
url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
try:
success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()})
if not success:
return False
(down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
down_data = down.read()
down.close()
reader = FlvReader(down_data)
while True:
try:
_, box_type, box_data = reader.read_box_info()
except DataTruncatedError:
if test:
# In tests, segments may be truncated, and thus
# FlvReader may not be able to parse the whole
# chunk. If so, write the segment as is
# See https://github.com/rg3/youtube-dl/issues/9214
dest_stream.write(down_data)
break
raise
if box_type == b'mdat':
dest_stream.write(box_data)
break
if live:
os.remove(encodeFilename(frag_sanitized))
else:
frags_filenames.append(frag_sanitized)
except (compat_urllib_error.HTTPError, ) as err:
if live and (err.code == 404 or err.code == 410):
# We didn't keep up with the live window. Continue
# with the next available fragment.
msg = 'Fragment %d unavailable' % frag_i
self.report_warning(msg)
fragments_list = []
else:
raise
if not fragments_list and not test and live and bootstrap_url:
fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
total_frags += len(fragments_list)
if fragments_list and (fragments_list[0][1] > frag_i + 1):
msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
self.report_warning(msg)
self._finish_frag_download(ctx)
for frag_file in frags_filenames:
os.remove(encodeFilename(frag_file))
return True
|
|
"""XML utilities."""
###########################################################################
# PYTHON FILE: xmlutils.py
# CREATED BY: Peter Taylor (November 2002)
#
# Some XML utilities.
#
# $Header$
###########################################################################
# system libraries
import copy
import xml.dom.minidom
class XMLUtilsError(Exception):
"""Exceptions for XMLUtils operations."""
def __init__(self, value):
self.value = value
def __str__(self):
        return repr(self.value)
class Document:
"""Data structure with natural correspondence to XML documents.
At the document level, there is a single Node. A Node in an XML document
is something defined by one complete set of angled brackets.
Thus a Node has a name (mandatory), text (optional), attributes (optional)
and a list of child Nodes (optional)."""
def __init__ (self,
node=None,
xmlFile=None,
xmlString=None,
dom=None):
"""Constructs the Document from either the field, xmlFile, xmlString
or existing DOM document.
Construction attempts are in that order."""
self.xmlFile = xmlFile
if node is not None:
self.node = node
elif xmlFile is not None:
self._fromXmlFile (xmlFile)
elif xmlString is not None:
self._fromXmlString (xmlString)
elif dom is not None:
self._fromDom (dom)
else: self.node = None
def toxml(self, indent = "", newl = ""):
"""Converts document to an XML string."""
dom = self._makeDom()
xml = dom.toprettyxml(indent, newl)
dom.unlink()
return xml
def writexml(self, file=None, indent = "", addIndent = "", newl = ""):
"""Writes document to file in XML format."""
dom = self._makeDom()
if file is None:
file = self.xmlFile
xml = dom.toprettyxml("\t", "\n")
fileo = open(file,'w')
fileo.write(xml)
else:
dom.writexml(file, indent, addIndent, newl)
dom.unlink()
def _isValid (self):
if self.node is None:
return False
return True
def _makeDom (self ):
if not self._isValid():
raise XMLUtilsError,"Invalid Document - cannot be converted to DOM"
dom = xml.dom.minidom.Document()
dom.appendChild (self.node.makeDomElement (dom))
return dom
def _fromXmlFile (self, xmlFile):
dom = xml.dom.minidom.parse(xmlFile)
self._fromDom (dom)
dom.unlink()
def _fromXmlString (self, xmlString):
dom = xml.dom.minidom.parseString(xmlString)
self._fromDom (dom)
dom.unlink()
def _fromDom (self, dom):
if dom.nodeType == dom.DOCUMENT_NODE:
self.node = NodeFromDomElement (dom.documentElement)
else:
raise XMLUtilsError, "Must be created from a DOM document node"
class Node:
"""Defines a node within an XML document.
A node has a name, text, attributes and a list of child nodes.
For example:
<hello />
would have name=hello, no text, no attributes and no child nodes.
For example:
<hello world="true" />
would have name=hello, one attribute "world" with the value "true", no text
and no child nodes. Note that attribute values are always strings.
For example:
<sentence>the quick brown fox jumps over a lazy dog</sentence>
would have name="sentence", text="the quick brown fox jumps over a lazy
dog", no attributes and no child nodes.
For example:
<para><contents>hello world</contents><font>Courier,10</font></para>
would have name="para", no text, no attributes and two child nodes. The
child nodes would respectively have name="contents" and text="hello world"
and name="font" and text="Courier,10".
Note that you can have child nodes with the same name in the list of child
nodes - and that is quite normal!
    One major difference between Node and the equivalent structures within DOM
    is that adjacent text objects are combined into a single text value. With
    DOM you will often get multiple text values.
"""
def __init__ (self, name, text=None, attributes=None, childNodes=None):
"""Node constructor.
name is mandatory - represents the name tag with <...>
text is optional - if None then defaults to an empty string
attributes is optional - if None then defaults to an empty dictionary
childNodes is optional - if None then defaults to an empty list
If text, attributes or childNodes are provided, then they are only
reference copied. Thus changes to the variable in the calling code
will be reflected within the Node object."""
self.name = name
if text is None: self.text = ""
else: self.text = text
if attributes is None: self.attributes = {}
else: self.attributes = attributes
if childNodes is None: self.childNodes = []
else: self.childNodes = childNodes
def __str__ (self):
"""Converts itself to a temporary document, then prints out the XML"""
dom = xml.dom.minidom.Document()
domNode = self.makeDomElement (dom)
xmlString = domNode.toprettyxml (indent=" ", newl="\n")
dom.unlink()
return xmlString
def append (self,node):
"""Appends a node to the list of child nodes"""
if isinstance(node, Node):
self.childNodes.append (node)
else:
raise XMLUtilsError, "Appended value must of type Node"
def makeDomElement(self, dom):
"""Creates a DOM element for a DOM document."""
domElement = dom.createElement (self.name)
for attrName in self.attributes.keys():
domElement.setAttribute (attrName, self.attributes[attrName])
if self.text is not None and self.text != "":
domElement.appendChild (dom.createTextNode(self.text))
for node in self.childNodes:
domElement.appendChild (node.makeDomElement (dom))
return domElement
def getAttribute(self, attrName, default=""):
"""Gets an attribute.
Returns the default value (by default="") if the attrName is undefined.
"""
return self.attributes.get (attrName, default)
def setAttribute(self, attrName, attrValue):
"""Sets an attribute value - overwriting anything there beforehand."""
self.attributes[attrName] = attrValue
def getChildAttributesDQBTimeSeries(self):
        selected, expression, name, id = None, None, None, None
for n in self.childNodes:
if n.name =='selected':
selected = n.text
continue
if n.name =='expression':
expression = n.text
continue
if n.name == 'name':
name = n.text
continue
if n.name == "id":
id = n.text
continue
        return selected, expression, name, id
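# Illustrative example (not part of the original module): the <para> structure
# described in the Node docstring, built programmatically.
def _example_para_node():
    para = Node("para")
    para.append(Node("contents", text="hello world"))
    para.append(Node("font", text="Courier,10"))
    return para  # str(para) pretty-prints the corresponding XML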
def NodeFromDomElement (domNode):
"""Returns a Node from a dom element node which must be an ELEMENT_NODE"""
if domNode.nodeType != domNode.ELEMENT_NODE:
raise XMLUtilsError, "DOM node must be an ELEMENT node"
nodeList = [] # ordered list of nodes
textLines = [] # lines of text - will be joined together to form text
attributes = {} # node attributes as a dictionary
if domNode.attributes is not None:
for i in range(domNode.attributes.length):
attribute = domNode.attributes.item(i)
attributes[attribute.nodeName] = attribute.value
    for item in domNode.childNodes:
        if item.nodeType == item.TEXT_NODE:
            textLines.append(item.data)
        elif item.nodeType == item.ELEMENT_NODE:
            subNode = NodeFromDomElement (item)
            nodeList.append (subNode)
        # Other node types (comments, processing instructions, etc.) are
        # silently ignored; re-enable the else branch below to be strict.
        # else:
        #     raise XMLUtilsError, "Child node must be TEXT or ELEMENT node"
text = "".join(textLines).strip()
node = Node (domNode.nodeName, text, attributes, nodeList)
return node
if __name__ == "__main__":
import sys
for name in sys.argv[1:]:
print "Processing file: %s" % name
file = open(name, "r")
doc = Document(xmlFile = file)
file.close()
#print doc.toxml (indent=" ", newl="\n")
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_netuitive
----------------------------------
Tests for `netuitive` module.
"""
import unittest
import os
import json
import time
import netuitive
import datetime
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def getFixtureDirPath():
path = os.path.join(
os.path.dirname('tests/'),
'fixtures')
return path
def getFixturePath(fixture_name):
path = os.path.join(getFixtureDirPath(),
fixture_name)
if not os.access(path, os.R_OK):
print('Missing Fixture ' + path)
return path
def getFixture(fixture_name):
with open(getFixturePath(fixture_name), 'r') as f:
return StringIO(f.read())
class TestClientInit(unittest.TestCase):
def setUp(self):
pass
def test_custom_endpoint(self):
# test custom endpoint url creation
a = netuitive.Client('https://example.com/ingest', 'apikey')
self.assertEqual(a.url, 'https://example.com/ingest')
self.assertEqual(a.api_key, 'apikey')
self.assertEqual(a.dataurl, 'https://example.com/ingest/apikey')
self.assertEqual(
a.eventurl, 'https://example.com/ingest/events/apikey')
def test_infrastructure_endpoint(self):
# test infrastructure endpoint url creation
a = netuitive.Client(
'https://example.com/ingest/infrastructure', 'apikey')
self.assertEqual(a.url, 'https://example.com/ingest/infrastructure')
self.assertEqual(a.api_key, 'apikey')
self.assertEqual(
a.dataurl, 'https://example.com/ingest/infrastructure/apikey')
self.assertEqual(
a.eventurl, 'https://example.com/ingest/events/infrastructure/apikey')
def test_minimum(self):
# test infrastructure endpoint url creation
a = netuitive.Client(api_key='apikey')
self.assertEqual(a.url, 'https://api.app.netuitive.com/ingest')
self.assertEqual(a.api_key, 'apikey')
self.assertEqual(
a.dataurl, 'https://api.app.netuitive.com/ingest/apikey')
self.assertEqual(
a.eventurl, 'https://api.app.netuitive.com/ingest/events/apikey')
def test_trailing_slash(self):
# test negation of trailing / on the url
a = netuitive.Client('https://example.com/ingest/', 'apikey')
self.assertEqual(a.url, 'https://example.com/ingest')
self.assertEqual(a.api_key, 'apikey')
self.assertEqual(a.dataurl, 'https://example.com/ingest/apikey')
self.assertEqual(
a.eventurl, 'https://example.com/ingest/events/apikey')
def test_default_agent(self):
# test default agent string
a = netuitive.Client('https://example.com/ingest', 'apikey')
self.assertEqual(a.agent, 'Netuitive-Python/' + netuitive.__version__)
def test_custom_agent(self):
# test default agent string
a = netuitive.Client('https://example.com/ingest', 'apikey', 'phil')
self.assertEqual(a.agent, 'phil')
def tearDown(self):
pass
class TestElementInit(unittest.TestCase):
def setUp(self):
pass
def test_no_args(self):
a = netuitive.Element()
self.assertEqual(a.type, 'SERVER')
def test_element_type(self):
a = netuitive.Element('NOT_SERVER')
self.assertEqual(a.type, 'NOT_SERVER')
    def test_element_location(self):
a = netuitive.Element('SERVER', 'here')
self.assertEqual(a.location, 'here')
b = netuitive.Element('SERVER', location='here too')
self.assertEqual(b.location, 'here too')
def test_post_format(self):
a = netuitive.Element('SERVER', 'here')
a.merge_metrics()
ajson = json.dumps(
[a], default=lambda o: o.__dict__, sort_keys=True)
self.assertEqual(ajson, getFixture(
'TestElementInit.test_post_format').getvalue())
def tearDown(self):
pass
class TestElementAttributes(unittest.TestCase):
def setUp(self):
self.a = netuitive.Element()
self.a.add_attribute('Test', 'TestValue')
self.a.add_attribute('Test2', 'TestValue2')
def test(self):
self.assertEqual(self.a.attributes[0].name, 'Test')
self.assertEqual(self.a.attributes[0].value, 'TestValue')
self.assertEqual(self.a.attributes[1].name, 'Test2')
self.assertEqual(self.a.attributes[1].value, 'TestValue2')
def test_post_format(self):
self.a.merge_metrics()
ajson = json.dumps(
[self.a], default=lambda o: o.__dict__, sort_keys=True)
self.assertEqual(ajson, getFixture(
'TestElementAttributes.test_post_format').getvalue())
def tearDown(self):
pass
class TestElementRelations(unittest.TestCase):
def setUp(self):
self.a = netuitive.Element()
self.a.add_relation('Test')
self.a.add_relation('Test2')
def test(self):
self.assertEqual(self.a.relations[0].fqn, 'Test')
self.assertEqual(self.a.relations[1].fqn, 'Test2')
def test_post_format(self):
self.a.merge_metrics()
ajson = json.dumps(
[self.a], default=lambda o: o.__dict__, sort_keys=True)
self.assertEqual(ajson, getFixture(
'TestElementRelations.test_post_format').getvalue())
def tearDown(self):
pass
class TestElementTags(unittest.TestCase):
def setUp(self):
pass
def test(self):
a = netuitive.Element()
a.add_tag('Test', 'TestValue')
self.assertEqual(a.tags[0].name, 'Test')
self.assertEqual(a.tags[0].value, 'TestValue')
def tearDown(self):
pass
class TestElementSamples(unittest.TestCase):
def setUp(self):
pass
def test_add_sample(self):
a = netuitive.Element()
a.add_sample(
'metricId', 1434110794, 1, 'COUNTER', host='hostname')
self.assertEqual(a.id, 'hostname')
self.assertEqual(a.name, 'hostname')
self.assertEqual(a._metrics['metricId'].id, 'metricId')
self.assertEqual(a._metrics['metricId'].type, 'COUNTER')
def test_add_sample_with_tags(self):
a = netuitive.Element()
a.add_sample(
'tagged', 1434110794, 1, 'COUNTER', host='hostname', tags=[{'utilization': 'true'}])
self.assertEqual(a.id, 'hostname')
self.assertEqual(a.name, 'hostname')
self.assertEqual(a._metrics['tagged'].id, 'tagged')
self.assertEqual(a._metrics['tagged'].type, 'COUNTER')
self.assertEqual(a._metrics['tagged'].tags[0].name, 'utilization')
self.assertEqual(a._metrics['tagged'].tags[0].value, 'true')
def test_duplicate_metrics(self):
a = netuitive.Element()
a.add_sample(
'metricId', 1434110794, 1, 'COUNTER', host='hostname')
a.add_sample(
'metricId', 1434110795, 2, 'COUNTER', host='hostname')
# don't allow duplicate metrics
self.assertEqual(len(a._metrics), 1)
self.assertEqual(a._metrics['metricId'].id, 'metricId')
self.assertEqual(a._metrics['metricId'].type, 'COUNTER')
self.assertEqual(a.samples[0].metricId, 'metricId')
self.assertEqual(a.samples[0].timestamp, 1434110794000)
self.assertEqual(a.samples[0].val, 1)
self.assertEqual(a.samples[1].metricId, 'metricId')
self.assertEqual(a.samples[1].timestamp, 1434110795000)
self.assertEqual(a.samples[1].val, 2)
def test_clear_samples(self):
a = netuitive.Element()
a.add_sample(
'metricId', 1434110794, 1, 'COUNTER', host='hostname')
# test clear_samples
self.assertEqual(len(a._metrics), 1)
a.clear_samples()
self.assertEqual(len(a.metrics), 0)
self.assertEqual(len(a._metrics), 0)
self.assertEqual(len(a.samples), 0)
def test_with_sparseDataStrategy(self):
a = netuitive.Element()
# test sparseDataStrategy
a.add_sample(
'nonsparseDataStrategy', 1434110794, 1, 'COUNTER', host='hostname')
a.add_sample(
'sparseDataStrategy', 1434110794, 1, 'COUNTER', host='hostname', sparseDataStrategy='ReplaceWithZero')
self.assertEqual(a._metrics['nonsparseDataStrategy'].sparseDataStrategy, 'None')
self.assertEqual(a._metrics['sparseDataStrategy'].sparseDataStrategy, 'ReplaceWithZero')
a.clear_samples()
def test_with_unit(self):
a = netuitive.Element()
# test unit
a.add_sample(
'unit', 1434110794, 1, 'COUNTER', host='hostname', unit='Bytes')
a.add_sample(
'nonunit', 1434110794, 1, 'COUNTER', host='hostname')
self.assertEqual(a._metrics['unit'].unit, 'Bytes')
self.assertEqual(a._metrics['nonunit'].unit, '')
def test_with_min(self):
a = netuitive.Element()
a.add_sample(
'min', 1434110794, 1, 'COUNTER', host='hostname', min=0)
self.assertEqual(
a.samples[0].min, 0)
def test_with_max(self):
a = netuitive.Element()
a.add_sample(
'max', 1434110794, 1, 'COUNTER', host='hostname', max=100)
self.assertEqual(
a.samples[0].max, 100)
def test_with_avg(self):
a = netuitive.Element()
a.add_sample(
'avg', 1434110794, 1, 'COUNTER', host='hostname', avg=50)
self.assertEqual(
a.samples[0].avg, 50)
def test_with_sum(self):
a = netuitive.Element()
a.add_sample(
'sum', 1434110794, 1, 'COUNTER', host='hostname', sum=2)
self.assertEqual(
a.samples[0].sum, 2)
def test_with_cnt(self):
a = netuitive.Element()
a.add_sample(
'cnt', 1434110794, 1, 'COUNTER', host='hostname', cnt=3)
self.assertEqual(
a.samples[0].cnt, 3)
def test_add_sanitize(self):
a = netuitive.Element()
a.add_sample(
'mongo.wiredTiger.cache.eviction$server populating queue,:but not evicting pages', 1434110794, 1, 'COUNTER', host='hostname')
self.assertEqual(a._metrics['mongo.wiredTiger.cache.eviction_server_populating_queue__but_not_evicting_pages'].id, 'mongo.wiredTiger.cache.eviction_server_populating_queue__but_not_evicting_pages')
def test_post_format(self):
a = netuitive.Element()
a.add_sample(
'min.max.avg.sum.cnt', 1434110794, 1, 'COUNTER', host='hostname', min=0, max=100, avg=50, sum=2, cnt=3)
a.merge_metrics()
ajson = json.dumps(
[a], default=lambda o: o.__dict__, sort_keys=True)
self.assertEqual(ajson, getFixture(
'TestElementSamples.test_post_format').getvalue())
def test_add_sample_ms(self):
a = netuitive.Element()
a.add_sample(
'metricId', 1475158966202, 1, 'COUNTER', host='hostname', ts_is_ms=True)
self.assertEqual(a.samples[0].timestamp, 1475158966202)
def test_add_sample_no_timestamp(self):
a = netuitive.Element()
c = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)
        d = (c.days * 86400 + c.seconds) * 1000 + c.microseconds // 1000
        a.add_sample(
            'metricId', None, 1, 'COUNTER', host='hostname')
        e = a.samples[0].timestamp - d
        shouldbetrue = False
        # the generated timestamp has to be within 10 seconds of now
        if abs(e) < 10000:
            shouldbetrue = True
self.assertTrue(shouldbetrue)
def tearDown(self):
pass
class TestEvent(unittest.TestCase):
def setUp(self):
self.everything = netuitive.Event('elementId', 'INFO', 'title', 'message', 'INFO',
[('name0', 'value0'), ('name1', 'value1')], 1434110794, 'source')
self.notags = netuitive.Event(
'elementId', 'INFO', 'title', 'message', 'INFO', timestamp=1434110794, source='source')
self.minimum = netuitive.Event(
'elementId', 'INFO', 'title', 'message', 'INFO')
self.everythingjson = json.dumps(
[self.everything], default=lambda o: o.__dict__, sort_keys=True)
self.notagsjson = json.dumps(
[self.notags], default=lambda o: o.__dict__, sort_keys=True)
self.minimumjson = json.dumps(
[self.minimum], default=lambda o: o.__dict__, sort_keys=True)
def test_all_options(self):
# test event with all options
self.assertEqual(self.everything.type, 'INFO')
self.assertEqual(self.everything.title, 'title')
self.assertEqual(self.everything.timestamp, 1434110794000)
self.assertEqual(self.everything.tags[0].name, 'name0')
self.assertEqual(self.everything.tags[0].value, 'value0')
self.assertEqual(self.everything.tags[1].name, 'name1')
self.assertEqual(self.everything.tags[1].value, 'value1')
data = self.everything.data
self.assertEqual(data.elementId, 'elementId')
self.assertEqual(data.level, 'INFO')
self.assertEqual(data.message, 'message')
def test_no_tags(self):
# test event without tags
self.assertEqual(self.notags.type, 'INFO')
self.assertEqual(self.notags.title, 'title')
self.assertEqual(self.notags.timestamp, 1434110794000)
self.assertEqual(hasattr(self.notags, 'tags'), False)
data = self.notags.data
self.assertEqual(data.elementId, 'elementId')
self.assertEqual(data.level, 'INFO')
self.assertEqual(data.message, 'message')
def test_minimum(self):
# test event with minimum options
shouldbetrue = False
t = int(time.time()) * 1000
        # minimum.timestamp has to be within 10 seconds of now
if t - 10000 < int(self.minimum.timestamp):
shouldbetrue = True
self.assertTrue(shouldbetrue)
self.assertEqual(self.minimum.title, 'title')
self.assertEqual(self.minimum.type, 'INFO')
data = self.minimum.data
self.assertEqual(data.elementId, 'elementId')
self.assertEqual(data.level, 'INFO')
self.assertEqual(data.message, 'message')
def test_post_format_everthing(self):
# test post format for event with all options
self.assertEqual(self.everythingjson, getFixture(
'TestEvent.test_post_format_everthing').getvalue())
def test_post_format_notags(self):
# test post format for event without tags
self.assertEqual(self.notagsjson, getFixture(
'TestEvent.test_post_format_notags').getvalue())
def test_post_format_minimum(self):
# test post format for event with minimum options
self.assertEqual(self.minimumjson, getFixture(
'TestEvent.test_post_format_minimum').getvalue().replace('TIMESTAMP_TEMPLATE', str(self.minimum.timestamp)))
def tearDown(self):
pass
class TestCheck(unittest.TestCase):
def setUp(self):
self.check = netuitive.Check('checkName', 'elementId', 60)
def test_check(self):
self.assertEqual(self.check.name, 'checkName')
self.assertEqual(self.check.elementId, 'elementId')
self.assertEqual(self.check.ttl, 60)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class DhcpRpcCallbackMixin(object):
"""A mix-in that enable DHCP agent support in plugin implementations."""
def _get_active_networks(self, context, **kwargs):
"""Retrieve and return a list of the active networks."""
host = kwargs.get('host')
plugin = manager.NeutronManager.get_plugin()
if utils.is_extension_supported(
plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
if cfg.CONF.network_auto_schedule:
plugin.auto_schedule_networks(context, host)
nets = plugin.list_active_networks_on_active_dhcp_agent(
context, host)
else:
filters = dict(admin_state_up=[True])
nets = plugin.get_networks(context, filters=filters)
return nets
def _port_action(self, plugin, context, port, action):
"""Perform port operations taking care of concurrency issues."""
try:
if action == 'create_port':
return plugin.create_port(context, port)
elif action == 'update_port':
return plugin.update_port(context, port['id'], port['port'])
else:
msg = _('Unrecognized action')
raise n_exc.Invalid(message=msg)
except (db_exc.DBError, n_exc.NetworkNotFound,
n_exc.SubnetNotFound, n_exc.IpAddressGenerationFailure) as e:
with excutils.save_and_reraise_exception() as ctxt:
ctxt.reraise = False
if isinstance(e, n_exc.IpAddressGenerationFailure):
# Check if the subnet still exists and if it does not,
# this is the reason why the ip address generation failed.
# In any other unlikely event re-raise
try:
subnet_id = port['port']['fixed_ips'][0]['subnet_id']
plugin.get_subnet(context, subnet_id)
except n_exc.SubnetNotFound:
pass
else:
ctxt.reraise = True
network_id = port['port']['network_id']
LOG.warn(_("Port for network %(net_id)s could not be created: "
"%(reason)s") % {"net_id": network_id, 'reason': e})
def get_active_networks(self, context, **kwargs):
"""Retrieve and return a list of the active network ids."""
# NOTE(arosen): This method is no longer used by the DHCP agent but is
# left so that neutron-dhcp-agents will still continue to work if
# neutron-server is upgraded and not the agent.
host = kwargs.get('host')
LOG.debug(_('get_active_networks requested from %s'), host)
nets = self._get_active_networks(context, **kwargs)
return [net['id'] for net in nets]
def get_active_networks_info(self, context, **kwargs):
"""Returns all the networks/subnets/ports in system."""
host = kwargs.get('host')
LOG.debug(_('get_active_networks_info from %s'), host)
networks = self._get_active_networks(context, **kwargs)
plugin = manager.NeutronManager.get_plugin()
filters = {'network_id': [network['id'] for network in networks]}
ports = plugin.get_ports(context, filters=filters)
filters['enable_dhcp'] = [True]
subnets = plugin.get_subnets(context, filters=filters)
for network in networks:
network['subnets'] = [subnet for subnet in subnets
if subnet['network_id'] == network['id']]
network['ports'] = [port for port in ports
if port['network_id'] == network['id']]
return networks
def get_network_info(self, context, **kwargs):
"""Retrieve and return a extended information about a network."""
network_id = kwargs.get('network_id')
host = kwargs.get('host')
LOG.debug(_('Network %(network_id)s requested from '
'%(host)s'), {'network_id': network_id,
'host': host})
plugin = manager.NeutronManager.get_plugin()
try:
network = plugin.get_network(context, network_id)
except n_exc.NetworkNotFound:
LOG.warn(_("Network %s could not be found, it might have "
"been deleted concurrently."), network_id)
return
filters = dict(network_id=[network_id])
network['subnets'] = plugin.get_subnets(context, filters=filters)
network['ports'] = plugin.get_ports(context, filters=filters)
return network
def get_dhcp_port(self, context, **kwargs):
"""Allocate a DHCP port for the host and return port information.
This method will re-use an existing port if one already exists. When a
port is re-used, the fixed_ip allocation will be updated to the current
network state. If an expected failure occurs, a None port is returned.
"""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
# There could be more than one dhcp server per network, so create
# a device id that combines host and network ids
LOG.debug(_('Port %(device_id)s for %(network_id)s requested from '
'%(host)s'), {'device_id': device_id,
'network_id': network_id,
'host': host})
plugin = manager.NeutronManager.get_plugin()
retval = None
filters = dict(network_id=[network_id])
subnets = dict([(s['id'], s) for s in
plugin.get_subnets(context, filters=filters)])
dhcp_enabled_subnet_ids = [s['id'] for s in
subnets.values() if s['enable_dhcp']]
try:
filters = dict(network_id=[network_id], device_id=[device_id])
ports = plugin.get_ports(context, filters=filters)
if ports:
# Ensure that fixed_ips cover all dhcp_enabled subnets.
port = ports[0]
for fixed_ip in port['fixed_ips']:
if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids:
dhcp_enabled_subnet_ids.remove(fixed_ip['subnet_id'])
port['fixed_ips'].extend(
[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
retval = plugin.update_port(context, port['id'],
dict(port=port))
except n_exc.NotFound as e:
LOG.warning(e)
if retval is None:
# No previous port exists, so create a new one.
LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s '
'does not exist on %(host)s'),
{'device_id': device_id,
'network_id': network_id,
'host': host})
try:
network = plugin.get_network(context, network_id)
except n_exc.NetworkNotFound:
LOG.warn(_("Network %s could not be found, it might have "
"been deleted concurrently."), network_id)
return
port_dict = dict(
admin_state_up=True,
device_id=device_id,
network_id=network_id,
tenant_id=network['tenant_id'],
mac_address=attributes.ATTR_NOT_SPECIFIED,
name='',
device_owner=constants.DEVICE_OWNER_DHCP,
fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
retval = self._port_action(plugin, context, {'port': port_dict},
'create_port')
if not retval:
return
# Convert subnet_id to subnet dict
for fixed_ip in retval['fixed_ips']:
subnet_id = fixed_ip.pop('subnet_id')
fixed_ip['subnet'] = subnets[subnet_id]
return retval
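    # Illustrative note (not part of the original module): given dhcp-enabled
    # subnets s1 and s2 and an existing port carrying a fixed_ip on s1 only,
    # get_dhcp_port() re-uses that port, appends {'subnet_id': s2} to its
    # fixed_ips and calls update_port(); only when no port matches the
    # network/device id filter is a brand new one created via _port_action().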
def release_dhcp_port(self, context, **kwargs):
"""Release the port currently being used by a DHCP agent."""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
LOG.debug(_('DHCP port deletion for %(network_id)s request from '
'%(host)s'),
{'network_id': network_id, 'host': host})
plugin = manager.NeutronManager.get_plugin()
filters = dict(network_id=[network_id], device_id=[device_id])
plugin.delete_ports(context, filters=filters)
def release_port_fixed_ip(self, context, **kwargs):
"""Release the fixed_ip associated the subnet on a port."""
host = kwargs.get('host')
network_id = kwargs.get('network_id')
device_id = kwargs.get('device_id')
subnet_id = kwargs.get('subnet_id')
LOG.debug(_('DHCP port remove fixed_ip for %(subnet_id)s request '
'from %(host)s'),
{'subnet_id': subnet_id, 'host': host})
plugin = manager.NeutronManager.get_plugin()
filters = dict(network_id=[network_id], device_id=[device_id])
ports = plugin.get_ports(context, filters=filters)
if ports:
port = ports[0]
fixed_ips = port.get('fixed_ips', [])
for i in range(len(fixed_ips)):
if fixed_ips[i]['subnet_id'] == subnet_id:
del fixed_ips[i]
break
plugin.update_port(context, port['id'], dict(port=port))
def update_lease_expiration(self, context, **kwargs):
"""Release the fixed_ip associated the subnet on a port."""
# NOTE(arosen): This method is no longer used by the DHCP agent but is
# left so that neutron-dhcp-agents will still continue to work if
# neutron-server is upgraded and not the agent.
host = kwargs.get('host')
LOG.warning(_('Updating lease expiration is now deprecated. Issued '
'from host %s.'), host)
def create_dhcp_port(self, context, **kwargs):
"""Create and return dhcp port information.
If an expected failure occurs, a None port is returned.
"""
host = kwargs.get('host')
port = kwargs.get('port')
LOG.debug(_('Create dhcp port %(port)s '
'from %(host)s.'),
{'port': port,
'host': host})
port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP
port['port'][portbindings.HOST_ID] = host
if 'mac_address' not in port['port']:
port['port']['mac_address'] = attributes.ATTR_NOT_SPECIFIED
plugin = manager.NeutronManager.get_plugin()
return self._port_action(plugin, context, port, 'create_port')
def update_dhcp_port(self, context, **kwargs):
"""Update the dhcp port."""
host = kwargs.get('host')
port_id = kwargs.get('port_id')
port = kwargs.get('port')
LOG.debug(_('Update dhcp port %(port)s '
'from %(host)s.'),
{'port': port,
'host': host})
plugin = manager.NeutronManager.get_plugin()
return self._port_action(plugin, context,
{'id': port_id, 'port': port},
'update_port')
|
|
import operator
from django import template
from django.template.defaultfilters import stringfilter
from django.template.loader import get_template
from django.utils import six
register = template.Library()
@register.filter
@stringfilter
def trim(value, num):
return value[:num]
@register.filter
def noop(value, param=None):
"""A noop filter that always return its first argument and does nothing with
its second (optional) one.
Useful for testing out whitespace in filter arguments (see #19882)."""
return value
@register.simple_tag(takes_context=True)
def context_stack_length(context):
return len(context.dicts)
@register.simple_tag
def no_params():
"""Expected no_params __doc__"""
return "no_params - Expected result"
no_params.anything = "Expected no_params __dict__"
@register.simple_tag
def one_param(arg):
"""Expected one_param __doc__"""
return "one_param - Expected result: %s" % arg
one_param.anything = "Expected one_param __dict__"
@register.simple_tag(takes_context=False)
def explicit_no_context(arg):
"""Expected explicit_no_context __doc__"""
return "explicit_no_context - Expected result: %s" % arg
explicit_no_context.anything = "Expected explicit_no_context __dict__"
@register.simple_tag(takes_context=True)
def no_params_with_context(context):
"""Expected no_params_with_context __doc__"""
return "no_params_with_context - Expected result (context value: %s)" % context['value']
no_params_with_context.anything = "Expected no_params_with_context __dict__"
@register.simple_tag(takes_context=True)
def params_and_context(context, arg):
"""Expected params_and_context __doc__"""
return "params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
params_and_context.anything = "Expected params_and_context __dict__"
@register.simple_tag
def simple_two_params(one, two):
"""Expected simple_two_params __doc__"""
return "simple_two_params - Expected result: %s, %s" % (one, two)
simple_two_params.anything = "Expected simple_two_params __dict__"
@register.simple_tag
def simple_one_default(one, two='hi'):
"""Expected simple_one_default __doc__"""
return "simple_one_default - Expected result: %s, %s" % (one, two)
simple_one_default.anything = "Expected simple_one_default __dict__"
@register.simple_tag
def simple_unlimited_args(one, two='hi', *args):
"""Expected simple_unlimited_args __doc__"""
return "simple_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))
simple_unlimited_args.anything = "Expected simple_unlimited_args __dict__"
@register.simple_tag
def simple_only_unlimited_args(*args):
"""Expected simple_only_unlimited_args __doc__"""
return "simple_only_unlimited_args - Expected result: %s" % ', '.join(six.text_type(arg) for arg in args)
simple_only_unlimited_args.anything = "Expected simple_only_unlimited_args __dict__"
@register.simple_tag
def simple_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected simple_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return "simple_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
)
simple_unlimited_args_kwargs.anything = "Expected simple_unlimited_args_kwargs __dict__"
@register.simple_tag(takes_context=True)
def simple_tag_without_context_parameter(arg):
"""Expected simple_tag_without_context_parameter __doc__"""
return "Expected result"
simple_tag_without_context_parameter.anything = "Expected simple_tag_without_context_parameter __dict__"
@register.simple_tag(takes_context=True)
def current_app(context):
return "%s" % context.current_app
@register.simple_tag(takes_context=True)
def use_l10n(context):
return "%s" % context.use_l10n
@register.simple_tag(name='minustwo')
def minustwo_overridden_name(value):
return value - 2
register.simple_tag(lambda x: x - 1, name='minusone')
@register.inclusion_tag('inclusion.html')
def inclusion_no_params():
"""Expected inclusion_no_params __doc__"""
return {"result": "inclusion_no_params - Expected result"}
inclusion_no_params.anything = "Expected inclusion_no_params __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_no_params_from_template():
"""Expected inclusion_no_params_from_template __doc__"""
return {"result": "inclusion_no_params_from_template - Expected result"}
inclusion_no_params_from_template.anything = "Expected inclusion_no_params_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_one_param(arg):
"""Expected inclusion_one_param __doc__"""
return {"result": "inclusion_one_param - Expected result: %s" % arg}
inclusion_one_param.anything = "Expected inclusion_one_param __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_one_param_from_template(arg):
"""Expected inclusion_one_param_from_template __doc__"""
return {"result": "inclusion_one_param_from_template - Expected result: %s" % arg}
inclusion_one_param_from_template.anything = "Expected inclusion_one_param_from_template __dict__"
@register.inclusion_tag('inclusion.html', takes_context=False)
def inclusion_explicit_no_context(arg):
"""Expected inclusion_explicit_no_context __doc__"""
return {"result": "inclusion_explicit_no_context - Expected result: %s" % arg}
inclusion_explicit_no_context.anything = "Expected inclusion_explicit_no_context __dict__"
@register.inclusion_tag(get_template('inclusion.html'), takes_context=False)
def inclusion_explicit_no_context_from_template(arg):
"""Expected inclusion_explicit_no_context_from_template __doc__"""
return {"result": "inclusion_explicit_no_context_from_template - Expected result: %s" % arg}
inclusion_explicit_no_context_from_template.anything = "Expected inclusion_explicit_no_context_from_template __dict__"
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_no_params_with_context(context):
"""Expected inclusion_no_params_with_context __doc__"""
return {"result": "inclusion_no_params_with_context - Expected result (context value: %s)" % context['value']}
inclusion_no_params_with_context.anything = "Expected inclusion_no_params_with_context __dict__"
@register.inclusion_tag(get_template('inclusion.html'), takes_context=True)
def inclusion_no_params_with_context_from_template(context):
"""Expected inclusion_no_params_with_context_from_template __doc__"""
return {"result": "inclusion_no_params_with_context_from_template - Expected result (context value: %s)" % context['value']}
inclusion_no_params_with_context_from_template.anything = "Expected inclusion_no_params_with_context_from_template __dict__"
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_params_and_context(context, arg):
"""Expected inclusion_params_and_context __doc__"""
return {"result": "inclusion_params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)}
inclusion_params_and_context.anything = "Expected inclusion_params_and_context __dict__"
@register.inclusion_tag(get_template('inclusion.html'), takes_context=True)
def inclusion_params_and_context_from_template(context, arg):
"""Expected inclusion_params_and_context_from_template __doc__"""
return {"result": "inclusion_params_and_context_from_template - Expected result (context value: %s): %s" % (context['value'], arg)}
inclusion_params_and_context_from_template.anything = "Expected inclusion_params_and_context_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_two_params(one, two):
"""Expected inclusion_two_params __doc__"""
return {"result": "inclusion_two_params - Expected result: %s, %s" % (one, two)}
inclusion_two_params.anything = "Expected inclusion_two_params __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_two_params_from_template(one, two):
"""Expected inclusion_two_params_from_template __doc__"""
return {"result": "inclusion_two_params_from_template - Expected result: %s, %s" % (one, two)}
inclusion_two_params_from_template.anything = "Expected inclusion_two_params_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_one_default(one, two='hi'):
"""Expected inclusion_one_default __doc__"""
return {"result": "inclusion_one_default - Expected result: %s, %s" % (one, two)}
inclusion_one_default.anything = "Expected inclusion_one_default __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_one_default_from_template(one, two='hi'):
"""Expected inclusion_one_default_from_template __doc__"""
return {"result": "inclusion_one_default_from_template - Expected result: %s, %s" % (one, two)}
inclusion_one_default_from_template.anything = "Expected inclusion_one_default_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_unlimited_args(one, two='hi', *args):
"""Expected inclusion_unlimited_args __doc__"""
return {"result": "inclusion_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))}
inclusion_unlimited_args.anything = "Expected inclusion_unlimited_args __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_unlimited_args_from_template(one, two='hi', *args):
"""Expected inclusion_unlimited_args_from_template __doc__"""
return {"result": "inclusion_unlimited_args_from_template - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))}
inclusion_unlimited_args_from_template.anything = "Expected inclusion_unlimited_args_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_only_unlimited_args(*args):
"""Expected inclusion_only_unlimited_args __doc__"""
return {"result": "inclusion_only_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in args))}
inclusion_only_unlimited_args.anything = "Expected inclusion_only_unlimited_args __dict__"
@register.inclusion_tag(get_template('inclusion.html'))
def inclusion_only_unlimited_args_from_template(*args):
"""Expected inclusion_only_unlimited_args_from_template __doc__"""
return {"result": "inclusion_only_unlimited_args_from_template - Expected result: %s" % (', '.join(six.text_type(arg) for arg in args))}
inclusion_only_unlimited_args_from_template.anything = "Expected inclusion_only_unlimited_args_from_template __dict__"
@register.inclusion_tag('test_incl_tag_current_app.html', takes_context=True)
def inclusion_tag_current_app(context):
"""Expected inclusion_tag_current_app __doc__"""
return {}
inclusion_tag_current_app.anything = "Expected inclusion_tag_current_app __dict__"
@register.inclusion_tag('test_incl_tag_use_l10n.html', takes_context=True)
def inclusion_tag_use_l10n(context):
"""Expected inclusion_tag_use_l10n __doc__"""
return {}
inclusion_tag_use_l10n.anything = "Expected inclusion_tag_use_l10n __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected inclusion_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return {"result": "inclusion_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
)}
inclusion_unlimited_args_kwargs.anything = "Expected inclusion_unlimited_args_kwargs __dict__"
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_tag_without_context_parameter(arg):
"""Expected inclusion_tag_without_context_parameter __doc__"""
return {}
inclusion_tag_without_context_parameter.anything = "Expected inclusion_tag_without_context_parameter __dict__"
@register.assignment_tag
def assignment_no_params():
"""Expected assignment_no_params __doc__"""
return "assignment_no_params - Expected result"
assignment_no_params.anything = "Expected assignment_no_params __dict__"
@register.assignment_tag
def assignment_one_param(arg):
"""Expected assignment_one_param __doc__"""
return "assignment_one_param - Expected result: %s" % arg
assignment_one_param.anything = "Expected assignment_one_param __dict__"
@register.assignment_tag(takes_context=False)
def assignment_explicit_no_context(arg):
"""Expected assignment_explicit_no_context __doc__"""
return "assignment_explicit_no_context - Expected result: %s" % arg
assignment_explicit_no_context.anything = "Expected assignment_explicit_no_context __dict__"
@register.assignment_tag(takes_context=True)
def assignment_no_params_with_context(context):
"""Expected assignment_no_params_with_context __doc__"""
return "assignment_no_params_with_context - Expected result (context value: %s)" % context['value']
assignment_no_params_with_context.anything = "Expected assignment_no_params_with_context __dict__"
@register.assignment_tag(takes_context=True)
def assignment_params_and_context(context, arg):
"""Expected assignment_params_and_context __doc__"""
return "assignment_params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
assignment_params_and_context.anything = "Expected assignment_params_and_context __dict__"
@register.assignment_tag
def assignment_two_params(one, two):
"""Expected assignment_two_params __doc__"""
return "assignment_two_params - Expected result: %s, %s" % (one, two)
assignment_two_params.anything = "Expected assignment_two_params __dict__"
@register.assignment_tag
def assignment_one_default(one, two='hi'):
"""Expected assignment_one_default __doc__"""
return "assignment_one_default - Expected result: %s, %s" % (one, two)
assignment_one_default.anything = "Expected assignment_one_default __dict__"
@register.assignment_tag
def assignment_unlimited_args(one, two='hi', *args):
"""Expected assignment_unlimited_args __doc__"""
return "assignment_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))
assignment_unlimited_args.anything = "Expected assignment_unlimited_args __dict__"
@register.assignment_tag
def assignment_only_unlimited_args(*args):
"""Expected assignment_only_unlimited_args __doc__"""
return "assignment_only_unlimited_args - Expected result: %s" % ', '.join(six.text_type(arg) for arg in args)
assignment_only_unlimited_args.anything = "Expected assignment_only_unlimited_args __dict__"
@register.assignment_tag
def assignment_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected assignment_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return "assignment_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
)
assignment_unlimited_args_kwargs.anything = "Expected assignment_unlimited_args_kwargs __dict__"
@register.assignment_tag(takes_context=True)
def assignment_tag_without_context_parameter(arg):
"""Expected assignment_tag_without_context_parameter __doc__"""
return "Expected result"
assignment_tag_without_context_parameter.anything = "Expected assignment_tag_without_context_parameter __dict__"
|
|
#!/usr/bin/env python
#
# svnmerge-migrate-history-remotely.py: Remotely migrate merge history from
# svnmerge.py's format to Subversion 1.5's format.
#
# ====================================================================
# Copyright (c) 2008 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
# This software consists of voluntary contributions made by many
# individuals. For exact contribution history, see the revision
# history and logs, available at http://subversion.tigris.org/.
# ====================================================================
# $HeadURL$
# $Date$
# $Author$
# $Rev$
### THE SVNMERGE HISTORY MIGRATION ALGORITHM EMPLOYED HEREIN
###
### 1. Fetch properties for PATH, looking specifically for
### 'svnmerge-blocked', and 'svnmerge-integrated'.
###
### 2. Convert properties into real mergeinfo. NOTE: svnmerge-*
### properties have a slightly different and more flexible syntax.
###
### 3. Combine mergeinfos together.
###
### 4. [non-naive] Subtract natural history of the merge target from
### its own mergeinfo.
###
### 5. [non-naive] Filter mergeinfo by merge source natural history
### (so that mergeinfo only reflects real locations).
###
### 6. Convert mergeinfo back into a property value. If it reflects
### a change over the previously existing mergeinfo, or there were
### some svnmerge-* properties to delete, then write the new
### mergeinfo and delete the svnmerge-* properties.
import sys
import os
import urllib
import getopt
try:
my_getopt = getopt.gnu_getopt
except AttributeError:
my_getopt = getopt.getopt
try:
from svn import client, ra, core
svn_version = (core.SVN_VER_MAJOR, core.SVN_VER_MINOR, core.SVN_VER_PATCH)
if svn_version < (1, 5, 0):
raise ImportError
except ImportError:
raise Exception, \
"Subversion Python bindings version 1.5.0 or newer required."
### -------------------------------------------------------------------------
def errput(msg, only_a_warning=False):
"""Output MSG to stderr, possibly decorating it as an error
instead of just a warning as determined by ONLY_A_WARNING."""
if not only_a_warning:
sys.stderr.write("ERROR: ")
sys.stderr.write(msg + "\n")
def make_optrev(rev):
"""Return an svn_opt_revision_t build from revision specifier REV.
REV maybe be an integer value or one of the following string
specifiers: 'HEAD', 'BASE'."""
try:
revnum = int(rev)
kind = core.svn_opt_revision_number
except ValueError:
revnum = -1
if rev == 'HEAD':
kind = core.svn_opt_revision_head
elif rev == 'BASE':
kind = core.svn_opt_revision_base
else:
raise Exception("Unsupported revision specified '%s'" % str(rev))
optrev = core.svn_opt_revision_t()
optrev.kind = kind
optrev.value.number = revnum
return optrev
def svnmerge_prop_to_mergeinfo(svnmerge_prop_val):
"""Parse svnmerge-* property value SVNMERGE_PROP_VAL (which uses
any whitespace for delimiting sources and stores source paths
URI-encoded) into Subversion mergeinfo."""
if svnmerge_prop_val is None:
return None
# First we convert the svnmerge prop value into an svn:mergeinfo
# prop value, then we parse it into mergeinfo.
sources = svnmerge_prop_val.split()
svnmerge_prop_val = ''
for source in sources:
pieces = source.split(':')
if not (len(pieces) == 2 and pieces[1]):
continue
pieces[0] = urllib.unquote(pieces[0])
svnmerge_prop_val = svnmerge_prop_val + '%s\n' % (':'.join(pieces))
return core.svn_mergeinfo_parse(svnmerge_prop_val or '')
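# Illustrative sketch (paths and revisions are made up): an svnmerge-integrated
# value such as
#
#     "/branches/feature%20x:1-1000 /trunk:1-2500"
#
# is rewritten above into the newline-delimited, URI-decoded form
#
#     /branches/feature x:1-1000
#     /trunk:1-2500
#
# which core.svn_mergeinfo_parse() then turns into a mergeinfo hash keyed by
# source path.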
def mergeinfo_merge(mergeinfo1, mergeinfo2):
"""Like svn.core.svn_mergeinfo_merge(), but preserves None-ness."""
if mergeinfo1 is None and mergeinfo2 is None:
return None
if mergeinfo1 is None:
return mergeinfo2
if mergeinfo2 is None:
return mergeinfo1
return core.svn_mergeinfo_merge(mergeinfo1, mergeinfo2)
def relative_path_from_urls(root_url, url, leading_slash=False):
"""Return the relative path (not URI-encoded) created by
subtracting from URL the ROOT_URL. Add a LEADING_SLASH if
requested."""
if root_url == url:
return leading_slash and '/' or ''
assert url.startswith(root_url)
if leading_slash:
return urllib.unquote(url[len(root_url):])
else:
return urllib.unquote(url[len(root_url)+1:])
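# For example (illustrative URLs only):
#   relative_path_from_urls('http://svn.example.com/repos',
#                           'http://svn.example.com/repos/branches/1.5.x')
# returns 'branches/1.5.x', or '/branches/1.5.x' when leading_slash=True.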
def pretty_print_mergeinfo(mergeinfo, indent=0):
"""Print MERGEINFO hash, one source per line, with a given INDENT."""
mstr = core.svn_mergeinfo_to_string(mergeinfo)
sys.stdout.write('\n'.join(map(lambda x: indent * ' ' + x,
filter(None, mstr.split('\n')))) + '\n')
### -------------------------------------------------------------------------
class SvnClient:
"""Subversion client operation abstraction."""
def __init__(self, config_dir=None):
core.svn_config_ensure(config_dir)
self.ctx = client.ctx_t()
self.ctx.auth_baton = core.svn_auth_open([
client.get_simple_provider(),
client.get_username_provider(),
client.get_ssl_server_trust_file_provider(),
client.get_ssl_client_cert_file_provider(),
client.get_ssl_client_cert_pw_file_provider(),
])
self.ctx.config = core.svn_config_get_config(config_dir)
if config_dir is not None:
core.svn_auth_set_parameter(self.ctx.auth_baton,
core.SVN_AUTH_PARAM_CONFIG_DIR,
config_dir)
self.ra_callbacks = ra.callbacks_t()
self.ra_callbacks.auth_baton = self.ctx.auth_baton
self.base_optrev = make_optrev('BASE')
def get_path_urls(self, path):
"""Return a 2-tuple containing the repository URL associated
with the versioned working file or directory located at
PATH and the repository URL for the same."""
infos = []
def _info_cb(infopath, info, pool, retval=infos):
infos.append(info)
client.svn_client_info(path, self.base_optrev, self.base_optrev,
_info_cb, 0, self.ctx)
assert len(infos) == 1
return infos[0].URL, infos[0].repos_root_URL
def get_path_revision(self, path):
"""Return the current base revision of versioned file or
directory PATH."""
infos = []
def _info_cb(infopath, info, pool, retval=infos):
infos.append(info)
client.svn_client_info(path, self.base_optrev, self.base_optrev,
_info_cb, 0, self.ctx)
assert len(infos) == 1
return infos[0].rev
def get_path_mergeinfo(self, path, root_url=None):
mergeinfo = client.mergeinfo_get_merged(path, self.base_optrev,
self.ctx)
if not mergeinfo:
return mergeinfo
if not root_url:
path_url, root_url = self.get_path_urls(path)
if not root_url:
ras = self.open_ra_session(path_url)
root_url = ra.get_repos_root(ras)
assert root_url
new_mergeinfo = {}
for key, value in mergeinfo.items():
new_key = relative_path_from_urls(root_url, key, True)
new_mergeinfo[new_key] = value
return new_mergeinfo
def get_path_property(self, path, propname):
rev = self.base_optrev
prophash, revnum = client.propget3(propname, path, rev, rev,
core.svn_depth_empty, None,
self.ctx)
return prophash.get(path, None)
def set_path_property(self, path, propname, propval):
client.propset3(propname, propval, path, core.svn_depth_empty,
0, core.SVN_INVALID_REVNUM, None, None, self.ctx)
def get_history_as_mergeinfo(self, ra_session, rel_path, rev,
oldest_rev=core.SVN_INVALID_REVNUM):
"""Return the natural history of REL_PATH in REV, between OLDEST_REV
and REV, as mergeinfo. If OLDEST_REV is core.SVN_INVALID_REVNUM,
all of PATH's history prior to REV will be returned. REL_PATH is
relative to the session URL of RA_SESSION.
(Adapted from Subversion's svn_client__get_history_as_mergeinfo().)"""
# Fetch the location segments in the history.
location_segments = []
def _segment_receiver(segment, pool):
location_segments.append(segment)
ra.get_location_segments(ra_session, rel_path, rev, rev,
oldest_rev, _segment_receiver)
# Location segments come in youngest-to-oldest order, but we need
# them oldest-to-youngest for proper revision range ordering.
location_segments.sort(lambda a, b: cmp(a.range_start, b.range_start))
# Transform location segments into merge sources and ranges.
mergeinfo = {}
for segment in location_segments:
if segment.path is None:
continue
source_path = '/' + segment.path
path_ranges = mergeinfo.get(source_path, [])
range = core.svn_merge_range_t()
range.start = max(segment.range_start - 1, 0)
range.end = segment.range_end
range.inheritable = 1
path_ranges.append(range)
mergeinfo[source_path] = path_ranges
return mergeinfo
def open_ra_session(self, session_url):
return ra.open(session_url, self.ra_callbacks, None, self.ctx.config)
### -------------------------------------------------------------------------
class SvnmergeHistoryMigrator:
"""svnmerge.py tracking data conversion class."""
def __init__(self, client_context, verbose=False, naive=False):
self.cc = client_context
self.verbose = verbose
self.naive = naive
def migrate_path(self, path):
sys.stdout.write("Searching for merge tracking information...\n")
# Get svnmerge-integrated property for PATH, as Subversion mergeinfo.
integrated_mergeinfo = svnmerge_prop_to_mergeinfo(
self.cc.get_path_property(path, 'svnmerge-integrated'))
if integrated_mergeinfo and self.verbose:
sys.stdout.write("Found svnmerge-integrated:\n")
pretty_print_mergeinfo(integrated_mergeinfo, 3)
# Get svnmerge-blocked property for PATH, as Subversion mergeinfo.
blocked_mergeinfo = svnmerge_prop_to_mergeinfo(
self.cc.get_path_property(path, 'svnmerge-blocked'))
if blocked_mergeinfo and self.verbose:
sys.stdout.write("Found svnmerge-blocked:\n")
pretty_print_mergeinfo(blocked_mergeinfo, 3)
# No svnmerge tracking data? Nothing to do.
if not (integrated_mergeinfo or blocked_mergeinfo):
errput("No svnmerge.py tracking data found for '%s'." % (path),
True)
return
# Fetch Subversion mergeinfo for PATH. Hopefully there is
# none, but if there is, we'll assume folks want to keep it.
orig_mergeinfo = self.cc.get_path_mergeinfo(path)
if orig_mergeinfo and self.verbose:
sys.stdout.write("Found Subversion mergeinfo:\n")
pretty_print_mergeinfo(orig_mergeinfo, 3)
# Merge all our mergeinfos together.
new_mergeinfo = mergeinfo_merge(orig_mergeinfo, integrated_mergeinfo)
new_mergeinfo = mergeinfo_merge(new_mergeinfo, blocked_mergeinfo)
# Unless we're doing a naive migration (or we've no, or only
# empty, mergeinfo anyway), start trying to cleanup after
# svnmerge.py's history-ignorant initialization.
if not self.naive and new_mergeinfo:
sys.stdout.write("Sanitizing mergeinfo (this can take a "
"while)...\n")
# What we need:
# - the relative path in the repository for PATH
# - repository root URL and an RA session rooted thereat
# - the base revision of PATH
path_url, root_url = self.cc.get_path_urls(path)
if root_url:
ras = self.cc.open_ra_session(root_url)
else:
ras = self.cc.open_ra_session(path_url)
root_url = ra.get_repos_root(ras)
ra.reparent(ras, root_url)
assert path_url.startswith(root_url)
rel_path = relative_path_from_urls(root_url, path_url)
path_rev = self.cc.get_path_revision(path)
# We begin by subtracting the natural history of the merge
# target from its own mergeinfo.
implicit_mergeinfo = \
self.cc.get_history_as_mergeinfo(ras, rel_path, path_rev)
if self.verbose:
sys.stdout.write(" subtracting natural history:\n")
pretty_print_mergeinfo(implicit_mergeinfo, 6)
new_mergeinfo = core.svn_mergeinfo_remove(implicit_mergeinfo,
new_mergeinfo)
if self.verbose:
sys.stdout.write(" remaining mergeinfo to be filtered:\n")
pretty_print_mergeinfo(new_mergeinfo, 6)
# Unfortunately, svnmerge.py tends to initialize using
# oft-bogus revision ranges like 1-SOMETHING when the
# merge source didn't even exist in r1. So if the natural
# history of a branch begins in some revision other than
# r1, there's still going to be cruft revisions left in
# NEW_MERGEINFO after subtracting the natural history.
# So, we also examine the natural history of the merge
# sources, and use that as a filter for the explicit
# mergeinfo we've calculated so far.
mergeinfo_so_far = new_mergeinfo
new_mergeinfo = {}
for source_path, ranges in mergeinfo_so_far.items():
# If by some chance it is the case that /path:RANGE1
# and /path:RANGE2 a) represent different lines of
# history, and b) were combined into
# /path:RANGE1+RANGE2 (due to the ranges being
# contiguous), we'll foul this up. But the chances
# are preeeeeeeetty slim.
for range in ranges:
try:
history = self.cc.get_history_as_mergeinfo(
ras, source_path[1:], range.end, range.start + 1)
if self.verbose:
sys.stdout.write(" new sanitized chunk:\n")
pretty_print_mergeinfo(history, 6)
new_mergeinfo = mergeinfo_merge(new_mergeinfo, history)
except core.SubversionException as e:
if not (e.apr_err == core.SVN_ERR_FS_NOT_FOUND
or e.apr_err == core.SVN_ERR_FS_NO_SUCH_REVISION):
raise
if self.verbose:
sys.stdout.write("New converted mergeinfo:\n")
pretty_print_mergeinfo(new_mergeinfo, 3)
sys.stdout.write("Locally removing svnmerge properties and setting "
"new svn:mergeinfo property.\n")
self.cc.set_path_property(path, 'svnmerge-integrated', None)
self.cc.set_path_property(path, 'svnmerge-blocked', None)
self.cc.set_path_property(path, 'svn:mergeinfo',
core.svn_mergeinfo_to_string(new_mergeinfo))
### -------------------------------------------------------------------------
def usage_and_exit(errmsg=None):
stream = errmsg and sys.stderr or sys.stdout
stream.write("""Usage: %s [OPTIONS] BRANCH_PATH
Convert svnmerge.py tracking data found on the working copy
BRANCH_PATH into Subversion merge tracking data as a set of local
property modifications. If BRANCH_PATH already has Subversion merge
tracking data, preserve it during the conversion process. After this
script runs successfully, you can review and then commit the local
property modifications. This script will *not* touch the contents of
any files at or under BRANCH_PATH -- it only effects property
modifications, which you can revert rather than commit if you so
desire.
NOTE: BRANCH_PATH need only be a depth-empty checkout of the branch
whose svnmerge.py tracking data you wish to convert.
NOTE: This script requires remote read access to the Subversion
repository whose working copy data you are trying to convert, but
currently does not implement prompting authentication providers. You
must have valid cached credentials for this script to work.
Options:
--help (-h, -?) Show this usage message
--verbose (-v) Run in verbose mode
--naive-mode Run a naive conversion (faster, but might generate
non-ideal results)
""" % (os.path.basename(sys.argv[0])))
if errmsg:
stream.write("\nERROR: %s\n" % (errmsg))
sys.exit(errmsg and 1 or 0)
def main():
try:
opts, args = my_getopt(sys.argv[1:], "vh?",
["verbose", "naive-mode", "help"])
except getopt.GetoptError:
    usage_and_exit("Unable to process arguments/options.")
# Process arguments.
if not args:
usage_and_exit("No working copy path provided.")
else:
branch_path = core.svn_path_canonicalize(args[0])
# Process options.
verbose = naive_mode = False
for opt, value in opts:
if opt == "--help" or opt in ("-h", "-?"):
usage_and_exit()
elif opt == "--verbose" or opt == "-v":
verbose = True
elif opt == "--naive-mode":
naive_mode = True
else:
usage_and_exit("Unknown option '%s'" % (opt))
# Do the work.
shm = SvnmergeHistoryMigrator(SvnClient(), verbose, naive_mode)
shm.migrate_path(branch_path)
if __name__ == "__main__":
main()
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
from itertools import izip
import logging
import re
from net import bsonrpc
from net import gorpc
from vtdb import cursorv3
from vtdb import dbexceptions
from vtdb import field_types
from vtdb import vtdb_logger
_errno_pattern = re.compile(r'\(errno (\d+)\)')
def log_exception(method):
"""Decorator for logging the exception from vtgatev2.
The convert_exception method interprets and recasts the exceptions
raised by the lower layer. The inner function calls the appropriate vtdb_logger
method based on the exception raised.
Args:
method: Method that takes exc, *args, where exc is an exception raised
by calling code, args are additional args for the exception.
Returns:
Decorated method.
"""
def _log_exception(exc, *args):
logger_object = vtdb_logger.get_logger()
new_exception = method(exc, *args)
if isinstance(new_exception, dbexceptions.IntegrityError):
logger_object.integrity_error(new_exception)
else:
logger_object.vtgatev2_exception(new_exception)
return new_exception
return _log_exception
def handle_app_error(exc_args):
msg = str(exc_args[0]).lower()
if msg.startswith('request_backlog'):
return dbexceptions.RequestBacklog(exc_args)
match = _errno_pattern.search(msg)
if match:
mysql_errno = int(match.group(1))
# Prune the error message to truncate the query string
# returned by mysql as it contains bind variables.
if mysql_errno == 1062:
parts = _errno_pattern.split(msg)
pruned_msg = msg[:msg.find(parts[2])]
new_args = (pruned_msg,) + tuple(exc_args[1:])
return dbexceptions.IntegrityError(new_args)
return dbexceptions.DatabaseError(exc_args)
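# Illustrative sketch (the error text is made up): an AppError whose message
# contains "duplicate entry '1' for key 'primary' (errno 1062) during query ..."
# matches _errno_pattern, is recognized as MySQL errno 1062, has the query tail
# after "(errno 1062)" pruned off, and surfaces as dbexceptions.IntegrityError;
# any other errno falls through to dbexceptions.DatabaseError.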
@log_exception
def convert_exception(exc, *args):
new_args = exc.args + args
if isinstance(exc, gorpc.TimeoutError):
return dbexceptions.TimeoutError(new_args)
elif isinstance(exc, gorpc.AppError):
return handle_app_error(new_args)
elif isinstance(exc, gorpc.ProgrammingError):
return dbexceptions.ProgrammingError(new_args)
elif isinstance(exc, gorpc.GoRpcError):
return dbexceptions.FatalError(new_args)
return exc
def _create_req(sql, new_binds, tablet_type, not_in_transaction):
new_binds = field_types.convert_bind_vars(new_binds)
req = {
'Sql': sql,
'BindVariables': new_binds,
'TabletType': tablet_type,
'NotInTransaction': not_in_transaction,
}
return req
class VTGateConnection(object):
"""This utilizes the V3 API of VTGate."""
session = None
_stream_fields = None
_stream_conversions = None
_stream_result = None
_stream_result_index = None
def __init__(self, addr, timeout, user=None, password=None,
keyfile=None, certfile=None):
self.addr = addr
self.timeout = timeout
self.client = bsonrpc.BsonRpcClient(addr, timeout, user, password,
keyfile=keyfile, certfile=certfile)
self.logger_object = vtdb_logger.get_logger()
def __str__(self):
return '<VTGateConnection %s >' % self.addr
def dial(self):
try:
if not self.is_closed():
self.close()
self.client.dial()
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def close(self):
if self.session:
self.rollback()
self.client.close()
def is_closed(self):
return self.client.is_closed()
def cursor(self, *pargs, **kwargs):
cursorclass = None
if 'cursorclass' in kwargs:
cursorclass = kwargs['cursorclass']
del kwargs['cursorclass']
if cursorclass is None:
cursorclass = cursorv3.Cursor
return cursorclass(self, *pargs, **kwargs)
def begin(self):
try:
response = self.client.call('VTGate.Begin', None)
self.session = response.reply
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def commit(self):
try:
session = self.session
self.session = None
self.client.call('VTGate.Commit', session)
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def rollback(self):
try:
session = self.session
self.session = None
self.client.call('VTGate.Rollback', session)
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
def _add_session(self, req):
if self.session:
req['Session'] = self.session
def _update_session(self, response):
if 'Session' in response.reply and response.reply['Session']:
self.session = response.reply['Session']
def _execute(
self, sql, bind_variables, tablet_type, not_in_transaction=False):
req = _create_req(sql, bind_variables, tablet_type, not_in_transaction)
self._add_session(req)
fields = []
conversions = []
results = []
rowcount = 0
lastrowid = 0
try:
response = self.client.call('VTGate.Execute', req)
self._update_session(response)
reply = response.reply
if 'Error' in response.reply and response.reply['Error']:
raise gorpc.AppError(response.reply['Error'], 'VTGate.Execute')
if 'Result' in reply:
res = reply['Result']
for field in res['Fields']:
fields.append((field['Name'], field['Type']))
conversions.append(field_types.conversions.get(field['Type']))
for row in res['Rows']:
results.append(tuple(_make_row(row, conversions)))
rowcount = res['RowsAffected']
lastrowid = res['InsertId']
except gorpc.GoRpcError as e:
self.logger_object.log_private_data(bind_variables)
raise convert_exception(e, str(self), sql)
except:
logging.exception('gorpc low-level error')
raise
return results, rowcount, lastrowid, fields
def _execute_batch(
self, sql_list, bind_variables_list, tablet_type, as_transaction):
query_list = []
for sql, bind_vars in zip(sql_list, bind_variables_list):
query = {}
query['Sql'] = sql
query['BindVariables'] = field_types.convert_bind_vars(bind_vars)
query_list.append(query)
rowsets = []
try:
req = {
'Queries': query_list,
'TabletType': tablet_type,
'AsTransaction': as_transaction,
}
self._add_session(req)
response = self.client.call('VTGate.ExecuteBatch', req)
self._update_session(response)
if 'Error' in response.reply and response.reply['Error']:
raise gorpc.AppError(response.reply['Error'], 'VTGate.ExecuteBatch')
for reply in response.reply['List']:
fields = []
conversions = []
results = []
rowcount = 0
for field in reply['Fields']:
fields.append((field['Name'], field['Type']))
conversions.append(field_types.conversions.get(field['Type']))
for row in reply['Rows']:
results.append(tuple(_make_row(row, conversions)))
rowcount = reply['RowsAffected']
lastrowid = reply['InsertId']
rowsets.append((results, rowcount, lastrowid, fields))
except gorpc.GoRpcError as e:
self.logger_object.log_private_data(bind_variables_list)
raise convert_exception(e, str(self), sql_list)
except:
logging.exception('gorpc low-level error')
raise
return rowsets
# We return the fields for the response. The column conversions are kept in
# member variables (_stream_fields / _stream_conversions) so that
# _stream_next() can decode the rows of subsequent stream responses.
def _stream_execute(
self, sql, bind_variables, tablet_type, not_in_transaction=False):
req = _create_req(sql, bind_variables, tablet_type, not_in_transaction)
self._add_session(req)
self._stream_fields = []
self._stream_conversions = []
self._stream_result = None
self._stream_result_index = 0
try:
self.client.stream_call('VTGate.StreamExecute', req)
first_response = self.client.stream_next()
reply = first_response.reply['Result']
for field in reply['Fields']:
self._stream_fields.append((field['Name'], field['Type']))
self._stream_conversions.append(
field_types.conversions.get(field['Type']))
except gorpc.GoRpcError as e:
self.logger_object.log_private_data(bind_variables)
raise convert_exception(e, str(self), sql)
except:
logging.exception('gorpc low-level error')
raise
return None, 0, 0, self._stream_fields
def _stream_next(self):
# Terminating condition
if self._stream_result_index is None:
return None
# See if we need to read more or whether we just pop the next row.
while self._stream_result is None:
try:
self._stream_result = self.client.stream_next()
if self._stream_result is None:
self._stream_result_index = None
return None
# A session message, if any, comes separately with no rows.
if ('Session' in self._stream_result.reply and
self._stream_result.reply['Session']):
self.session = self._stream_result.reply['Session']
self._stream_result = None
continue
# An extra fields-only message can arrive when scattering over streaming; ignore it.
if not self._stream_result.reply['Result']['Rows']:
self._stream_result = None
continue
except gorpc.GoRpcError as e:
raise convert_exception(e, str(self))
except:
logging.exception('gorpc low-level error')
raise
row = tuple(_make_row(
self._stream_result.reply['Result']['Rows'][self._stream_result_index],
self._stream_conversions))
# If we are reading the last row, set us up to read more data.
self._stream_result_index += 1
if (self._stream_result_index ==
len(self._stream_result.reply['Result']['Rows'])):
self._stream_result = None
self._stream_result_index = 0
return row
def _make_row(row, conversions):
converted_row = []
for conversion_func, field_data in izip(conversions, row):
if field_data is None:
v = None
elif conversion_func:
v = conversion_func(field_data)
else:
v = field_data
converted_row.append(v)
return converted_row
def connect(*pargs, **kwargs):
conn = VTGateConnection(*pargs, **kwargs)
conn.dial()
return conn
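# Minimal usage sketch (not part of this module; the address is a placeholder
# and _execute is normally reached through a cursor rather than called
# directly):
#
#   conn = connect('localhost:15991', 30.0)
#   conn.begin()
#   results, rowcount, lastrowid, fields = conn._execute(
#       'select * from product', {}, 'master')
#   conn.commit()
#   conn.close()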
|
|
""" Simplified "overlays" on top of TensorFlow graphs.
TensorFlow graphs are often too low-level to represent our conceptual
understanding of a model. This module provides abstractions to represent
simplified graphs on top of a TensorFlow graph:
`OverlayGraph` - A subgraph of a TensorFlow computational graph. Each node
corresponds to a node in the original TensorFlow graph, and edges
correspond to paths through the original graph.
`OverlayNode` - A node in an OverlayGraph. Corresponds to a node in a
TensorFlow graph.
# Example usage:
```
with tf.Graph().as_default() as graph:
model = models.InceptionV1()
tf.import_graph_def(model.graph_def)
overlay = OverlayGraph(graph)
```
"""
from collections import defaultdict
import numpy as np
import tensorflow as tf
class OverlayNode():
"""A node in an OverlayGraph. Corresponds to a TensorFlow Tensor.
"""
def __init__(self, name, overlay_graph):
self.name = name
self.overlay_graph = overlay_graph
self.tf_graph = overlay_graph.tf_graph
try:
self.tf_node = self.tf_graph.get_tensor_by_name(name)
except:
self.tf_node = None
self.sub_structure = None
@staticmethod
def as_name(node):
if isinstance(node, str):
return node
elif isinstance(node, (OverlayNode, tf.Tensor)):
return node.name
else:
raise NotImplementedError
def __repr__(self):
return "<%s: %s>" % (self.name, self.op)
@property
def op(self):
return self.tf_node.op.type
@property
def inputs(self):
return self.overlay_graph.node_to_inputs[self]
@property
def consumers(self):
return self.overlay_graph.node_to_consumers[self]
@property
def extended_inputs(self):
return self.overlay_graph.node_to_extended_inputs[self]
@property
def extended_consumers(self):
return self.overlay_graph.node_to_extended_consumers[self]
@property
def gcd(self):
return self.overlay_graph.gcd(self.inputs)
@property
def lcm(self):
return self.overlay_graph.lcm(self.consumers)
class OverlayStructure():
"""Represents a sub-structure of a OverlayGraph.
Often, we want to find structures within a graph, such as branches and
sequences, to assist with graph layout for users.
An OverlayStructure represents such a structure. It is typically used
in conjunction with OverlayGraph.collapse_structures() to parse a graph.
"""
def __init__(self, structure_type, structure):
self.structure_type = structure_type
self.structure = structure # A dictionary
self.children = sum([component if isinstance(component, (list, tuple)) else [component]
for component in structure.values()], [])
def __contains__(self, item):
return OverlayNode.as_name(item) in [n.name for n in self.children]
class OverlayGraph():
"""A subgraph of a TensorFlow computational graph.
TensorFlow graphs are often too low-level to represent our conceptual
understanding of a model.
OverlayGraph can be used to represent a simplified version of a TensorFlow
graph. Each node corresponds to a node in the original TensorFlow graph, and
edges correspond to paths through the original graph.
"""
def __init__(self, tf_graph, nodes=None, no_pass_through=None, prev_overlay=None):
self.tf_graph = tf_graph
if nodes is None:
nodes = []
for op in tf_graph.get_operations():
nodes.extend([out.name for out in op.outputs])
self.name_map = {name: OverlayNode(name, self) for name in nodes}
self.nodes = [self.name_map[name] for name in nodes]
self.no_pass_through = [] if no_pass_through is None else no_pass_through
self.node_to_consumers = defaultdict(set)
self.node_to_inputs = defaultdict(set)
self.prev_overlay = prev_overlay
for node in self.nodes:
for inp in self._get_overlay_inputs(node):
self.node_to_inputs[node].add(inp)
self.node_to_consumers[inp].add(node)
self.node_to_extended_consumers = defaultdict(set)
self.node_to_extended_inputs = defaultdict(set)
for node in self.nodes:
for inp in self.node_to_inputs[node]:
self.node_to_extended_inputs[node].add(inp)
self.node_to_extended_inputs[node].update(self.node_to_extended_inputs[inp])
for node in self.nodes[::-1]:
for out in self.node_to_consumers[node]:
self.node_to_extended_consumers[node].add(out)
self.node_to_extended_consumers[node].update(self.node_to_extended_consumers[out])
def __getitem__(self, index):
return self.name_map[OverlayNode.as_name(index)]
def __contains__(self, item):
return OverlayNode.as_name(item) in self.name_map
def get_tf_node(self, node):
name = OverlayNode.as_name(node)
return self.tf_graph.get_tensor_by_name(name)
def _get_overlay_inputs(self, node):
if self.prev_overlay:
raw_inps = self.prev_overlay[node].inputs
else:
raw_inps = self.get_tf_node(node).op.inputs
overlay_inps = []
for inp in raw_inps:
if inp.name.startswith('^'): # skip control inputs
continue
if inp in self:
overlay_inps.append(self[inp])
elif node.name not in self.no_pass_through:
overlay_inps.extend(self._get_overlay_inputs(inp))
return overlay_inps
def graphviz(self, groups=None):
"""Print graphviz graph."""
print("digraph G {")
if groups is not None:
for root, group in groups.items():
print("")
print((" subgraph", "cluster_%s" % root.name.replace("/", "_"), "{"))
print((" label = \"%s\"") % (root.name))
for node in group:
print((" \"%s\"") % node.name)
print(" }")
for node in self.nodes:
for inp in node.inputs:
print(" ", '"' + inp.name + '"', " -> ", '"' + (node.name) + '"')
print("}")
def filter(self, keep_nodes, pass_through=True):
keep_nodes = [self[n].name for n in keep_nodes]
old_nodes = set(self.name_map.keys())
new_nodes = set(keep_nodes)
no_pass_through = set(self.no_pass_through)
if not pass_through:
no_pass_through |= old_nodes - new_nodes
keep_nodes = [node for node in self.name_map if node in keep_nodes]
new_overlay = OverlayGraph(self.tf_graph, keep_nodes, no_pass_through, prev_overlay=self)
for node in new_overlay.nodes:
node.sub_structure = self[node].sub_structure
return new_overlay
def gcd(self, branches):
"""Greatest common divisor (ie. input) of several nodes."""
branches = [self[node] for node in branches]
branch_nodes = [set([node]) | node.extended_inputs for node in branches]
branch_shared = set.intersection(*branch_nodes)
return max(branch_shared, key=lambda n: self.nodes.index(n))
def lcm(self, branches):
"""Lowest common multiplie (ie. consumer) of several nodes."""
branches = [self[node] for node in branches]
branch_nodes = [set([node]) | node.extended_consumers for node in branches]
branch_shared = set.intersection(*branch_nodes)
return min(branch_shared, key=lambda n: self.nodes.index(n))
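# Illustrative sketch (node names are hypothetical): for two parallel branches
#   input -> branch_a -> concat   and   input -> branch_b -> concat,
# overlay.gcd(["branch_a", "branch_b"]) returns the "input" node (the latest
# shared ancestor in graph order), while
# overlay.lcm(["branch_a", "branch_b"]) returns the "concat" node (the earliest
# shared consumer).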
def sorted(self, items):
return sorted(items, key=lambda n: self.nodes.index(self[n]))
def collapse_structures(self, structure_map):
keep_nodes = [node.name for node in self.nodes
if not any(node in structure.children for structure in structure_map.values())
or node in structure_map]
new_overlay = self.filter(keep_nodes)
for node in structure_map:
new_overlay[node].sub_structure = structure_map[node]
return new_overlay
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from typing import Any, Dict, Optional
from airflow.providers.apache.spark.hooks.spark_jdbc import SparkJDBCHook
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator
from airflow.utils.decorators import apply_defaults
# pylint: disable=too-many-instance-attributes
class SparkJDBCOperator(SparkSubmitOperator):
"""
This operator extends the SparkSubmitOperator specifically for performing data
transfers to/from JDBC-based databases with Apache Spark. As with the
SparkSubmitOperator, it assumes that the "spark-submit" binary is available on the
PATH.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`apache-airflow:howto/operator:SparkJDBCOperator`
:param spark_app_name: Name of the job (default airflow-spark-jdbc)
:type spark_app_name: str
:param spark_conn_id: Connection id as configured in Airflow administration
:type spark_conn_id: str
:param spark_conf: Any additional Spark configuration properties
:type spark_conf: dict
:param spark_py_files: Additional python files used (.zip, .egg, or .py)
:type spark_py_files: str
:param spark_files: Additional files to upload to the container running the job
:type spark_files: str
:param spark_jars: Additional jars to upload and add to the driver and
executor classpath
:type spark_jars: str
:param num_executors: Number of executors to run. This should be set so as to manage
the number of connections made with the JDBC database
:type num_executors: int
:param executor_cores: Number of cores per executor
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G)
:type executor_memory: str
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G)
:type driver_memory: str
:param verbose: Whether to pass the verbose flag to spark-submit for debugging
:type verbose: bool
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param principal: The name of the kerberos principal used for keytab
:type principal: str
:param cmd_type: Which way the data should flow. 2 possible values:
spark_to_jdbc: data written by spark from metastore to jdbc
jdbc_to_spark: data written by spark from jdbc to metastore
:type cmd_type: str
:param jdbc_table: The name of the JDBC table
:type jdbc_table: str
:param jdbc_conn_id: Connection id used for connection to JDBC database
:type jdbc_conn_id: str
:param jdbc_driver: Name of the JDBC driver to use for the JDBC connection. This
driver (usually a jar) should be passed in the 'jars' parameter
:type jdbc_driver: str
:param metastore_table: The name of the metastore table
:type metastore_table: str
:param jdbc_truncate: (spark_to_jdbc only) Whether or not Spark should truncate or
drop and recreate the JDBC table. This only takes effect if
'save_mode' is set to Overwrite. Also, if the schema is
different, Spark cannot truncate, and will drop and recreate
:type jdbc_truncate: bool
:param save_mode: The Spark save-mode to use (e.g. overwrite, append, etc.)
:type save_mode: str
:param save_format: (jdbc_to_spark-only) The Spark save-format to use (e.g. parquet)
:type save_format: str
:param batch_size: (spark_to_jdbc only) The size of the batch to insert per round
trip to the JDBC database. Defaults to 1000
:type batch_size: int
:param fetch_size: (jdbc_to_spark only) The size of the batch to fetch per round trip
from the JDBC database. Default depends on the JDBC driver
:type fetch_size: int
:param num_partitions: The maximum number of partitions that can be used by Spark
simultaneously, both for spark_to_jdbc and jdbc_to_spark
operations. This will also cap the number of JDBC connections
that can be opened
:type num_partitions: int
:param partition_column: (jdbc_to_spark-only) A numeric column to be used to
partition the metastore table by. If specified, you must
also specify:
num_partitions, lower_bound, upper_bound
:type partition_column: str
:param lower_bound: (jdbc_to_spark-only) Lower bound of the range of the numeric
partition column to fetch. If specified, you must also specify:
num_partitions, partition_column, upper_bound
:type lower_bound: int
:param upper_bound: (jdbc_to_spark-only) Upper bound of the range of the numeric
partition column to fetch. If specified, you must also specify:
num_partitions, partition_column, lower_bound
:type upper_bound: int
:param create_table_column_types: (spark_to_jdbc-only) The database column data types
to use instead of the defaults, when creating the
table. Data type information should be specified in
the same format as CREATE TABLE columns syntax
(e.g: "name CHAR(64), comments VARCHAR(1024)").
The specified types should be valid spark sql data
types.
"""
# pylint: disable=too-many-arguments,too-many-locals
@apply_defaults
def __init__(
self,
*,
spark_app_name: str = 'airflow-spark-jdbc',
spark_conn_id: str = 'spark-default',
spark_conf: Optional[Dict[str, Any]] = None,
spark_py_files: Optional[str] = None,
spark_files: Optional[str] = None,
spark_jars: Optional[str] = None,
num_executors: Optional[int] = None,
executor_cores: Optional[int] = None,
executor_memory: Optional[str] = None,
driver_memory: Optional[str] = None,
verbose: bool = False,
principal: Optional[str] = None,
keytab: Optional[str] = None,
cmd_type: str = 'spark_to_jdbc',
jdbc_table: Optional[str] = None,
jdbc_conn_id: str = 'jdbc-default',
jdbc_driver: Optional[str] = None,
metastore_table: Optional[str] = None,
jdbc_truncate: bool = False,
save_mode: Optional[str] = None,
save_format: Optional[str] = None,
batch_size: Optional[int] = None,
fetch_size: Optional[int] = None,
num_partitions: Optional[int] = None,
partition_column: Optional[str] = None,
lower_bound: Optional[str] = None,
upper_bound: Optional[str] = None,
create_table_column_types: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self._spark_app_name = spark_app_name
self._spark_conn_id = spark_conn_id
self._spark_conf = spark_conf
self._spark_py_files = spark_py_files
self._spark_files = spark_files
self._spark_jars = spark_jars
self._num_executors = num_executors
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._verbose = verbose
self._keytab = keytab
self._principal = principal
self._cmd_type = cmd_type
self._jdbc_table = jdbc_table
self._jdbc_conn_id = jdbc_conn_id
self._jdbc_driver = jdbc_driver
self._metastore_table = metastore_table
self._jdbc_truncate = jdbc_truncate
self._save_mode = save_mode
self._save_format = save_format
self._batch_size = batch_size
self._fetch_size = fetch_size
self._num_partitions = num_partitions
self._partition_column = partition_column
self._lower_bound = lower_bound
self._upper_bound = upper_bound
self._create_table_column_types = create_table_column_types
self._hook: Optional[SparkJDBCHook] = None
def execute(self, context: Dict[str, Any]) -> None:
"""Call the SparkSubmitHook to run the provided spark job"""
if self._hook is None:
self._hook = self._get_hook()
self._hook.submit_jdbc_job()
def on_kill(self) -> None:
if self._hook is None:
self._hook = self._get_hook()
self._hook.on_kill()
def _get_hook(self) -> SparkJDBCHook:
return SparkJDBCHook(
spark_app_name=self._spark_app_name,
spark_conn_id=self._spark_conn_id,
spark_conf=self._spark_conf,
spark_py_files=self._spark_py_files,
spark_files=self._spark_files,
spark_jars=self._spark_jars,
num_executors=self._num_executors,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
driver_memory=self._driver_memory,
verbose=self._verbose,
keytab=self._keytab,
principal=self._principal,
cmd_type=self._cmd_type,
jdbc_table=self._jdbc_table,
jdbc_conn_id=self._jdbc_conn_id,
jdbc_driver=self._jdbc_driver,
metastore_table=self._metastore_table,
jdbc_truncate=self._jdbc_truncate,
save_mode=self._save_mode,
save_format=self._save_format,
batch_size=self._batch_size,
fetch_size=self._fetch_size,
num_partitions=self._num_partitions,
partition_column=self._partition_column,
lower_bound=self._lower_bound,
upper_bound=self._upper_bound,
create_table_column_types=self._create_table_column_types,
)
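# Minimal usage sketch (not part of this module; connection ids, driver and
# table names are placeholders) showing the operator inside a DAG definition:
#
#   jdbc_to_spark_job = SparkJDBCOperator(
#       task_id='jdbc_to_spark',
#       cmd_type='jdbc_to_spark',
#       jdbc_table='foo',
#       spark_conn_id='spark-default',
#       jdbc_conn_id='jdbc-default',
#       jdbc_driver='org.postgresql.Driver',
#       metastore_table='bar',
#       save_mode='overwrite',
#       save_format='JSON',
#   )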
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.fluid as fluid
import paddle.fluid.layers.ops as ops
from paddle.fluid.initializer import init_on_cpu
from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter
import paddle.fluid.core as core
from parallel_executor_test_base import TestParallelExecutorBase
import unittest
import math
import os
import numpy as np
# FIXME(zcd): If the neural net has a dropout_op, the outputs of ParallelExecutor
# and Executor differ, because ParallelExecutor copies the dropout_op once per
# device (N copies for N devices), so the random numbers generated by
# ParallelExecutor and Executor are different. Therefore, when comparing the
# losses of ParallelExecutor and Executor, we should remove the dropout_op.
remove_dropout = False
# FIXME(zcd): If the neural net has batch_norm, the output of ParallelExecutor
# and Executor is different.
remove_bn = False
def squeeze_excitation(input, num_channels, reduction_ratio):
# pool = fluid.layers.pool2d(
# input=input, pool_size=0, pool_type='avg', global_pooling=True)
conv = input
shape = conv.shape
reshape = fluid.layers.reshape(
x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
pool = fluid.layers.reduce_mean(input=reshape, dim=2)
squeeze = fluid.layers.fc(input=pool,
size=num_channels // reduction_ratio,
act='relu')
excitation = fluid.layers.fc(input=squeeze,
size=num_channels,
act='sigmoid')
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1,
act=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
bias_attr=False)
return conv if remove_bn else fluid.layers.batch_norm(
input=conv, act=act, momentum=0.1)
def shortcut(input, ch_out, stride):
ch_in = input.shape[1]
if ch_in != ch_out:
if stride == 1:
filter_size = 1
else:
filter_size = 3
return conv_bn_layer(input, ch_out, filter_size, stride)
else:
return input
def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
# The number of channels in the first 1x1 convolution of each bottleneck
# building block was halved to reduce the computation cost.
conv0 = conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu')
conv1 = conv_bn_layer(
input=conv0,
num_filters=num_filters * 2,
filter_size=3,
stride=stride,
groups=cardinality,
act='relu')
conv2 = conv_bn_layer(
input=conv1, num_filters=num_filters * 2, filter_size=1, act=None)
scale = squeeze_excitation(
input=conv2,
num_channels=num_filters * 2,
reduction_ratio=reduction_ratio)
short = shortcut(input, num_filters * 2, stride)
return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
batch_size = 12
img_shape = [3, 224, 224]
def SE_ResNeXt50Small(use_feed):
img = fluid.layers.data(name='image', shape=img_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
conv = conv_bn_layer(
input=img, num_filters=16, filter_size=3, stride=2, act='relu')
conv = conv_bn_layer(
input=conv, num_filters=16, filter_size=3, stride=1, act='relu')
conv = conv_bn_layer(
input=conv, num_filters=16, filter_size=3, stride=1, act='relu')
conv = fluid.layers.pool2d(
input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 6, 3]
num_filters = [128, 256, 512, 1024]
for block in range(len(depth)):
for i in range(depth[block]):
conv = bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
cardinality=cardinality,
reduction_ratio=reduction_ratio)
shape = conv.shape
reshape = fluid.layers.reshape(
x=conv, shape=[-1, shape[1], shape[2] * shape[3]])
pool = fluid.layers.reduce_mean(input=reshape, dim=2)
dropout = pool if remove_dropout else fluid.layers.dropout(
x=pool, dropout_prob=0.2, seed=1)
# Classifier layer:
prediction = fluid.layers.fc(input=dropout, size=1000, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
loss = fluid.layers.mean(loss)
return loss
def cosine_decay(learning_rate, step_each_epoch, epochs=120):
"""
Applies cosine decay to the learning rate:
lr = learning_rate * (math.cos(epoch * (math.pi / epochs)) + 1) / 2
"""
global_step = _decay_step_counter()
with init_on_cpu():
epoch = ops.floor(global_step / step_each_epoch)
decayed_lr = learning_rate * \
(ops.cos(epoch * (math.pi / epochs)) + 1)/2
return decayed_lr
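# Illustrative values (assuming learning_rate=0.1 and epochs=120): at epoch 0
# the decayed rate is 0.1 * (cos(0) + 1) / 2 = 0.1, at epoch 60 it is
# 0.1 * (cos(pi/2) + 1) / 2 = 0.05, and it approaches 0 as epoch nears 120.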
def optimizer(learning_rate=0.01):
optimizer = fluid.optimizer.Momentum(
learning_rate=cosine_decay(
learning_rate=learning_rate, step_each_epoch=2, epochs=1),
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
return optimizer
class TestResnet(TestParallelExecutorBase):
@classmethod
def setUpClass(cls):
os.environ['CPU_NUM'] = str(4)
global remove_dropout
global remove_bn
remove_dropout = False
remove_bn = False
def _init_data(self, batch_size=2, random=True):
np.random.seed(5)
if random:
img = np.random.random(
size=[batch_size] + img_shape).astype(np.float32)
else:
img = np.ones(shape=[batch_size] + img_shape, dtype='float32')
label = [np.random.randint(0, 999) for _ in range(batch_size)]
label = np.array(label).astype(np.int64).reshape(-1, 1)
return img, label
def _compare_reduce_and_allreduce(self,
model,
use_cuda,
iter=20,
delta2=1e-6):
if use_cuda and not core.is_compiled_with_cuda():
return
global remove_bn
remove_bn = True
img, label = self._init_data(batch_size=batch_size)
all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence(
model,
feed_dict={"image": img,
"label": label},
iter=iter,
batch_size=batch_size,
use_cuda=use_cuda,
use_reduce=False,
optimizer=optimizer)
reduce_first_loss, reduce_last_loss = self.check_network_convergence(
model,
feed_dict={"image": img,
"label": label},
iter=iter,
batch_size=batch_size,
use_cuda=use_cuda,
use_reduce=True,
optimizer=optimizer)
for loss in zip(all_reduce_first_loss, reduce_first_loss):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
for loss in zip(all_reduce_last_loss, reduce_last_loss):
self.assertAlmostEquals(loss[0], loss[1], delta=delta2)
if not use_cuda:
return
all_reduce_first_loss_seq, all_reduce_last_loss_seq = self.check_network_convergence(
model,
feed_dict={"image": img,
"label": label},
iter=iter,
batch_size=batch_size,
use_cuda=use_cuda,
use_reduce=False,
optimizer=optimizer,
enable_sequential_execution=True)
reduce_first_loss_seq, reduce_last_loss_seq = self.check_network_convergence(
model,
feed_dict={"image": img,
"label": label},
iter=iter,
batch_size=batch_size,
use_cuda=use_cuda,
use_reduce=True,
optimizer=optimizer,
enable_sequential_execution=True)
for loss in zip(all_reduce_first_loss, all_reduce_first_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
for loss in zip(all_reduce_last_loss, all_reduce_last_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=delta2)
for loss in zip(reduce_first_loss, reduce_first_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
for loss in zip(reduce_last_loss, reduce_last_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=delta2)
for loss in zip(all_reduce_first_loss_seq, reduce_first_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=1e-6)
for loss in zip(all_reduce_last_loss_seq, reduce_last_loss_seq):
self.assertAlmostEquals(loss[0], loss[1], delta=delta2)
def _check_resnet_convergence(self,
model,
use_cuda=True,
use_reduce=False,
iter=20,
delta2=1e-6):
if use_cuda and not core.is_compiled_with_cuda():
return
global remove_dropout
global remove_bn
remove_dropout = True
remove_bn = True
img, label = self._init_data(batch_size=batch_size)
single_first_loss, single_last_loss = self.check_network_convergence(
model,
feed_dict={"image": img,
"label": label},
iter=iter,
batch_size=batch_size,
use_cuda=use_cuda,
use_reduce=use_reduce,
optimizer=optimizer,
use_parallel_executor=False)
parallel_first_loss, parallel_last_loss = self.check_network_convergence(
model,
feed_dict={"image": img,
"label": label},
iter=iter,
batch_size=batch_size,
use_cuda=use_cuda,
use_reduce=use_reduce,
optimizer=optimizer)
self.assertAlmostEquals(
np.mean(parallel_first_loss), single_first_loss[0], delta=1e-6)
self.assertAlmostEquals(
np.mean(parallel_last_loss), single_last_loss[0], delta=delta2)
def test_seresnext_with_learning_rate_decay(self):
self._check_resnet_convergence(model=SE_ResNeXt50Small, use_cuda=True)
self._check_resnet_convergence(
model=SE_ResNeXt50Small, use_cuda=False, iter=2, delta2=1e-3)
def test_seresnext_with_new_strategy(self):
self._compare_reduce_and_allreduce(
model=SE_ResNeXt50Small, use_cuda=True, delta2=1e-2)
self._compare_reduce_and_allreduce(
model=SE_ResNeXt50Small, use_cuda=False, iter=5)
if __name__ == '__main__':
unittest.main()
|
|
# coding: utf-8
"""Tornado handlers for WebSocket <-> ZMQ sockets."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import json
import struct
import warnings
try:
from urllib.parse import urlparse # Py 3
except ImportError:
from urlparse import urlparse # Py 2
import tornado
from tornado import gen, ioloop, web
from tornado.websocket import WebSocketHandler
from IPython.kernel.zmq.session import Session
from IPython.utils.jsonutil import date_default, extract_dates
from IPython.utils.py3compat import cast_unicode
from .handlers import IPythonHandler
def serialize_binary_message(msg):
"""serialize a message as a binary blob
Header:
4 bytes: number of msg parts (nbufs) as 32b int
4 * nbufs bytes: offset for each buffer as integer as 32b int
Offsets are from the start of the buffer, including the header.
Returns
-------
The message serialized to bytes.
"""
# don't modify msg or buffer list in-place
msg = msg.copy()
buffers = list(msg.pop('buffers'))
bmsg = json.dumps(msg, default=date_default).encode('utf8')
buffers.insert(0, bmsg)
nbufs = len(buffers)
offsets = [4 * (nbufs + 1)]
for buf in buffers[:-1]:
offsets.append(offsets[-1] + len(buf))
offsets_buf = struct.pack('!' + 'I' * (nbufs + 1), nbufs, *offsets)
buffers.insert(0, offsets_buf)
return b''.join(buffers)
def deserialize_binary_message(bmsg):
"""deserialize a message from a binary blog
Header:
4 bytes: number of msg parts (nbufs) as 32b int
4 * nbufs bytes: offset for each buffer as integer as 32b int
Offsets are from the start of the buffer, including the header.
Returns
-------
message dictionary
"""
nbufs = struct.unpack('!i', bmsg[:4])[0]
offsets = list(struct.unpack('!' + 'I' * nbufs, bmsg[4:4 * (nbufs + 1)]))
offsets.append(None)
bufs = []
for start, stop in zip(offsets[:-1], offsets[1:]):
bufs.append(bmsg[start:stop])
msg = json.loads(bufs[0].decode('utf8'))
msg['header'] = extract_dates(msg['header'])
msg['parent_header'] = extract_dates(msg['parent_header'])
msg['buffers'] = bufs[1:]
return msg
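# Illustrative round trip (values are made up): a message dict such as
#   {'header': {...}, 'parent_header': {...}, ..., 'buffers': [b'\x00\x01']}
# is packed by serialize_binary_message() into a header of one 32-bit nbufs
# count plus nbufs 32-bit offsets, followed by the JSON-encoded message body
# and then the raw buffer bytes; deserialize_binary_message() reverses this,
# restoring the dict with msg['buffers'] == [b'\x00\x01'].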
# ping interval for keeping websockets alive (30 seconds)
WS_PING_INTERVAL = 30000
if os.environ.get('IPYTHON_ALLOW_DRAFT_WEBSOCKETS_FOR_PHANTOMJS', False):
warnings.warn("""Allowing draft76 websocket connections!
This should only be done for testing with phantomjs!""")
from IPython.html import allow76
WebSocketHandler = allow76.AllowDraftWebSocketHandler
# draft 76 doesn't support ping
WS_PING_INTERVAL = 0
class ZMQStreamHandler(WebSocketHandler):
def check_origin(self, origin):
"""Check Origin == Host or Access-Control-Allow-Origin.
Tornado >= 4 calls this method automatically, raising 403 if it returns False.
We call it explicitly in `open` on Tornado < 4.
"""
if self.allow_origin == '*':
return True
host = self.request.headers.get("Host")
# If no header is provided, assume we can't verify origin
if origin is None:
self.log.warn(
"Missing Origin header, rejecting WebSocket connection.")
return False
if host is None:
self.log.warn(
"Missing Host header, rejecting WebSocket connection.")
return False
origin = origin.lower()
origin_host = urlparse(origin).netloc
# OK if origin matches host
if origin_host == host:
return True
# Check CORS headers
if self.allow_origin:
allow = self.allow_origin == origin
elif self.allow_origin_pat:
allow = bool(self.allow_origin_pat.match(origin))
else:
# No CORS headers: deny the request
allow = False
if not allow:
self.log.warn("Blocking Cross Origin WebSocket Attempt. Origin: %s, Host: %s",
origin, host,
)
return allow
def clear_cookie(self, *args, **kwargs):
"""meaningless for websockets"""
pass
def _reserialize_reply(self, msg_list):
"""Reserialize a reply message using JSON.
This takes the msg list from the ZMQ socket, deserializes it using
self.session and then serializes the result using JSON. This method
should be used by self._on_zmq_reply to build messages that can
be sent back to the browser.
"""
idents, msg_list = self.session.feed_identities(msg_list)
msg = self.session.deserialize(msg_list)
if msg['buffers']:
buf = serialize_binary_message(msg)
return buf
else:
smsg = json.dumps(msg, default=date_default)
return cast_unicode(smsg)
def _on_zmq_reply(self, msg_list):
# Sometimes this gets triggered when the on_close method is scheduled in the
# eventloop but hasn't been called.
if self.stream.closed():
return
try:
msg = self._reserialize_reply(msg_list)
except Exception:
self.log.critical("Malformed message: %r" %
msg_list, exc_info=True)
else:
self.write_message(msg, binary=isinstance(msg, bytes))
class AuthenticatedZMQStreamHandler(ZMQStreamHandler, IPythonHandler):
ping_callback = None
last_ping = 0
last_pong = 0
@property
def ping_interval(self):
"""The interval for websocket keep-alive pings.
Set ws_ping_interval = 0 to disable pings.
"""
return self.settings.get('ws_ping_interval', WS_PING_INTERVAL)
@property
def ping_timeout(self):
"""If no ping is received in this many milliseconds,
close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
Default is max of 3 pings or 30 seconds.
"""
return self.settings.get('ws_ping_timeout',
max(3 * self.ping_interval, WS_PING_INTERVAL)
)
def set_default_headers(self):
"""Undo the set_default_headers in IPythonHandler
which doesn't make sense for websockets
"""
pass
def pre_get(self):
"""Run before finishing the GET request
Extend this method to add logic that should fire before
the websocket handshake completes.
"""
# authenticate the request before opening the websocket
if self.get_current_user() is None:
self.log.warn("Couldn't authenticate WebSocket connection")
raise web.HTTPError(403)
if self.get_argument('session_id', False):
self.session.session = cast_unicode(
self.get_argument('session_id'))
else:
self.log.warn("No session ID specified")
@gen.coroutine
def get(self, *args, **kwargs):
# pre_get can be a coroutine in subclasses
# assign and yield in two step to avoid tornado 3 issues
res = self.pre_get()
yield gen.maybe_future(res)
super(AuthenticatedZMQStreamHandler, self).get(*args, **kwargs)
def initialize(self):
self.log.debug(
"Initializing websocket connection %s", self.request.path)
self.session = Session(config=self.config)
def open(self, *args, **kwargs):
self.log.debug("Opening websocket %s", self.request.path)
# start the pinging
if self.ping_interval > 0:
# Remember time of last ping
self.last_ping = ioloop.IOLoop.instance().time()
self.last_pong = self.last_ping
self.ping_callback = ioloop.PeriodicCallback(
self.send_ping, self.ping_interval)
self.ping_callback.start()
def send_ping(self):
"""send a ping to keep the websocket alive"""
if self.stream.closed() and self.ping_callback is not None:
self.ping_callback.stop()
return
# check for timeout on pong. Make sure that we really have sent a recent ping in
# case the machine with both server and client has been suspended since
# the last ping.
now = ioloop.IOLoop.instance().time()
since_last_pong = 1e3 * (now - self.last_pong)
since_last_ping = 1e3 * (now - self.last_ping)
if since_last_ping < 2 * self.ping_interval and since_last_pong > self.ping_timeout:
self.log.warn(
"WebSocket ping timeout after %i ms.", since_last_pong)
self.close()
return
self.ping(b'')
self.last_ping = now
def on_pong(self, data):
self.last_pong = ioloop.IOLoop.instance().time()
|
|
import re
from django.contrib.auth.models import (AbstractBaseUser, AbstractUser,
BaseUserManager)
from django.db import connections, models
from django.db.models import Q, DO_NOTHING, SET_NULL
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
# Create your models here.
class Award(models.Model):
name = models.CharField(max_length=255, null=False)
description = models.TextField(default="")
class Meta:
verbose_name = _("Award")
def __str__(self):
return self.name
def stringify(self):
return {
"id": self.id,
"name": self.name,
"description": self.description
}
def stringify_meta(self):
return self.stringify()
class User(AbstractUser):
nickname = models.CharField(
_('nick_name'), max_length=255, null=False, unique=True)
profile = models.TextField(_('profile'), default="")
current_award = models.ForeignKey("UserAward", null=True, on_delete=SET_NULL)
experience = models.IntegerField(_('experience'), default=0)
snipe = models.IntegerField(_('snipe'), default=0)
sniped = models.IntegerField(_('sniped'), default=0)
REQUIRED_FIELDS = ['nickname']
def get_full_name(self):
return self.nickname
def get_short_name(self):
return self.nickname
def __str__(self):
return self.nickname
def stringify(self):
if self.current_award:
current_award_str = self.current_award.stringify_meta(user=False)
else:
current_award_str = None
available_awards = [
ua.stringify_meta(user=False)
for ua in UserAward.objects.filter(user_id=self)
]
return {
"id": self.id,
"nickname": re.sub("<", "<", re.sub(">", ">", self.nickname)),
"profile": self.profile,
"current_award": current_award_str,
"available_awards": available_awards,
"experience": self.experience,
"username": self.username,
"date_joined": self.date_joined,
"last_login": self.last_login,
"snipe": self.snipe,
"sniped": self.sniped,
}
def stringify_meta(self):
if self.current_award:
current_award_str = self.current_award.stringify_meta(user=False)
else:
current_award_str = None
return {
"id": self.id,
"nickname": re.sub("<", "<", re.sub(">", ">", self.nickname)),
"current_award": current_award_str,
"experience": self.experience,
"username": self.username,
"date_joined": self.date_joined
}
class UserAward(models.Model):
user_id = models.ForeignKey(User)
award_id = models.ForeignKey(Award)
created = models.DateField(_("created"), null=False, default=timezone.now)
class Meta:
verbose_name = _("User-Award")
def __str__(self):
return "[%s] owns [%s]" % (self.user_id.nickname, self.award_id)
def stringify(self, user=True, award=True):
returns = {"created": self.created, "id": self.id}
if user:
returns["user_id"] = self.user_id.stringify_meta()
if award:
returns["award_id"] = self.award_id.stringify_meta(),
return returns
def stringify_meta(self, *args, **kwargs):
return self.stringify(*args, **kwargs)
class Mondai(models.Model):
'''
genre:
0: umigame
1: tobira
2: kameo
3: shin-keshiki
'''
id = models.AutoField(max_length=11, null=False, primary_key=True)
user_id = models.ForeignKey(User, db_column='user_id')
title = models.CharField(_('title'), max_length=255, null=False)
yami = models.BooleanField(_('yami'), default=False, null=False)
genre = models.IntegerField(_('genre'), default=0, null=False)
content = models.TextField(_('content'), null=False)
kaisetu = models.TextField(_('kaisetu'), null=False)
created = models.DateTimeField(_('created'), null=False)
modified = models.DateTimeField(_('modified'), null=False)
status = models.IntegerField(_('status'), default=0, null=False)
memo = models.TextField(_('memo'), default="")
score = models.FloatField(_('score'), default=0, null=False)
class Meta:
verbose_name = _("Soup")
def __str__(self):
return self.title
def stringify(self):
return {
"id": self.id,
"user_id": self.user_id.stringify_meta(),
"title": re.sub("<", "<", re.sub(">", ">", self.title)),
"yami": self.yami,
"genre": self.genre,
"content": self.content,
"kaisetu": self.kaisetu,
"created": self.created,
"modified": self.modified,
"status": self.status,
"memo": self.memo,
"score": self.score
}
def stringify_meta(self):
ques = Shitumon.objects.filter(mondai_id=self)
unanswered = ques.filter(Q(kaitou__isnull=True) | Q(kaitou__exact=""))
return {
"id": self.id,
"user_id": self.user_id.stringify_meta(),
"title": re.sub("<", "<", re.sub(">", ">", self.title)),
"yami": self.yami,
"genre": self.genre,
"created": self.created,
"modified": self.modified,
"status": self.status,
"score": self.score,
"star_count": self.star_set.count(),
"quescount_all": ques.count(),
"quescount_unanswered": unanswered.count()
}
mondai_genre_enum = {
0: _("Albatross"),
1: _("20th-Door"),
2: _("Little Albat"),
3: _("Others & Formal")
}
mondai_status_enum = {
0: _("Unsolved"),
1: _("Solved"),
2: _("Dazed"),
3: _("Hidden"),
4: _("Forced Hidden")
}
class Shitumon(models.Model):
id = models.AutoField(max_length=11, null=False, primary_key=True)
user_id = models.ForeignKey(User, db_column='user_id')
mondai_id = models.ForeignKey(Mondai, db_column='mondai_id')
shitumon = models.TextField(_('shitumon'), null=False)
kaitou = models.TextField(_('kaitou'), null=True)
good = models.BooleanField(_('good_ques'), default=False, null=False)
true = models.BooleanField(_('true_ques'), default=False, null=False)
askedtime = models.DateTimeField(_('askedtime'), null=False)
answeredtime = models.DateTimeField(_('answeredtime'), null=True)
class Meta:
verbose_name = _("Question")
def __str__(self):
return "[%s]%s: {%s} puts {%50s}" % (self.mondai_id.id, self.mondai_id,
self.user_id, self.shitumon)
def stringify_meta(self):
return self.stringify()
def stringify(self):
return {
"id": self.id,
"user_id": self.user_id.stringify_meta(),
"owner_id": self.mondai_id.user_id.stringify_meta(),
"shitumon": self.shitumon,
"kaitou": self.kaitou,
"good": self.good,
"true": self.true,
"askedtime": self.askedtime,
"answeredtime": self.answeredtime
}
class Lobby(models.Model):
id = models.AutoField(max_length=11, null=False, primary_key=True)
user_id = models.ForeignKey(User, db_column='user_id')
channel = models.TextField(_('channel'), default="lobby", null=False)
content = models.TextField(_('content'), null=False)
#score = models.SmallIntegerField(_('score'), default=50)
class Meta:
permissions = (
("can_add_info", _("Can add homepage info")),
("can_grant_award", _("Can grant awards to users")), )
verbose_name = _("Lobby")
def __str__(self):
return "[%s]: {%s} puts {%50s}" % (self.channel, self.user_id,
self.content)
def stringify_meta(self):
return self.stringify()
def stringify(self):
return {
"id": self.id,
"user_id": self.user_id.stringify_meta(),
"channel": self.channel,
"content": self.content
}
class Comment(models.Model):
user_id = models.ForeignKey(User, db_column='user_id')
mondai_id = models.ForeignKey(Mondai, db_column='mondai_id')
content = models.TextField(_('content'), null=False)
spoiler = models.BooleanField(_('spoiler'), default=False)
class Meta:
verbose_name = _("Comment")
def __str__(self):
return "{%s} commented on {%s}" % (self.user_id, self.mondai_id.title)
def stringify_meta(self):
return self.stringify()
def stringify(self):
return {
"id": self.id,
"user_id": self.user_id.stringify_meta(),
"mondai_id": self.mondai_id.stringify_meta(),
"content": self.content
}
class Star(models.Model):
user_id = models.ForeignKey(User, db_column='user_id')
mondai_id = models.ForeignKey(Mondai, db_column='mondai_id')
value = models.FloatField(_('Value'), null=False, default=0)
class Meta:
verbose_name = _("Star")
def __str__(self):
return "%s -- %.1f --> %s" % (self.user_id, self.value, self.mondai_id)
def stringify_meta(self):
return self.stringify()
def stringify(self):
return {
"id": self.id,
"user_id": self.user_id.stringify_meta(),
"mondai_id": self.mondai_id.stringify_meta(),
"value": self.value
}
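# -- Example (not part of the original models) --------------------------------
# A minimal sketch of how the stringify()/stringify_meta() dictionaries above
# are usually consumed: returning one soup as JSON. The view name and URL
# parameter are illustrative assumptions and would normally live in views.py;
# JsonResponse's default DjangoJSONEncoder takes care of the datetime fields.
from django.http import JsonResponse
def mondai_detail(request, mondai_id):
    """Illustrative view: serialise one Mondai with its question counts."""
    soup = Mondai.objects.get(id=mondai_id)
    return JsonResponse(soup.stringify_meta())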
|
|
# Copyright (C) 2014 Napuzba [kobi@napuzba.com]
# Licensed under MIT license [http://opensource.org/licenses/MIT]
import base64
import codecs
import ftplib
import hashlib
import io
import time
import urllib3
import socket
import os
import os.path
import sys
from .cachemode import CacheMode
from .filerequest import FileRequest, RequestState
from .ftpinfo import FtpInfo
from . import helpers
class FileLoader:
'''
    FileLoader gives uniform access to local files and to remote files -- files
    reachable through the HTTP and FTP protocols. The files can be cached locally.
'''
def __init__(self,dirCache="cache"):
'''
Create a new FileLoader
:param str dirCache:
The cache directory
'''
self.dirCache = dirCache
self.cachePattern = [2]
urllib3.disable_warnings()
self.http = urllib3.PoolManager()
helpers.ensureFoler(self.dirCache)
def load(self,
source ,
target = '' ,
params = {} ,
timeout = 0 ,
retries = 0 ,
contentType = '' ,
onDownload = None,
autoLoad = True,
cacheMode = CacheMode.Enabled ,
cacheTime = 0 ,
headers = {} ):
        '''
        Download the file at source (an HTTP/FTP URL or a local path) and
        return the resulting FileRequest; the keyword arguments are passed
        straight through to FileRequest.
        '''
req = FileRequest(
source ,
target = target ,
params = params ,
timeout = timeout ,
retries = retries ,
contentType = contentType,
onDownload = onDownload ,
autoLoad = autoLoad ,
cacheMode = cacheMode ,
cacheTime = cacheTime ,
headers = headers
)
return self.request(req)
def request(self, req):
'''
Downloads a file located on req.source
:param DownloadRequest req:
The download request
'''
req.clear()
if req.source.startswith('http'):
self.loadHttp (req)
elif req.source.startswith('ftp' ):
self.loadFtp (req)
elif req.source != '' :
self.loadLocal(req)
if req.valid and req.autoLoad:
req.loadData()
return req
def checkCache(self,req):
'''
Check whether the file in cache
:param FileRequest req:
The download request
'''
req.target = self.findCachedName(req.source , req.target, True)
req.state = RequestState.PendingCache
inCache = False
if os.path.exists(req.target):
cacheTime = time.time() - os.path.getmtime(req.target)
if req.cacheTime != 0 and cacheTime >= req.cacheTime :
os.remove(req.target)
else:
inCache = True
if (req.cacheMode == CacheMode.InCache or req.cacheMode == CacheMode.Enabled) and inCache:
self.log(u'Find Cache: <{1}> as <{0}>',req.source,req.target)
req.state = RequestState.Cached
req.target = req.target
return
if req.cacheMode == CacheMode.InCache and inCache == False:
self.log(u'Miss Cache: <{1}> as <{0}>',req.source,req.target)
req.state = RequestState.FailMissCache
return
req.state = RequestState.PendingDownload
def loadHttp(self,req):
'''
Downloads a file located on req.source
:param DownloadRequest req:
The download request
'''
self.checkCache(req)
if req.state != RequestState.PendingDownload:
return
counter = 0
if sys.version_info[0] == 2:
if isinstance(req.source,str):
req.source = req.source.encode('utf8')
while (counter <= req.retries) and (req.state != RequestState.Downloaded):
if counter >= 1:
self.log(u"Retry {0} : {1}",counter, req.source)
time.sleep(1)
counter += 1
try:
ff = self.http.urlopen(req.action , req.source ,preload_content=False )
req.rHeaders = ff.headers
req.rStatus = ff.status
if ff.status != 200 and ff.status >= 400:
continue
if req.contentType != '' and ff.headers['Content-Type'].find(req.contentType) == -1:
req.state = RequestState.FailDownload
break
fileSize = None
if 'Content-Length' in ff.headers:
fileSize = int(ff.headers['Content-Length'])
chunkSize = 4*1024
bb = io.BufferedReader(ff, chunkSize)
with open(req.target, "wb") as fl:
lastTime = time.time()
downSize = lastSize = downSpeed = 0
while True:
data = bb.read(chunkSize)
dataSize = len(data)
fl.write(data)
downSize += dataSize
if req.onDownload != None:
deltaTime = time.time() - lastTime
if deltaTime >= 1:
downSpeed = (downSize - lastSize) / deltaTime
lastTime, lastSize = time.time() , downSize
req.onDownload(fileSize,downSize, downSpeed)
if downSize == fileSize or dataSize < chunkSize or dataSize == 0:
break
req.target = req.target
req.state = RequestState.Downloaded
except IOError as ee:
req.state = RequestState.FailDownload
if hasattr(ee, 'reason'):
self.log(u'Fail download <{0}>. Reason: {1}',req.source, str(ee.reason))
elif hasattr(ee, 'code'):
self.log(u'Fail download <{0}>. The server could not fulfill the request. Error code: {1}',req.source,str(ee.code))
except Exception as ee:
req.state = RequestState.FailDownload
self.log(u'Fail download <{0}>',req.source)
self.logException(ee)
if req.failed and os.path.exists(req.target):
os.remove(req.target)
def loadFtp(self, req):
'''
Downloads a file located on source
'''
self.checkCache(req)
if req.state != RequestState.PendingDownload:
return
ftpInfo = FtpInfo().parse(req.source)
try:
self.ftp = ftplib.FTP()
self.ftp.connect(ftpInfo.host,ftpInfo.port)
self.log(u'**** Connected to host "{0}"',ftpInfo.host)
except (socket.error, socket.gaierror) as e:
self.log(u'ERR: cannot reach "{0}"',ftpInfo.host)
            req.state = RequestState.FailDownload
return
try:
if ftpInfo.username != '':
self.ftp.login(ftpInfo.username, ftpInfo.password)
self.log(u'**** Logged in as {0}', ftpInfo.username)
else:
self.ftp.login()
self.log(u'**** Logged in as "anonymous"')
except ftplib.error_perm:
self.log(u'ERR: cannot login')
            req.state = RequestState.FailDownload
self.ftp.quit()
return
try:
self.ftp.cwd(ftpInfo.path)
self.log(u'**** Changed to "{0}" folder',ftpInfo.path)
except ftplib.error_perm:
self.log(u'ERR: cannot CD to "{0}"',ftpInfo.path)
            req.state = RequestState.FailDownload
self.ftp.quit()
return
self.bytes = 0
try:
self.ftp.retrbinary('RETR {0}'.format(ftpInfo.file), open(req.target, 'wb').write)
self.target = req.target
            self.log(u'**** Downloaded "{0}" to "{1}"',ftpInfo.file, self.target)
self.ftp.quit()
req.state = RequestState.Downloaded
except ftplib.error_perm:
            self.log(u'ERR: cannot read file "{0}"',ftpInfo.file)
            os.unlink(req.target)
req.state = RequestState.FailDownload
self.ftp.quit()
return
def loadLocal(self, req):
'''
Downloads a file located on source
'''
if os.path.exists(req.source) == False:
req.state = RequestState.FailDownload
return
req.state = RequestState.Downloaded
req.target = req.source
def findCachedName(self, source, target, isCached):
'''
Find the cached name of the file
:param source:
source file url
:param target:
target file path
:param isCached:
            whether the target is cached
:return :str:
the cached name of the file
'''
if target.startswith('@'):
return target[1:]
sum = self.hash(source,16) if isCached else '0000000000000000'
dirCache = self.dirCache
kk = 0
for aa in self.cachePattern:
dirCache = os.path.join(dirCache, sum[kk:kk+aa])
kk += aa
helpers.ensureFoler(dirCache)
if target == '':
return os.path.join( dirCache , 'file_{0}.bin'.format(sum) )
extIndex = target.rfind('.')
if extIndex != -1:
return os.path.join( dirCache , u'{0}_{1}{2}'.format(target[:extIndex],sum,target[extIndex:]) )
return os.path.join( dirCache , '{0}_{1}{2}'.format(target, sum,'.bin') )
def hash(self, ss, size = 6):
'''
Hash ss
:param str ss:
String to hash
:param int size:
Hash size in characters
:return :str:
The hash value
'''
if sys.version_info[0] >= 3 or isinstance(ss,unicode):
ss = ss.encode('utf8')
hh = base64.urlsafe_b64encode(hashlib.sha224(ss).digest())
if sys.version_info[0] == 3:
hh = hh.decode('ascii')
return hh[:size]
def log(self,msg,*args):
'''
Log message
:param str msg:
message to log
'''
pass # sys.stdout.write(u'{0} : {1}\n'.format( "FileLoader" , msg.format(*args) ))
def logException(self,ee):
'''
Log exception
:param Exception ee:
exception
'''
pass # sys.stdout.write(u'{0} : {1}\n'.format( "FileLoader" , str(ee) ))
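# -- Example (not part of the original module) --------------------------------
# A minimal sketch of how FileLoader is typically driven. The URL, cache
# directory and the progress printout are illustrative assumptions; the keyword
# names and the onDownload(fileSize, downSize, downSpeed) signature match the
# code above.
if __name__ == "__main__":
    def show_progress(total, done, speed):
        # total is None when the server sends no Content-Length header
        sys.stdout.write('downloaded {0} of {1} bytes ({2:.0f} B/s)\n'
                         .format(done, total, speed))
    loader = FileLoader(dirCache="cache")
    req = loader.load('http://example.com/data.bin',
                      retries=2,
                      cacheMode=CacheMode.Enabled,
                      onDownload=show_progress)
    if req.valid:
        sys.stdout.write('saved to {0}\n'.format(req.target))
    else:
        sys.stdout.write('download failed, state = {0}\n'.format(req.state))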
|
|
"""
File handler to support different file extensions.
Uses reflectometer registry utility.
The default readers are found in the 'readers' sub-module
and registered by default at initialization time.
To add a new default reader, one must register it in
the register_readers method found in readers/__init__.py.
A utility method (find_plugins) is available to inspect
a directory (for instance, a user plug-in directory) and
look for new readers/writers.
"""
#####################################################################
# This software was developed by the University of Tennessee as part of the
# Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
# project funded by the US National Science Foundation.
# See the license text in license.txt
# copyright 2008, University of Tennessee
######################################################################
import os
import sys
import logging
import time
from zipfile import ZipFile
from sas.sascalc.data_util.registry import ExtensionRegistry
# Default readers are defined in the readers sub-module
import readers
from loader_exceptions import NoKnownLoaderException, FileContentsException,\
DefaultReaderException
from readers import ascii_reader
from readers import cansas_reader
from readers import cansas_reader_HDF5
logger = logging.getLogger(__name__)
class Registry(ExtensionRegistry):
"""
Registry class for file format extensions.
Readers and writers are supported.
"""
def __init__(self):
super(Registry, self).__init__()
# Writers
self.writers = {}
# List of wildcards
self.wildcards = ['All (*.*)|*.*']
# Creation time, for testing
self._created = time.time()
# Register default readers
readers.read_associations(self)
def load(self, path, format=None):
"""
Call the loader for the file type of path.
:param path: file path
:param format: explicit extension, to force the use
of a particular reader
Defaults to the ascii (multi-column), cansas XML, and cansas NeXuS
readers if no reader was registered for the file's extension.
"""
# Gets set to a string if the file has an associated reader that fails
msg_from_reader = None
try:
return super(Registry, self).load(path, format=format)
except NoKnownLoaderException as nkl_e:
pass # Try the ASCII reader
except FileContentsException as fc_exc:
# File has an associated reader but it failed.
# Save the error message to display later, but try the 3 default loaders
msg_from_reader = fc_exc.message
except Exception:
pass
# File has no associated reader, or the associated reader failed.
# Try the ASCII reader
try:
ascii_loader = ascii_reader.Reader()
return ascii_loader.read(path)
except DefaultReaderException:
pass # Loader specific error to try the cansas XML reader
except FileContentsException as e:
if msg_from_reader is None:
raise RuntimeError(e.message)
        # ASCII reader failed - try CanSAS XML reader
try:
cansas_loader = cansas_reader.Reader()
return cansas_loader.read(path)
except DefaultReaderException:
pass # Loader specific error to try the NXcanSAS reader
except FileContentsException as e:
if msg_from_reader is None:
raise RuntimeError(e.message)
except Exception:
pass
# CanSAS XML reader failed - try NXcanSAS reader
try:
cansas_nexus_loader = cansas_reader_HDF5.Reader()
return cansas_nexus_loader.read(path)
except DefaultReaderException as e:
logging.error("No default loader can load the data")
# No known reader available. Give up and throw an error
if msg_from_reader is None:
msg = "\nUnknown data format: {}.\nThe file is not a ".format(path)
msg += "known format that can be loaded by SasView.\n"
raise NoKnownLoaderException(msg)
else:
# Associated reader and default readers all failed.
# Show error message from associated reader
raise RuntimeError(msg_from_reader)
except FileContentsException as e:
err_msg = msg_from_reader if msg_from_reader is not None else e.message
raise RuntimeError(err_msg)
def find_plugins(self, dir):
"""
Find readers in a given directory. This method
can be used to inspect user plug-in directories to
find new readers/writers.
:param dir: directory to search into
:return: number of readers found
"""
readers_found = 0
temp_path = os.path.abspath(dir)
if not os.path.isdir(temp_path):
temp_path = os.path.join(os.getcwd(), dir)
if not os.path.isdir(temp_path):
temp_path = os.path.join(os.path.dirname(__file__), dir)
if not os.path.isdir(temp_path):
temp_path = os.path.join(os.path.dirname(sys.path[0]), dir)
dir = temp_path
# Check whether the directory exists
if not os.path.isdir(dir):
msg = "DataLoader couldn't locate DataLoader plugin folder."
msg += """ "%s" does not exist""" % dir
logger.warning(msg)
return readers_found
for item in os.listdir(dir):
full_path = os.path.join(dir, item)
if os.path.isfile(full_path):
# Process python files
if item.endswith('.py'):
toks = os.path.splitext(os.path.basename(item))
try:
sys.path.insert(0, os.path.abspath(dir))
module = __import__(toks[0], globals(), locals())
if self._identify_plugin(module):
readers_found += 1
except:
msg = "Loader: Error importing "
msg += "%s\n %s" % (item, sys.exc_value)
logger.error(msg)
# Process zip files
elif item.endswith('.zip'):
try:
# Find the modules in the zip file
zfile = ZipFile(item)
nlist = zfile.namelist()
sys.path.insert(0, item)
for mfile in nlist:
try:
# Change OS path to python path
fullname = mfile.replace('/', '.')
fullname = os.path.splitext(fullname)[0]
module = __import__(fullname, globals(),
locals(), [""])
if self._identify_plugin(module):
readers_found += 1
except:
msg = "Loader: Error importing"
msg += " %s\n %s" % (mfile, sys.exc_value)
logger.error(msg)
except:
msg = "Loader: Error importing "
msg += " %s\n %s" % (item, sys.exc_value)
logger.error(msg)
return readers_found
def associate_file_type(self, ext, module):
"""
Look into a module to find whether it contains a
Reader class. If so, APPEND it to readers and (potentially)
to the list of writers for the given extension
:param ext: file extension [string]
:param module: module object
"""
reader_found = False
if hasattr(module, "Reader"):
try:
# Find supported extensions
loader = module.Reader()
if ext not in self.loaders:
self.loaders[ext] = []
# Append the new reader to the list
self.loaders[ext].append(loader.read)
reader_found = True
# Keep track of wildcards
type_name = module.__name__
if hasattr(loader, 'type_name'):
type_name = loader.type_name
wcard = "%s files (*%s)|*%s" % (type_name, ext.lower(),
ext.lower())
if wcard not in self.wildcards:
self.wildcards.append(wcard)
# Check whether writing is supported
if hasattr(loader, 'write'):
if ext not in self.writers:
self.writers[ext] = []
# Append the new writer to the list
self.writers[ext].append(loader.write)
except:
msg = "Loader: Error accessing"
msg += " Reader in %s\n %s" % (module.__name__, sys.exc_value)
logger.error(msg)
return reader_found
def associate_file_reader(self, ext, loader):
"""
Append a reader object to readers
:param ext: file extension [string]
:param module: reader object
"""
reader_found = False
try:
# Find supported extensions
if ext not in self.loaders:
self.loaders[ext] = []
# Append the new reader to the list
self.loaders[ext].append(loader.read)
reader_found = True
# Keep track of wildcards
            type_name = loader.__class__.__name__
            if hasattr(loader, 'type_name'):
                type_name = loader.type_name
wcard = "%s files (*%s)|*%s" % (type_name, ext.lower(),
ext.lower())
if wcard not in self.wildcards:
self.wildcards.append(wcard)
except:
msg = "Loader: Error accessing Reader "
msg += "in %s\n %s" % (loader.__name__, sys.exc_value)
logger.error(msg)
return reader_found
def _identify_plugin(self, module):
"""
Look into a module to find whether it contains a
Reader class. If so, add it to readers and (potentially)
to the list of writers.
:param module: module object
"""
reader_found = False
if hasattr(module, "Reader"):
try:
# Find supported extensions
loader = module.Reader()
for ext in loader.ext:
if ext not in self.loaders:
self.loaders[ext] = []
# When finding a reader at run time,
# treat this reader as the new default
self.loaders[ext].insert(0, loader.read)
reader_found = True
# Keep track of wildcards
type_name = module.__name__
if hasattr(loader, 'type_name'):
type_name = loader.type_name
wcard = "%s files (*%s)|*%s" % (type_name, ext.lower(),
ext.lower())
if wcard not in self.wildcards:
self.wildcards.append(wcard)
# Check whether writing is supported
if hasattr(loader, 'write'):
for ext in loader.ext:
if ext not in self.writers:
self.writers[ext] = []
self.writers[ext].insert(0, loader.write)
except:
msg = "Loader: Error accessing Reader"
msg += " in %s\n %s" % (module.__name__, sys.exc_value)
logger.error(msg)
return reader_found
def lookup_writers(self, path):
"""
:return: the loader associated with the file type of path.
:Raises ValueError: if file type is not known.
"""
# Find matching extensions
extlist = [ext for ext in self.extensions() if path.endswith(ext)]
# Sort matching extensions by decreasing order of length
        extlist.sort(key=len, reverse=True)
# Combine loaders for matching extensions into one big list
writers = []
for L in [self.writers[ext] for ext in extlist]:
writers.extend(L)
# Remove duplicates if they exist
if len(writers) != len(set(writers)):
result = []
for L in writers:
if L not in result:
result.append(L)
            writers = result
# Raise an error if there are no matching extensions
if len(writers) == 0:
raise ValueError, "Unknown file type for " + path
# All done
return writers
def save(self, path, data, format=None):
"""
Call the writer for the file type of path.
Raises ValueError if no writer is available.
Raises KeyError if format is not available.
May raise a writer-defined exception if writer fails.
"""
if format is None:
writers = self.lookup_writers(path)
else:
writers = self.writers[format]
for fn in writers:
try:
return fn(path, data)
except:
pass # give other loaders a chance to succeed
# If we get here it is because all loaders failed
raise # reraises last exception
class Loader(object):
"""
Utility class to use the Registry as a singleton.
"""
## Registry instance
__registry = Registry()
def associate_file_type(self, ext, module):
"""
Look into a module to find whether it contains a
Reader class. If so, append it to readers and (potentially)
to the list of writers for the given extension
:param ext: file extension [string]
:param module: module object
"""
return self.__registry.associate_file_type(ext, module)
def associate_file_reader(self, ext, loader):
"""
Append a reader object to readers
:param ext: file extension [string]
:param module: reader object
"""
return self.__registry.associate_file_reader(ext, loader)
def load(self, file, format=None):
"""
Load a file
:param file: file name (path)
:param format: specified format to use (optional)
:return: DataInfo object
"""
return self.__registry.load(file, format)
def save(self, file, data, format):
"""
Save a DataInfo object to file
:param file: file name (path)
:param data: DataInfo object
:param format: format to write the data in
"""
return self.__registry.save(file, data, format)
def _get_registry_creation_time(self):
"""
Internal method used to test the uniqueness
of the registry object
"""
return self.__registry._created
def find_plugins(self, directory):
"""
Find plugins in a given directory
:param dir: directory to look into to find new readers/writers
"""
return self.__registry.find_plugins(directory)
def get_wildcards(self):
"""
Return the list of wildcards
"""
return self.__registry.wildcards
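# -- Example (not part of the original module) --------------------------------
# A minimal sketch of driving the Loader singleton above, plus the shape of a
# plug-in module that find_plugins() would pick up. The plug-in skeleton and
# the 'plugins' directory name are illustrative assumptions; _identify_plugin()
# only requires a module-level class named Reader exposing 'ext' and 'read'
# (and, optionally, 'write' and 'type_name').
#
#     # hypothetical plug-in file, e.g. plugins/my_reader.py
#     class Reader(object):
#         type_name = "My format"
#         ext = ['.myf']
#         def read(self, path):
#             ...          # return a DataInfo object
#         def write(self, path, data):
#             ...          # optional: also registered as a writer
#
if __name__ == "__main__":
    loader = Loader()
    loader.find_plugins('plugins')  # only logs a warning if the folder is absent
    for wildcard in loader.get_wildcards():
        print("registered: %s" % wildcard)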
|
|
from datetime import datetime
import json
from unittest.mock import Mock, patch
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from builder.models import BranchBuild, Build, Deploy, Environment, Owner, Site
def ordered(obj):
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
else:
return obj
def get_mock_data(location, method):
path = 'github/mock_data/{0}/{1}.json'.format(location, method)
with open(path) as json_file:
return json.load(json_file)
class v1ContractTestCase(APITestCase):
"""
    WARNING: Making changes to these tests may break expected user
    functionality for calls to v1 of the API and thus break the implied
    service contract.
    Code changes that affect these tests should be made with great care.
"""
def setUp(self):
self.user = User.objects.create_user(username="testuser", password="a")
token = 'abc123'
self.header = {'HTTP_AUTHORIZATION': 'Bearer {}'.format(token)}
social = self.user.social_auth.create(provider='github', uid=123)
social.extra_data['access_token'] = token
social.save()
self.owner = Owner.objects.create(name='isl', github_id=607333)
self.site = Site.objects.create(
owner=self.owner, name='foo', github_id=45864453)
Environment.objects.create(site=self.site, name='Production',
url='foo.example.com')
self.env = Environment.objects.create(site=self.site, name='Staging',
url='foo-staging.example.com')
def test_get_auth_github(self):
""" Get public app key for calling github """
url = '/v1/auth/github/'
self.assertEqual(reverse('get_token'), url)
expected = {'client_id': 'foo'}
with patch.dict('os.environ', {'SOCIAL_AUTH_GITHUB_KEY': 'foo'}):
response = self.client.get(url)
self.assertEqual(expected, response.data)
@patch('github.views.do_auth')
@patch('core.helpers.requests.post')
def test_post_auth_github(self, mock_post, mock_auth):
""" Complete auth with temp token. Retrieve oAuth token """
url = '/v1/auth/github/'
self.assertEqual(reverse('get_token'), url)
# mocking the return object from POSTing to github API
mock_post_response = Mock(status_code=200)
mock_post_response.json.return_value = {"access_token": "testtoken"}
mock_post.return_value = mock_post_response
# mocking the do_auth method to just return our user
mock_auth.return_value = self.user
expected = {'user': {'username': 'testuser'}, 'token': 'testtoken'}
response = self.client.post(url, {
'code': 'asdf',
'clientId': 'asdf',
'redirectUrl': 'asdf'
}, **self.header)
self.assertEqual(expected, response.data)
def test_get_user_details(self):
""" Returns details about the user who owns the passed token """
url = '/v1/user/'
self.assertEqual(reverse('user_details'), url)
expected = {'username': 'testuser'}
response = self.client.get(url, **self.header)
self.assertEqual(expected, response.data)
@patch('core.helpers.requests.get')
def test_get_users_repos(self, mock_get):
""" Get repos from github that the user has access to """
url = '/v1/repos/'
self.assertEqual(reverse('deployable_repos'), url)
expected = get_mock_data('github', 'get_repos')
# get list of repos from github
get_repos = Mock(status_code=200)
get_repos.json.return_value = expected
mock_get.return_value = get_repos
response = self.client.get(url, **self.header)
self.assertEqual(expected, response.data)
@patch('core.helpers.requests.post')
@patch('core.helpers.requests.get')
def test_post_projects(self, mock_get, mock_post):
""" Register a project """
url = '/v1/projects/'
self.assertEqual(reverse('project_list'), url)
# Repo details from github
get_repo = Mock(status_code=200)
get_repo.json.return_value = get_mock_data('github', 'get_repo')
mock_get.return_value = get_repo
# Creating webhook and deploy key on github repo
post_github = Mock(status_code=200)
post_github.json.return_value = {'id': 123}
mock_post.return_value = post_github
expected = get_mock_data('api_responses', 'new_site')
with patch.dict('os.environ', {'BASE_URL': 'example.com'}):
response = self.client.post(url, {'github': 'isl/bar'},
**self.header)
self.assertEqual(ordered(expected), ordered(response.data))
@patch('core.helpers.requests.get')
def test_get_projects(self, mock_get):
""" List of registered projects """
url = '/v1/projects/'
self.assertEqual(reverse('project_list'), url)
# users orgs from github
get_repo = Mock(status_code=200)
get_repo.json.return_value = get_mock_data('github', 'get_user_orgs')
mock_get.return_value = get_repo
expected = get_mock_data('api_responses', 'projects')
response = self.client.get(url, **self.header)
self.assertEqual(ordered(expected), ordered(response.data))
@patch('core.helpers.requests.get')
@patch('core.helpers.requests.delete')
def test_delete_project(self, mock_delete, mock_get):
""" Delete a project that is already registered """
url = '/v1/projects/45864453'
self.assertEqual(reverse('project_details', args=['45864453', ]), url)
# Repo details from github
get_repo = Mock(status_code=200)
get_repo.json.return_value = get_mock_data('github', 'get_repo')
mock_get.return_value = get_repo
# attempts to call github to delete webhook and deploy key
mock_delete.return_value = Mock(status_code=204)
response = self.client.delete(url, **self.header)
self.assertEqual(204, response.status_code)
@patch('core.helpers.requests.get')
def test_get_project_details(self, mock_get):
""" Returns details for a project that is registered """
url = '/v1/projects/45864453'
self.assertEqual(reverse('project_details', args=['45864453', ]), url)
# Repo details from github
get_repo = Mock(status_code=200)
get_repo.json.return_value = get_mock_data('github', 'get_repo')
mock_get.return_value = get_repo
expected = get_mock_data('api_responses', 'new_site')
with patch.dict('os.environ', {'BASE_URL': 'example.com'}):
response = self.client.get(url, **self.header)
self.assertEqual(ordered(expected), ordered(response.data))
@patch('core.helpers.requests.post')
@patch('core.helpers.requests.get')
def test_deploy_project(self, mock_get, mock_post):
""" Attempts to deploy the latest commit for the project """
url = '/v1/projects/45864453/builds'
self.assertEqual(reverse('project_builds', args=['45864453', ]), url)
# Repo details from github
get_repo = Mock(status_code=200)
get_repo.json.return_value = get_mock_data('github', 'get_repo')
# branch details from github
get_branch = Mock(status_code=200)
get_branch.json.return_value = get_mock_data('github', 'get_branch')
mock_get.side_effect = [get_repo, get_branch]
# Creating webhook and deploy key on github repo
mock_post.return_value = Mock(status_code=200)
expected = get_mock_data('api_responses', 'build')
# Build object has an auto-generated creation date. mock that.
with patch('django.utils.timezone.now') as mock_now:
mock_now.return_value = datetime(2016, 5, 4)
response = self.client.post(url, **self.header)
self.assertEqual(ordered(expected), ordered(response.data))
def test_get_projects_builds(self):
""" Returns a list of all build objects attached to the project """
url = '/v1/projects/45864453/builds'
self.assertEqual(reverse('project_builds', args=['45864453', ]), url)
with patch('django.utils.timezone.now') as mock_now:
mock_now.return_value = datetime(2016, 5, 4)
BranchBuild.objects.create(
site=self.site, branch='staging', status=Build.BUILDING,
git_hash='d4f846545faa92894c6bf39dada28023b6ff9418')
expected = get_mock_data('api_responses', 'builds')
response = self.client.get(url, **self.header)
self.assertEqual(ordered(expected), ordered(response.data))
def test_promote_build(self):
""" Attempts to promote a successful build to a given environment """
url = '/v1/projects/45864453/environments/production'
self.assertEqual(reverse('promote_environment',
args=['45864453', 'production']), url)
build = BranchBuild.objects.create(
site=self.site, branch='staging', status=Build.SUCCESS,
git_hash='abc123')
Deploy.objects.create(build=build, environment=self.env)
response = self.client.post(url, {"uuid": build.uuid}, **self.header)
self.assertEqual(201, response.status_code)
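# -- Example (not part of the original tests) ----------------------------------
# A small self-contained check of the ordered() helper above: it recursively
# sorts dicts and lists so payloads compare equal regardless of key or element
# order. The sample payloads are illustrative; note that a plain module-level
# function like this is collected by pytest but not by the Django test runner.
def test_ordered_helper_ignores_ordering():
    left = {'tags': ['b', 'a'], 'meta': {'x': 1, 'y': 2}}
    right = {'meta': {'y': 2, 'x': 1}, 'tags': ['a', 'b']}
    assert ordered(left) == ordered(right)
    assert ordered(left) != ordered({'tags': ['a', 'c'], 'meta': {'x': 1, 'y': 2}})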
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Project Details:
DR14_arrange.py
Arranges downloaded data from SDSS-DR14 into a structure similar to that of 'bubbleimg'
Link to Bubbleimg: "https://github.com/aileisun/bubbleimg"
Created on Fri Aug 11 11:38:47 2017
__author__ = "nnarenraju"
__copyright__ = "Copyright 2017, AGN_Classification"
__credits__ = "nnarenraju"
__license__ = "Apache License 2.0"
__version__ = "1.0.1"
__maintainer__ = "nnarenraju"
__email__ = "nnarenraju@gmail.com"
__status__ = "inComplete"
Github Repository: "https://github.com/nnarenraju/ASIAA_SSP"
"""
import os
import glob
import functools
import astropy.table as at
import astropy.coordinates as ac
from functools import reduce
from astropy.io import ascii
from PyAstronomy import pyasl
from astropy import units as u
from astropy.coordinates import Angle
from distutils.dir_util import copy_tree
class Make_readable(object):
"""
Re-organises the DR14 default directory structure into a structure similar
to that of bubbleimg.
    Groups together the spectrum files that are within 5 arcsec of the instance.
If more than one spectrum is available for a given instance, the closest
spectrum wrt angular distance is chosen and moved into the good directory.
If there exists no spectrum within 5 arcsec of a given instance, the
instance will be put into the except directory.
"""
def __init__(self):
"""Current status of the Object."""
self._status = False
self._make_combined_table = False
self._clustering = False
self._make_spectrum = False
self._make_table = False
def _initialise(self, location):
"""Returns astropy table of given path to FITS/CSV/txt file."""
if "txt" in location:
return ascii.read(location)
else:
return at.Table.read(location)
def _combine(self, table, append_table):
"""Returns the concatenated astropy tables"""
if type(table)!=at.Table or type(append_table)!=at.Table:
raise TypeError('Input table is not an astropy table')
return at.join(table, append_table, join_type='outer')
def combine_tables(self, filename, dir_path=os.getcwd(), name='combined'):
"""Combine all tables that contain the filename as part of its name."""
location = glob.glob(dir_path+"*"+filename+"*")
        if not location:
raise NameError('Table not located in the given path')
else:
tables = map(self._initialise, location)
#Sanity check 0
if type(tables[0])!= at.Table:
raise TypeError('Astropy table could not be created')
combined_table = reduce(self._combine, tables)
if len(combined_table):
self._make_combined_table = True
try:
combined_table.writeto(name+'.fits')
except:
txt_file=open('download_rsync.txt', 'w')
c = combined_table
_list = [i for i in c[c.colnames[0]]]
txt_file.write("%s\n" %_list)
return glob.glob(dir_path + name +'.fits')
    def __SAS__(self, constraint, cluster_table, radec_table=False):
"""Search Around Sky function"""
def __SkyCoord__(table):
table = ac.SkyCoord(ra = table['ra']*u.degree,
dec = table['dec']*u.degree)
return table
        if radec_table is False:
radec_table=cluster_table
radec_table = __SkyCoord__(radec_table)
cluster_table = __SkyCoord__(cluster_table)
idxc1,idxc2,_,_=cluster_table.search_around_sky(radec_table,constraint)
return idxc1, idxc2
def _cluster(self, cluster_path, radec_path=False, constraint=5):
"""
Clusters the spectra obtained with the 'n' arcsec constraint.
table_path = Path of congregated spectra table list
NOTE: Use make_spectrum_list to congregate Spectra Tables.
"""
constraint=float(constraint)*u.arcsec
cluster_table = self._initialise(cluster_path)
        if not radec_path:
radec_table = False
else:
radec_table = self._initialise(radec_path)
idxc1, idxc2 = self.__SAS__(constraint, cluster_table=cluster_table,
radec_table=radec_table)
        cluster = list(set(idxc1))
        for x in range(len(cluster)):
            index = [i for i, j in enumerate(idxc1) if j == cluster[x]]
            cluster[x] = [cluster[x], [idxc2[i] for i in index]]
if len(cluster):
self._clustering=True
return cluster
def make_spectrum(self, table_path, input_table_path, dir_path=os.getcwd()):
"""
Using the clusters formed using the _cluster definition this definition
compares the input table with the clustered spectra table and moves the
instances without a spectrum to except.
"""
def _return_paths(database, RUN2D):
"""Returns a path based on given iterable"""
spectrum_path=dir_path+'/dr14/'+database+'/spectro'+'/redux/'+ \
RUN2D+'/spectra'+'/lite/'
return spectrum_path
def __mkdir__(dir_path, dir_name):
"""Creates requested directory and changes current directory"""
if not os.path.exists(dir_path+dir_name):
os.makedirs(dir_path+dir_name)
os.system("cd "+dir_path+dir_name)
def __sanity__(path):
"""Check consistency for number of files/directories"""
#Sanity Check 0
files = folders = 0
for _, dirnames, filenames in os.walk(path):
files += len(filenames)
folders += len(dirnames)
return files, folders
def __mkobj__(element, good=False):
"""Make directory for object eg., SDSSJXXXX:XXXX/CSV"""
#Object_name should be a string object
if not os.path.exists(object_name):
os.makedirs(object_name)
if good:
return __mkgood__(element, object_name)
else:
try:
ascii.write(element, 'sdss_xid.csv')
return True
except:
return False
def __mkgood__(element, object_name):
"""Move folder from DR14 using plate ID"""
            entry = clusters[[c[0] for c in clusters].index(element)]
            pos_cluster = entry[1]
            pos_radec = entry[0]
AngDist = []
for pos in pos_cluster:
AngDist.append(Angle(pyasl.getAngDist(table['ra'][pos],
table['dec'][pos],
table['ra'][pos_radec],
table['dec'][pos_radec]),
u.degree))
pos_dr14 = pos_cluster[AngDist.index(min(AngDist))]
            plate = str(table[pos_dr14]['plate'])
            stored_path = ''
            for path in required_paths:
                try:
                    copy_tree(path + '/' + plate, object_name)
                    stored_path = object_name + '/' + plate
                except:
                    continue
if os.path.exists(stored_path):
return True
else:
return False
def _move_to_except(dir_path):
"""Handles the except directory sub-section"""
__mkdir__(dir_path, dir_name='/except')
list_except = input_table[:]
not_req = map(lambda t: t[0], clusters)
            list_except.remove_rows(not_req)
            ascii.write(list_except, 'list_except.csv')
status_mkobj = map(__mkobj__, list_except)
if all(status_mkobj):
                print 'Directories have been successfully created'
else:
raise ValueError('sdss_xid.csv not written for all objects')
            files, folders = __sanity__(dir_path + '/except')
if files==1 and folders==len(list_except):
print "list_except.csv and req directories succesfully written"
else:
raise ValueError("Inconsistent number of files/directory")
def _move_to_good(dir_path):
"""Handles the good directory sub-section"""
__mkdir__(dir_path, dir_name='/good')
req = map(lambda t: t[0], clusters)
list_good = input_table[:]
list_good = filter(lambda t: t.index in req, list_good)
ascii.write(list_good, 'list_good.csv')
status_mkobj = map(functools.partial(__mkobj__, good=True), req)
if all(status_mkobj):
                print 'Directories have been successfully created'
else:
raise ValueError('sdss_xid.csv not written for all objects')
            files, folders = __sanity__(dir_path + '/good')
if files==1 and folders==len(list_good):
print "list_good.csv and req. directories succesfully written"
else:
raise ValueError("Inconsistent number of files/directory")
#Downloading Spectra from SDSS DR14 Webpage using rsync
command='rsync -avzL --files-from=download_rsync.txt \
rsync://data.sdss.org/dr14 dr14'
        os.chdir(dir_path)
os.system(command)
#Input parameters
DB=['eboss', 'sdss']
RUN2D=['v5_10_0', '26', '103', '104']
        path = [[_return_paths(db, run2d) for run2d in RUN2D] for db in DB]
        paths = [item for sublist in path for item in sublist]
required_paths = filter(os.path.exists, paths)
clusters = self._cluster(cluster_path=table_path,
radec_path=input_table_path,
constraint=2)
table = self._initialise(table_path)
input_table = self._initialise(input_table_path)
#Data Manipulation
ascii.write(input_table, dir_path+'list.csv')
_move_to_except(dir_path=dir_path)
_move_to_good(dir_path=dir_path)
def __str__(self):
"""Returns current status of object"""
s1='Overall Status: {0}\n'.format(str(self._status))
s2='Combined Spec Tables: {0}\n'.format(str(self._make_combined_table))
s3='Clustering Process: {0}\n'.format(str(self._clustering))
s4='Make Spectrum: {0}\n'.format(str(self._make_spectrum))
s5='Re-organise Files/directories: {0}\n'.format(str(self._make_table))
        return s1 + s2 + s3 + s4 + s5
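# -- Example (not part of the original script) ---------------------------------
# A minimal sketch of the intended workflow, pieced together from the docstrings
# above. The file names and working directory are illustrative assumptions:
# combine_tables() globs for '*spec*' tables in dir_path, and make_spectrum()
# then sorts the input list into the good/ and except/ directories.
if __name__ == "__main__":
    organiser = Make_readable()
    combined = organiser.combine_tables('spec', dir_path=os.getcwd() + '/')
    if combined:
        organiser.make_spectrum(table_path=combined[0],
                                input_table_path='input_list.csv',
                                dir_path=os.getcwd())
    print organiser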
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Catalog service."""
import abc
from oslo_config import cfg
from oslo_log import log
import six
from keystone.common import cache
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LE
from keystone import notifications
CONF = cfg.CONF
LOG = log.getLogger(__name__)
MEMOIZE = cache.get_memoization_decorator(section='catalog')
def format_url(url, substitutions, silent_keyerror_failures=None):
"""Formats a user-defined URL with the given substitutions.
:param string url: the URL to be formatted
:param dict substitutions: the dictionary used for substitution
:param list silent_keyerror_failures: keys for which we should be silent
if there is a KeyError exception on substitution attempt
:returns: a formatted URL
"""
WHITELISTED_PROPERTIES = [
'tenant_id', 'user_id', 'public_bind_host', 'admin_bind_host',
'compute_host', 'compute_port', 'admin_port', 'public_port',
'public_endpoint', 'admin_endpoint', ]
substitutions = utils.WhiteListedItemFilter(
WHITELISTED_PROPERTIES,
substitutions)
allow_keyerror = silent_keyerror_failures or []
try:
result = url.replace('$(', '%(') % substitutions
except AttributeError:
LOG.error(_LE('Malformed endpoint - %(url)r is not a string'),
{"url": url})
raise exception.MalformedEndpoint(endpoint=url)
except KeyError as e:
if not e.args or e.args[0] not in allow_keyerror:
LOG.error(_LE("Malformed endpoint %(url)s - unknown key "
"%(keyerror)s"),
{"url": url,
"keyerror": e})
raise exception.MalformedEndpoint(endpoint=url)
else:
result = None
except TypeError as e:
LOG.error(_LE("Malformed endpoint '%(url)s'. The following type error "
"occurred during string substitution: %(typeerror)s"),
{"url": url,
"typeerror": e})
raise exception.MalformedEndpoint(endpoint=url)
except ValueError as e:
LOG.error(_LE("Malformed endpoint %s - incomplete format "
"(are you missing a type notifier ?)"), url)
raise exception.MalformedEndpoint(endpoint=url)
return result
@dependency.provider('catalog_api')
class Manager(manager.Manager):
"""Default pivot point for the Catalog backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
_ENDPOINT = 'endpoint'
_SERVICE = 'service'
_REGION = 'region'
def __init__(self):
super(Manager, self).__init__(CONF.catalog.driver)
def create_region(self, region_ref, initiator=None):
# Check duplicate ID
try:
self.get_region(region_ref['id'])
except exception.RegionNotFound:
pass
else:
msg = _('Duplicate ID, %s.') % region_ref['id']
raise exception.Conflict(type='region', details=msg)
# NOTE(lbragstad,dstanek): The description column of the region
# database cannot be null. So if the user doesn't pass in a
# description or passes in a null description then set it to an
# empty string.
if region_ref.get('description') is None:
region_ref['description'] = ''
try:
ret = self.driver.create_region(region_ref)
except exception.NotFound:
parent_region_id = region_ref.get('parent_region_id')
raise exception.RegionNotFound(region_id=parent_region_id)
notifications.Audit.created(self._REGION, ret['id'], initiator)
return ret
@MEMOIZE
def get_region(self, region_id):
try:
return self.driver.get_region(region_id)
except exception.NotFound:
raise exception.RegionNotFound(region_id=region_id)
def update_region(self, region_id, region_ref, initiator=None):
# NOTE(lbragstad,dstanek): The description column of the region
# database cannot be null. So if the user passes in a null
# description set it to an empty string.
if 'description' in region_ref and region_ref['description'] is None:
region_ref['description'] = ''
ref = self.driver.update_region(region_id, region_ref)
notifications.Audit.updated(self._REGION, region_id, initiator)
self.get_region.invalidate(self, region_id)
return ref
def delete_region(self, region_id, initiator=None):
try:
ret = self.driver.delete_region(region_id)
notifications.Audit.deleted(self._REGION, region_id, initiator)
self.get_region.invalidate(self, region_id)
return ret
except exception.NotFound:
raise exception.RegionNotFound(region_id=region_id)
@manager.response_truncated
def list_regions(self, hints=None):
return self.driver.list_regions(hints or driver_hints.Hints())
def create_service(self, service_id, service_ref, initiator=None):
service_ref.setdefault('enabled', True)
service_ref.setdefault('name', '')
ref = self.driver.create_service(service_id, service_ref)
notifications.Audit.created(self._SERVICE, service_id, initiator)
return ref
@MEMOIZE
def get_service(self, service_id):
try:
return self.driver.get_service(service_id)
except exception.NotFound:
raise exception.ServiceNotFound(service_id=service_id)
def update_service(self, service_id, service_ref, initiator=None):
ref = self.driver.update_service(service_id, service_ref)
notifications.Audit.updated(self._SERVICE, service_id, initiator)
self.get_service.invalidate(self, service_id)
return ref
def delete_service(self, service_id, initiator=None):
try:
endpoints = self.list_endpoints()
ret = self.driver.delete_service(service_id)
notifications.Audit.deleted(self._SERVICE, service_id, initiator)
self.get_service.invalidate(self, service_id)
for endpoint in endpoints:
if endpoint['service_id'] == service_id:
self.get_endpoint.invalidate(self, endpoint['id'])
return ret
except exception.NotFound:
raise exception.ServiceNotFound(service_id=service_id)
@manager.response_truncated
def list_services(self, hints=None):
return self.driver.list_services(hints or driver_hints.Hints())
def _assert_region_exists(self, region_id):
try:
if region_id is not None:
self.get_region(region_id)
except exception.RegionNotFound:
raise exception.ValidationError(attribute='endpoint region_id',
target='region table')
def _assert_service_exists(self, service_id):
try:
if service_id is not None:
self.get_service(service_id)
except exception.ServiceNotFound:
raise exception.ValidationError(attribute='endpoint service_id',
target='service table')
def create_endpoint(self, endpoint_id, endpoint_ref, initiator=None):
self._assert_region_exists(endpoint_ref.get('region_id'))
self._assert_service_exists(endpoint_ref['service_id'])
ref = self.driver.create_endpoint(endpoint_id, endpoint_ref)
notifications.Audit.created(self._ENDPOINT, endpoint_id, initiator)
return ref
def update_endpoint(self, endpoint_id, endpoint_ref, initiator=None):
self._assert_region_exists(endpoint_ref.get('region_id'))
self._assert_service_exists(endpoint_ref.get('service_id'))
ref = self.driver.update_endpoint(endpoint_id, endpoint_ref)
notifications.Audit.updated(self._ENDPOINT, endpoint_id, initiator)
self.get_endpoint.invalidate(self, endpoint_id)
return ref
def delete_endpoint(self, endpoint_id, initiator=None):
try:
ret = self.driver.delete_endpoint(endpoint_id)
notifications.Audit.deleted(self._ENDPOINT, endpoint_id, initiator)
self.get_endpoint.invalidate(self, endpoint_id)
return ret
except exception.NotFound:
raise exception.EndpointNotFound(endpoint_id=endpoint_id)
@MEMOIZE
def get_endpoint(self, endpoint_id):
try:
return self.driver.get_endpoint(endpoint_id)
except exception.NotFound:
raise exception.EndpointNotFound(endpoint_id=endpoint_id)
@manager.response_truncated
def list_endpoints(self, hints=None):
return self.driver.list_endpoints(hints or driver_hints.Hints())
def get_catalog(self, user_id, tenant_id):
try:
return self.driver.get_catalog(user_id, tenant_id)
except exception.NotFound:
raise exception.NotFound('Catalog not found for user and tenant')
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
"""Interface description for an Catalog driver."""
def _get_list_limit(self):
return CONF.catalog.list_limit or CONF.list_limit
def _ensure_no_circle_in_hierarchical_regions(self, region_ref):
if region_ref.get('parent_region_id') is None:
return
root_region_id = region_ref['id']
parent_region_id = region_ref['parent_region_id']
while parent_region_id:
# NOTE(wanghong): check before getting parent region can ensure no
# self circle
if parent_region_id == root_region_id:
raise exception.CircularRegionHierarchyError(
parent_region_id=parent_region_id)
parent_region = self.get_region(parent_region_id)
parent_region_id = parent_region.get('parent_region_id')
@abc.abstractmethod
def create_region(self, region_ref):
"""Creates a new region.
:raises: keystone.exception.Conflict
:raises: keystone.exception.RegionNotFound (if parent region invalid)
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_regions(self, hints):
"""List all regions.
:param hints: contains the list of filters yet to be satisfied.
Any filters satisfied here will be removed so that
the caller will know if any filters remain.
:returns: list of region_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_region(self, region_id):
"""Get region by id.
:returns: region_ref dict
:raises: keystone.exception.RegionNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def update_region(self, region_id, region_ref):
"""Update region by id.
:returns: region_ref dict
:raises: keystone.exception.RegionNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_region(self, region_id):
"""Deletes an existing region.
:raises: keystone.exception.RegionNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def create_service(self, service_id, service_ref):
"""Creates a new service.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_services(self, hints):
"""List all services.
:param hints: contains the list of filters yet to be satisfied.
Any filters satisfied here will be removed so that
the caller will know if any filters remain.
:returns: list of service_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_service(self, service_id):
"""Get service by id.
:returns: service_ref dict
:raises: keystone.exception.ServiceNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def update_service(self, service_id, service_ref):
"""Update service by id.
:returns: service_ref dict
:raises: keystone.exception.ServiceNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_service(self, service_id):
"""Deletes an existing service.
:raises: keystone.exception.ServiceNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def create_endpoint(self, endpoint_id, endpoint_ref):
"""Creates a new endpoint for a service.
:raises: keystone.exception.Conflict,
keystone.exception.ServiceNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_endpoint(self, endpoint_id):
"""Get endpoint by id.
:returns: endpoint_ref dict
:raises: keystone.exception.EndpointNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_endpoints(self, hints):
"""List all endpoints.
:param hints: contains the list of filters yet to be satisfied.
Any filters satisfied here will be removed so that
the caller will know if any filters remain.
:returns: list of endpoint_refs or an empty list.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def update_endpoint(self, endpoint_id, endpoint_ref):
"""Get endpoint by id.
:returns: endpoint_ref dict
:raises: keystone.exception.EndpointNotFound
keystone.exception.ServiceNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_endpoint(self, endpoint_id):
"""Deletes an endpoint for a service.
:raises: keystone.exception.EndpointNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_catalog(self, user_id, tenant_id):
"""Retrieve and format the current service catalog.
Example::
{ 'RegionOne':
{'compute': {
'adminURL': u'http://host:8774/v1.1/tenantid',
'internalURL': u'http://host:8774/v1.1/tenant_id',
'name': 'Compute Service',
'publicURL': u'http://host:8774/v1.1/tenantid'},
'ec2': {
'adminURL': 'http://host:8773/services/Admin',
'internalURL': 'http://host:8773/services/Cloud',
'name': 'EC2 Service',
'publicURL': 'http://host:8773/services/Cloud'}}
:returns: A nested dict representing the service catalog or an
empty dict.
:raises: keystone.exception.NotFound
"""
raise exception.NotImplemented() # pragma: no cover
def get_v3_catalog(self, user_id, tenant_id):
"""Retrieve and format the current V3 service catalog.
The default implementation builds the V3 catalog from the V2 catalog.
Example::
[
{
"endpoints": [
{
"interface": "public",
"id": "--endpoint-id--",
"region": "RegionOne",
"url": "http://external:8776/v1/--project-id--"
},
{
"interface": "internal",
"id": "--endpoint-id--",
"region": "RegionOne",
"url": "http://internal:8776/v1/--project-id--"
}],
"id": "--service-id--",
"type": "volume"
}]
:returns: A list representing the service catalog or an empty list
:raises: keystone.exception.NotFound
"""
v2_catalog = self.get_catalog(user_id, tenant_id)
v3_catalog = []
for region_name, region in six.iteritems(v2_catalog):
for service_type, service in six.iteritems(region):
service_v3 = {
'type': service_type,
'endpoints': []
}
for attr, value in six.iteritems(service):
# Attributes that end in URL are interfaces. In the V2
# catalog, these are internalURL, publicURL, and adminURL.
# For example, <region_name>.publicURL=<URL> in the V2
# catalog becomes the V3 interface for the service:
# { 'interface': 'public', 'url': '<URL>', 'region':
# 'region: '<region_name>' }
if attr.endswith('URL'):
v3_interface = attr[:-len('URL')]
service_v3['endpoints'].append({
'interface': v3_interface,
'region': region_name,
'url': value,
})
continue
# Other attributes are copied to the service.
service_v3[attr] = value
v3_catalog.append(service_v3)
return v3_catalog
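# -- Example (not part of the original module) ----------------------------------
# A minimal sketch of format_url() above: endpoint templates use $(key)s
# placeholders, which are rewritten to %(key)s and substituted from the
# white-listed keys only. The URL and port are illustrative values.
if __name__ == '__main__':
    sample = format_url('http://localhost:$(public_port)s/v2.0',
                        {'public_port': 5000})
    print(sample)  # -> http://localhost:5000/v2.0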
|
|
import py.test
from tiddlyweb.config import config
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.bag import Bag
from tiddlywebplugins.utils import get_store
from tiddlywebplugins.mysql3 import index_query
from tiddlywebplugins.mysql3 import Base
def setup_module(module):
module.store = get_store(config)
module.environ = {'tiddlyweb.config': config,
'tiddlyweb.store': module.store}
session = module.store.storage.session
# delete everything
Base.metadata.drop_all()
Base.metadata.create_all()
def test_simple_store():
bag = Bag(u'bag1')
store.put(bag)
tiddler = Tiddler(u'tiddler1', u'bag1')
tiddler.text = u'oh hello i chrisdent have nothing to say here you know'
tiddler.tags = [u'apple', u'orange', u'pear']
tiddler.fields[u'house'] = u'cottage'
store.put(tiddler)
retrieved = Tiddler(u'tiddler1', u'bag1')
retrieved = store.get(retrieved)
assert retrieved.text == tiddler.text
def test_simple_search():
tiddlers = list(store.search('chrisdent'))
assert len(tiddlers) == 1
assert tiddlers[0].title == 'tiddler1'
assert tiddlers[0].bag == 'bag1'
tiddlers = list(store.search('hello'))
assert len(tiddlers) == 1
assert tiddlers[0].title == 'tiddler1'
assert tiddlers[0].bag == 'bag1'
def test_index_query_id():
kwords = {'id': u'bag1:tiddler1'}
tiddlers = list(index_query(environ, **kwords))
assert len(tiddlers) == 1
assert tiddlers[0].title == 'tiddler1'
assert tiddlers[0].bag == 'bag1'
def test_index_query_filter():
kwords = {'tag': u'orange'}
tiddlers = list(index_query(environ, **kwords))
assert len(tiddlers) == 1
assert tiddlers[0].title == 'tiddler1'
assert tiddlers[0].bag == 'bag1'
def test_index_query_filter_fields():
kwords = {'house': u'cottage'}
tiddlers = list(index_query(environ, **kwords))
assert len(tiddlers) == 1
assert tiddlers[0].title == 'tiddler1'
assert tiddlers[0].bag == 'bag1'
assert tiddlers[0].fields['house'] == 'cottage'
kwords = {u'house': u'mansion'}
tiddlers = list(index_query(environ, **kwords))
assert len(tiddlers) == 0
def test_index_query_filter_fields_and_bag():
kwords = {'bag': u'bag1', 'house': u'cottage'}
tiddlers = list(index_query(environ, **kwords))
assert len(tiddlers) == 1
assert tiddlers[0].title == 'tiddler1'
assert tiddlers[0].bag == 'bag1'
assert tiddlers[0].fields['house'] == 'cottage'
def test_search_right_revision():
tiddler = Tiddler(u'revised', u'bag1')
tiddler.text = u'alpha'
tiddler.fields[u'house'] = u'cottage'
store.put(tiddler)
tiddler = Tiddler(u'revised', u'bag1')
tiddler.text = u'beta'
tiddler.fields[u'house'] = u'mansion'
store.put(tiddler)
tiddler = Tiddler(u'revised', u'bag1')
tiddler.text = u'gamma'
tiddler.fields[u'house'] = u'barn'
store.put(tiddler)
tiddler = Tiddler(u'revised', u'bag1')
tiddler.text = u'delta'
tiddler.fields[u'house'] = u'bungalow'
store.put(tiddler)
tiddler = Tiddler(u'revised', u'bag1')
tiddler.text = u'epsilon'
tiddler.fields[u'house'] = u'treehouse'
store.put(tiddler)
tiddlers = list(store.search('beta'))
assert len(tiddlers) == 0
tiddlers = list(store.search('epsilon'))
assert len(tiddlers) == 1
tiddler = store.get(Tiddler(tiddlers[0].title, tiddlers[0].bag))
assert tiddler.title == 'revised'
assert tiddler.bag == 'bag1'
assert tiddler.fields['house'] == 'treehouse'
kwords = {'bag': u'bag1', 'house': u'barn'}
tiddlers = list(index_query(environ, **kwords))
assert len(tiddlers) == 0
kwords = {'bag': u'bag1', 'house': u'treehouse'}
tiddlers = list(index_query(environ, **kwords))
assert tiddlers[0].title == 'revised'
assert tiddlers[0].bag == 'bag1'
assert tiddlers[0].fields['house'] == 'treehouse'
kwords = {'bag': u'bag1', 'tag': u'orange'}
tiddlers = list(index_query(environ, **kwords))
assert len(tiddlers) == 1
kwords = {'bag': u'bag1', 'tag': u'rang'}
tiddlers = list(index_query(environ, **kwords))
assert len(tiddlers) == 0
def test_search_follow_syntax():
QUERY = u'ftitle:GettingStarted (bag:cdent_public OR bag:fnd_public)'
store.put(Bag(u'fnd_public'))
store.put(Bag(u'cdent_public'))
tiddler = Tiddler(u'GettingStarted', u'fnd_public')
tiddler.text = u'fnd starts'
tiddler.fields[u'house'] = u'treehouse'
tiddler.fields[u'car'] = u'porsche'
store.put(tiddler)
tiddler = Tiddler(u'GettingStarted', u'cdent_public')
tiddler.text = u'cdent starts'
tiddler.fields[u'left-hand'] = u'well dirty'
store.put(tiddler)
tiddler = Tiddler(u'other', u'cdent_public')
tiddler.text = u'cdent starts'
store.put(tiddler)
tiddlers = list(store.search(u'starts'))
assert len(tiddlers) == 3
tiddlers = list(store.search(QUERY))
assert len(tiddlers) == 2
tiddlers = list(store.search(u'cdent starts'))
assert len(tiddlers) == 2
tiddlers = list(store.search(u'fnd starts'))
assert len(tiddlers) == 1
tiddlers = list(store.search(u'left-hand:"well dirty"'))
assert len(tiddlers) == 1
def test_search_arbitrarily_complex():
QUERY = u'ftitle:GettingStarted (bag:cdent_public OR bag:fnd_public) house:treehouse'
tiddlers = list(store.search(QUERY))
assert len(tiddlers) == 1
QUERY = u'ftitle:GettingStarted ((bag:cdent_public OR bag:fnd_public) AND (house:treehouse AND car:porsche))'
tiddlers = list(store.search(QUERY))
assert len(tiddlers) == 1
def test_field_with_dot():
tiddler = Tiddler(u'geoplace', u'cdent_public')
tiddler.text = u'some place somewhere'
tiddler.fields[u'geo.lat'] = u'1.25'
tiddler.fields[u'geo.long'] = u'-45.243'
store.put(tiddler)
tiddlers = list(store.search(u'geo.lat:1.2*'))
assert len(tiddlers) == 1
tiddlers = list(store.search(u'geo.lat:"1.2*" AND geo.long:"-45.*"'))
assert len(tiddlers) == 1
tiddlers = list(store.search(u'geo.lat:"1.3*" AND geo.long:"-46.*"'))
assert len(tiddlers) == 0
tiddlers = list(store.search(u'geo.lat:"1.2*" OR geo.long:"-46.*"'))
assert len(tiddlers) == 1
def test_limited_search():
tiddlers = list(store.search(u'starts _limit:1'))
assert len(tiddlers) == 1, tiddlers
tiddlers = list(store.search(u'starts'))
assert len(tiddlers) != 1, tiddlers
tiddlers = list(store.search(u'starts _limit:so'))
assert len(tiddlers) != 1, tiddlers
def test_modified():
"""
Note the multiple store.put in here are to create
additional revisions to make sure that joins are
sufficiently limited.
"""
tiddler = Tiddler(u'GettingStarted', u'fnd_public')
tiddler.modifier = u'fnd'
store.put(tiddler)
tiddlers = list(store.search(u'modifier:fnd'))
assert len(tiddlers) == 1
tiddler = Tiddler(u'GettingStarted', u'fnd_public')
tiddler.tags = [u'monkey', u'cow', u'food']
tiddler.modifier = u'cdent'
store.put(tiddler)
store.put(tiddler)
store.put(tiddler)
store.put(tiddler)
tiddlers = list(store.search(u'modifier:fnd'))
assert len(tiddlers) == 0
tiddler = Tiddler(u'GettingFancy', u'fnd_public')
tiddler.tags = [u'cow', u'food']
tiddler.modifier = u'fnd'
store.put(tiddler)
store.put(tiddler)
store.put(tiddler)
store.put(tiddler)
tiddlers = list(store.search(u'modifier:fnd OR modifier:cdent'))
assert len(tiddlers) == 2
tiddlers = list(store.search(u'modifier:fnd NOT modifier:cdent'))
assert len(tiddlers) == 1
tiddlers = list(store.search(u'modifier:fnd NOT (modifier:cdent OR title:GettingStarted)'))
assert len(tiddlers) == 1
tiddlers = list(store.search(u'modifier:fnd AND modified:20*'))
assert len(tiddlers) == 1
def test_not():
py.test.skip('need better sql-fu to get this right')
# If we do a group by tag in the query we get reasonable
# results but we can't effectively produce that group by in
# the face of other arbitrary queries.
tiddlers = list(store.search(u'bag:fnd_public NOT tag:monkey'))
assert len(tiddlers) == 1
def test_or_tags():
tiddler = Tiddler(u'tagone', u'fnd_public')
tiddler.text = 'hi @onething hello'
tiddler.tags = ['one','three', 'five']
store.put(tiddler)
tiddler = Tiddler(u'tagtwo', u'fnd_public')
tiddler.text = 'hi @twothing hello'
tiddler.tags = ['two', 'four', 'six']
store.put(tiddler)
tiddlers = list(store.search(u'@twothing OR tag:one'))
assert len(tiddlers) == 2
def test_at_tags():
tiddler = Tiddler(u'tagat', u'fnd_public')
tiddler.text = 'new stuff to not mess'
tiddler.tags = ['one','three', 'five', '@cdent']
store.put(tiddler)
tiddlers = list(store.search(u'tag:@cdent'))
assert len(tiddlers) == 1
def test_paren_title():
tiddler = Tiddler(u'(i)', u'fnd_public')
tiddler.text = 'hi @onething hello'
tiddler.tags = ['one','three', 'five']
store.put(tiddler)
tiddlers = list(store.search(u'title:(i)'))
assert len(tiddlers) == 1
assert tiddlers[0].title == '(i)'
def test_text_as_field():
tiddlers = list(store.search(u'text:hello'))
assert len(tiddlers) == 4, tiddlers
def test_srevision_attr():
tiddlers = list(store.search(u'fields:hello'))
assert len(tiddlers) == 0, tiddlers
def test_tiddler_field_join():
tiddler = Tiddler(u'fieldtest', u'fnd_public')
tiddler.text = 'hi again'
tiddler.fields = {
u'barney': u'evil',
u'soup': u'good',
}
store.put(tiddler)
tiddlers = list(store.search(u'barney:evil'))
assert len(tiddlers) == 1
assert tiddlers[0].title == 'fieldtest'
tiddlers = list(store.search(u'barney:evil AND soup:good'))
assert len(tiddlers) == 1
assert tiddlers[0].title == 'fieldtest'
|
|
from bs4 import BeautifulSoup
from datetime import datetime, timezone
from collections import OrderedDict
import requests, time, json, csv, os, random
def new_payload(block, flat_type, contract, bto_date):
return {
"Flat": flat_type,
"Block": block,
"Contract": contract,
"Town": "Toa Payoh",
"Flat_Type": "BTO",
"ethnic": "Y",
"ViewOption": "A",
"projName": "A",
"DesType": "A",
"EthnicA": "Y",
"EthnicM": "",
"EthnicC": "",
"EthnicO": "",
"numSPR": "",
"dteBallot": bto_date,
"Neighbourhood": "N9",
"BonusFlats1": "N",
"searchDetails": "",
"isTownChange": "No",
"brochure": "false"
}
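# Example call (block, flat type, contract and ballot date taken from the
# configuration further down in this script):
#   new_payload("107A", "4-Room", "C7", "201602")
# builds the query parameters for one block/flat-type request to the HDB
# flat-availability endpoint.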
class Unit:
def __init__(self, unit_no, booked, cost="", size=""):
self.unit_no = unit_no
self.booked = booked
self.cost = cost
self.size = size
self.floor, self.stack = unit_no[1:].split('-')
def update(self, block, flat_type):
self.block = block
self.flat_type = flat_type
def sort_key(self):
return [self.block, self.flat_type, self.stack, self.floor]
def row(self):
status = 'booked' if self.booked else 'available'
return [self.block, self.flat_type, self.unit_no, self.floor, self.stack, status, self.size, self.cost]
@staticmethod
def row_header():
return ['block', 'flat_type', 'unit_no', 'floor', 'stack', 'status', 'size', 'cost']
def unit_from_soup(soup):
# Unbooked
if soup.find('a'):
u = soup.find('font')
unit_no = u.get('id')
cost, size = u.get('title').replace('\xa0',' ').replace('<br/>', '\n').split('____________________')
return Unit(unit_no, False, cost.strip(), size.strip())
else:
unit_no = soup.find('font').text.strip()
return Unit(unit_no, True)
def parse(html):
soup = BeautifulSoup(html, 'html.parser')
block_details = soup.find(id='blockDetails')
unit_details = block_details.find_all(class_='row')[4].find_all('td')
return [unit_from_soup(unit) for unit in unit_details]
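# Minimal sketch of the markup unit_from_soup() expects (hypothetical HTML,
# kept only to document the assumption): a booked unit is a bare <font> whose
# text is the unit number, while an available unit is wrapped in an <a> and
# carries the unit number in the <font> id plus cost/size in its title.
def _example_parse_booked_unit():
    cell = BeautifulSoup('<td><font>#12-117</font></td>', 'html.parser')
    unit = unit_from_soup(cell)
    # ('#12-117', '12', '117', True)
    return unit.unit_no, unit.floor, unit.stack, unit.booked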
def fetch(s, url, payload):
return s.get(url, params=payload)
def fetch_and_parse(s, url, payload):
r = fetch(s, url, payload)
units = parse(r.text)
return units
def write_json(filename, all_units):
os.makedirs(os.path.dirname(filename), exist_ok=True)
unit_json = {
"timestamp": datetime.now(timezone.utc).astimezone().isoformat(),
"units": all_units
}
with open(filename, 'w') as out:
out.write(json.dumps(unit_json, default=lambda obj: OrderedDict(sorted(obj.__dict__.items()))))
def write_csv(filename, all_units):
os.makedirs(os.path.dirname(filename), exist_ok=True)
rows = [unit.row() for unit in all_units]
with open(filename, 'w', newline='') as out:
writer = csv.writer(out)
writer.writerow(Unit.row_header())
writer.writerows(rows)
def flat_stats(flat_type, units):
available = len(list(filter(lambda unit: unit.flat_type == flat_type, units)))
booked = len(list(filter(lambda unit: unit.flat_type == flat_type and unit.booked, units)))
return [booked, available]
def write_stats(filename, all_units, blocks_and_flat_types, expected_count):
flat_type_count = OrderedDict()
flat_types = sorted(expected_count.keys())
with open(filename, 'w') as out:
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
out.write("Time: {}\n".format(timestamp))
out.write("Health check\n")
for flat_type in flat_types:
flat_type_count[flat_type] = len(list(filter(lambda unit: unit.flat_type == flat_type, all_units)))
if tuple(flat_type_count.items()) == tuple(expected_count.items()):
out.write("###OK###\n")
else:
out.write("\n\tTotal retrieved flats did not match expected count.\n")
out.write("\tRetrieved: {}\n".format(tuple(flat_type_count.items())))
out.write("\tExpected: {}\n".format(tuple(expected_count.items())))
return
out.write("\nCumulative Selected Stats\n")
for flat_type in flat_types:
booked, available = flat_stats(flat_type, all_units)
out.write("\t{}: {}/{} ({:.2f}%) selected\n".format(flat_type, booked, available, (booked / available)*100))
out.write("\nPer Block Selected Stats\n")
for block, flat_types in blocks_and_flat_types.items():
out.write("\t{}\n".format(block))
units = list(filter(lambda unit: unit.block == block, all_units))
for flat_type in flat_types:
booked, available = flat_stats(flat_type, units)
out.write("\t{}: {}/{} ({:.2f}%) selected\n".format(flat_type, booked, available, (booked / available)*100))
out.write("\n")
def grab_data(url, blocks_and_flat_types, contracts, expected_count, filename, bto_date):
s = requests.Session()
# Need to make an initial request to grab the cookies
s.get("http://services2.hdb.gov.sg/webapp/BP13AWFlatAvail/BP13EBSFlatSearch?Town=Toa%20Payoh&Flat_Type=BTO&DesType=Aðnic=Y&Flat=4-Room&ViewOption=A&dteBallot={}&projName=A&brochure=false".format(bto_date))
all_units = []
debug = ""
timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
print("[{}] Start".format(datetime.now()))
for block, flat_types in blocks_and_flat_types.items():
contract = contracts[block]
for flat_type in flat_types:
payload = new_payload(block, flat_type, contract, bto_date)
units = fetch_and_parse(s, url, payload)
print("[{}] {} {}: Found {} units".format(datetime.now(), block, flat_type, len(units)))
for i, unit in enumerate(units):
unit.update(block, flat_type)
units[i] = unit
all_units.extend(units)
time.sleep(random.uniform(0, 3))
all_units = sorted(all_units, key=lambda unit: unit.sort_key())
write_json("data/{}.json".format(filename), all_units)
write_csv("data/{}.csv".format(filename), all_units)
write_stats("data/{}.log".format(filename), all_units, blocks_and_flat_types, expected_count)
print("[{}] End".format(datetime.now()))
print("======================================\n")
if __name__ == "__main__":
url = "http://services2.hdb.gov.sg/webapp/BP13AWFlatAvail/BP13EBSFlatSearch"
# Nov 2015 selection has ended
#blocks_and_flat_types = {
# "101A": ["2-Room Flexi (Short Lease/99-Year Lease)", "3-Room", "4-Room"],
# "102A": ["2-Room Flexi (Short Lease/99-Year Lease)", "4-Room"],
# "102B": ["3-Room", "4-Room"],
# "103A": ["3-Room", "4-Room"],
# "103B": ["3-Room", "4-Room"],
# "104A": ["2-Room Flexi (Short Lease/99-Year Lease)", "3-Room", "4-Room"],
# "105A": ["4-Room", "5-Room"],
# "105B": ["4-Room", "5-Room"],
# "106A": ["4-Room", "5-Room"],
# "106B": ["4-Room", "5-Room"],
# "115A": ["3-Room", "4-Room"],
# "115C": ["3-Room", "4-Room"],
# "118A": ["3-Room", "4-Room"]
#}
#contracts = {
# "101A": "C1",
# "102A": "C1",
# "102B": "C1",
# "103A": "C1",
# "103B": "C1",
# "104A": "C1",
# "105A": "C4",
# "105B": "C4",
# "106A": "C4",
# "106B": "C4",
# "115A": "C3",
# "115C": "C3",
# "118A": "C3"
#}
#expected_count = {
# "2-Room Flexi (Short Lease/99-Year Lease)": 192,
# "3-Room": 567,
# "4-Room": 1229,
# "5-Room": 151
#}
#blocks_and_flat_types = OrderedDict(sorted(blocks_and_flat_types.items()))
#expected_count = OrderedDict(sorted(expected_count.items()))
#grab_data(url, blocks_and_flat_types, contracts, expected_count, 'bidadari', '201511')
blocks_and_flat_types = {
"107A": ["3-Room", "4-Room", "5-Room"],
"107B": ["4-Room", "5-Room"],
"108A": ["3-Room", "4-Room"],
"108B": ["4-Room", "5-Room"],
"109A": ["4-Room", "5-Room"],
"109B": ["4-Room", "5-Room"],
"110A": ["4-Room", "5-Room"],
"110B": ["4-Room", "5-Room"],
"111A": ["2-Room Flexi (Short Lease/99-Year Lease)","4-Room"],
"111B": ["2-Room Flexi (Short Lease/99-Year Lease)","4-Room"],
"112A": ["2-Room Flexi (Short Lease/99-Year Lease)","4-Room"],
"112B": ["3-Room", "4-Room"],
"113A": ["3-Room", "4-Room"],
"113B": ["3-Room", "4-Room"],
"114A": ["2-Room Flexi (Short Lease/99-Year Lease)","3-Room", "4-Room"],
"114B": ["2-Room Flexi (Short Lease/99-Year Lease)","3-Room", "4-Room"],
}
contracts = {
"107A": "C7",
"107B": "C7",
"108A": "C7",
"108B": "C7",
"109A": "C7",
"109B": "C7",
"110A": "C7",
"110B": "C7",
"111A": "C6",
"111B": "C6",
"112A": "C6",
"112B": "C6",
"113A": "C6",
"113B": "C6",
"114A": "C6",
"114B": "C6",
}
expected_count = {
"2-Room Flexi (Short Lease/99-Year Lease)": 218,
"3-Room": 340,
"4-Room": 800,
"5-Room": 236
}
blocks_and_flat_types = OrderedDict(sorted(blocks_and_flat_types.items()))
expected_count = OrderedDict(sorted(expected_count.items()))
grab_data(url, blocks_and_flat_types, contracts, expected_count, 'bidadari_2', '201602')
|
|
# Licensed under a 3-clause BSD style license - see PYFITS.rst
from __future__ import division, with_statement
import bz2
import gzip
import mmap
import operator
import os
import sys
import tempfile
import warnings
import zipfile
from functools import reduce
import numpy as np
from numpy import memmap as Memmap
from .util import (isreadable, iswritable, isfile, fileobj_open, fileobj_name,
fileobj_closed, fileobj_mode, _array_from_file,
_array_to_file, _write_string)
from ...extern.six import b, string_types
from ...utils.data import download_file, _is_url
from ...utils.decorators import classproperty, deprecated_renamed_argument
from ...utils.exceptions import AstropyUserWarning
# Maps astropy.io.fits-specific file mode names to the appropriate file
# modes to use for the underlying raw files
IO_FITS_MODES = {
'readonly': 'rb',
'copyonwrite': 'rb',
'update': 'rb+',
'append': 'ab+',
'ostream': 'wb',
'denywrite': 'rb'}
# Maps OS-level file modes to the appropriate astropy.io.fits specific mode
# to use when given file objects but no mode specified; obviously in
# IO_FITS_MODES there are overlaps; for example 'readonly' and 'denywrite'
# both require the file to be opened in 'rb' mode. But 'readonly' is the
# default behavior for such files if not otherwise specified.
# Note: 'ab' is only supported for 'ostream' which is output-only.
FILE_MODES = {
'rb': 'readonly', 'rb+': 'update',
'wb': 'ostream', 'wb+': 'update',
'ab': 'ostream', 'ab+': 'append'}
# readonly actually uses copyonwrite for mmap so that readonly without mmap and
# with mmap still have the same behavior with regard to updating the array. To
# get a truly readonly mmap use denywrite
# the name 'denywrite' comes from a deprecated flag to mmap() on Linux--it
# should be clarified that 'denywrite' mode is not directly analogous to the
# use of that flag; it was just taken, for lack of anything better, as a name
# that means something like "read only" but isn't readonly.
MEMMAP_MODES = {'readonly': 'c', 'copyonwrite': 'c', 'update': 'r+',
'append': 'c', 'denywrite': 'r'}
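# Example of how the three tables above interact (illustrative only): a file
# object already opened with raw mode 'rb+' maps to the fits mode 'update' via
# FILE_MODES; 'update' maps back to raw mode 'rb+' via IO_FITS_MODES; and when
# memmap is enabled the mmap is created with MEMMAP_MODES['update'] == 'r+'.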
# TODO: Eventually raise a warning, and maybe even later disable the use of
# 'copyonwrite' and 'denywrite' modes unless memmap=True. For now, however,
# that would generate too many warnings for too many users. If nothing else,
# wait until the new logging system is in place.
GZIP_MAGIC = b('\x1f\x8b\x08')
PKZIP_MAGIC = b('\x50\x4b\x03\x04')
BZIP2_MAGIC = b('\x42\x5a')
try:
import pathlib
except ImportError:
HAS_PATHLIB = False
else:
HAS_PATHLIB = True
class _File(object):
"""
Represents a FITS file on disk (or in some other file-like object).
"""
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', pending=True)
def __init__(self, fileobj=None, mode=None, memmap=None, overwrite=False,
cache=True):
self.strict_memmap = bool(memmap)
memmap = True if memmap is None else memmap
if fileobj is None:
self._file = None
self.closed = False
self.binary = True
self.mode = mode
self.memmap = memmap
self.compression = None
self.readonly = False
self.writeonly = False
self.simulateonly = True
return
else:
self.simulateonly = False
# If fileobj is of type pathlib.Path
if HAS_PATHLIB and isinstance(fileobj, pathlib.Path):
fileobj = str(fileobj)
# Holds mmap instance for files that use mmap
self._mmap = None
if mode is None:
if _is_random_access_file_backed(fileobj):
fmode = fileobj_mode(fileobj)
# If the mode is unsupported just leave it as None; we'll
# catch this case below
mode = FILE_MODES.get(fmode)
else:
mode = 'readonly' # The default
if mode not in IO_FITS_MODES:
raise ValueError("Mode '{}' not recognized".format(mode))
if (isinstance(fileobj, string_types) and
mode not in ('ostream', 'append') and
_is_url(fileobj)): # This is an URL.
self.name = download_file(fileobj, cache=cache)
else:
self.name = fileobj_name(fileobj)
self.closed = False
self.binary = True
self.mode = mode
self.memmap = memmap
# Underlying fileobj is a file-like object, but an actual file object
self.file_like = False
# More defaults to be adjusted below as necessary
self.compression = None
self.readonly = False
self.writeonly = False
# Initialize the internal self._file object
if _is_random_access_file_backed(fileobj):
self._open_fileobj(fileobj, mode, overwrite)
elif isinstance(fileobj, string_types):
self._open_filename(fileobj, mode, overwrite)
else:
self._open_filelike(fileobj, mode, overwrite)
self.fileobj_mode = fileobj_mode(self._file)
if isinstance(fileobj, gzip.GzipFile):
self.compression = 'gzip'
elif isinstance(fileobj, zipfile.ZipFile):
# Reading from zip files is supported but not writing (yet)
self.compression = 'zip'
elif isinstance(fileobj, bz2.BZ2File):
self.compression = 'bzip2'
if (mode in ('readonly', 'copyonwrite', 'denywrite') or
(self.compression and mode == 'update')):
self.readonly = True
elif (mode == 'ostream' or
(self.compression and mode == 'append')):
self.writeonly = True
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
if (mode == 'ostream' or self.compression or
not hasattr(self._file, 'seek')):
# For output stream start with a truncated file.
# For compressed files we can't really guess at the size
self.size = 0
else:
pos = self._file.tell()
self._file.seek(0, 2)
self.size = self._file.tell()
self._file.seek(pos)
if self.memmap:
if not isfile(self._file):
self.memmap = False
elif not self.readonly and not self._mmap_available:
# Test mmap.flush--see
# https://github.com/astropy/astropy/issues/968
self.memmap = False
def __repr__(self):
return '<{}.{} {}>'.format(self.__module__, self.__class__.__name__,
self._file)
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def readable(self):
if self.writeonly:
return False
return isreadable(self._file)
def read(self, size=None):
if not hasattr(self._file, 'read'):
raise EOFError
try:
return self._file.read(size)
except IOError:
# On some versions of Python, it appears, GzipFile will raise an
# IOError if you try to read past its end (as opposed to just
# returning '')
if self.compression == 'gzip':
return ''
raise
def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
"""
Similar to file.read(), but returns the contents of the underlying
file as a numpy array (or mmap'd array if memmap=True) rather than a
string.
Usually it's best not to use the `size` argument with this method, but
it's provided for compatibility.
"""
if not hasattr(self._file, 'read'):
raise EOFError
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if size and size % dtype.itemsize != 0:
raise ValueError('size {} not a multiple of {}'.format(size, dtype))
if isinstance(shape, int):
shape = (shape,)
if not (size or shape):
warnings.warn('No size or shape given to readarray(); assuming a '
'shape of (1,)', AstropyUserWarning)
shape = (1,)
if size and not shape:
shape = (size // dtype.itemsize,)
if size and shape:
actualsize = np.prod(shape) * dtype.itemsize
if actualsize > size:
raise ValueError('size {} is too few bytes for a {} array of '
'{}'.format(size, shape, dtype))
if actualsize < size:
raise ValueError('size {} is too many bytes for a {} array of '
'{}'.format(size, shape, dtype))
filepos = self._file.tell()
try:
if self.memmap:
if self._mmap is None:
# Instantiate Memmap array of the file offset at 0 (so we
# can return slices of it to offset anywhere else into the
# file)
memmap = Memmap(self._file, mode=MEMMAP_MODES[self.mode],
dtype=np.uint8)
# Now we immediately discard the memmap array; we are
# really just using it as a factory function to instantiate
# the mmap object in a convenient way (may later do away
# with this usage)
self._mmap = memmap.base
# Prevent dorking with self._memmap._mmap by memmap.__del__
# in Numpy 1.6 (see
# https://github.com/numpy/numpy/commit/dcc355a0b179387eeba10c95baf2e1eb21d417c7)
memmap._mmap = None
del memmap
return np.ndarray(shape=shape, dtype=dtype, offset=offset,
buffer=self._mmap)
else:
count = reduce(operator.mul, shape)
self._file.seek(offset)
data = _array_from_file(self._file, dtype, count, '')
data.shape = shape
return data
finally:
# Make sure we leave the file in the position we found it; on
# some platforms (e.g. Windows) mmaping a file handle can also
# reset its file pointer
self._file.seek(filepos)
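    # Illustrative usage (hypothetical file name, not from the API docs): with
    # memmap enabled, readarray() returns an ndarray view backed by the shared
    # mmap rather than a copy, e.g.
    #   f = _File('example.fits', mode='readonly', memmap=True)
    #   header_bytes = f.readarray(shape=(2880,), dtype=np.uint8, offset=0)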
def writable(self):
if self.readonly:
return False
return iswritable(self._file)
def write(self, string):
if hasattr(self._file, 'write'):
_write_string(self._file, string)
def writearray(self, array):
"""
Similar to file.write(), but writes a numpy array instead of a string.
Also like file.write(), a flush() or close() may be needed before
the file on disk reflects the data written.
"""
if hasattr(self._file, 'write'):
_array_to_file(array, self._file)
def flush(self):
if hasattr(self._file, 'flush'):
self._file.flush()
def seek(self, offset, whence=0):
# In newer Python versions, GzipFiles support the whence argument, but
# I don't think it was added until 2.6; instead of assuming it's
# present, we implement our own support for it here
if not hasattr(self._file, 'seek'):
return
if isinstance(self._file, gzip.GzipFile):
if whence:
if whence == 1:
offset = self._file.offset + offset
else:
raise ValueError('Seek from end not supported')
self._file.seek(offset)
else:
self._file.seek(offset, whence)
pos = self._file.tell()
if self.size and pos > self.size:
warnings.warn('File may have been truncated: actual file length '
'({}) is smaller than the expected size ({})'.format(
self.size, pos), AstropyUserWarning)
def tell(self):
if not hasattr(self._file, 'tell'):
raise EOFError
return self._file.tell()
def truncate(self, size=None):
if hasattr(self._file, 'truncate'):
self._file.truncate(size)
def close(self):
"""
Close the 'physical' FITS file.
"""
if hasattr(self._file, 'close'):
self._file.close()
self._maybe_close_mmap()
# Set self._memmap to None anyways since no new .data attributes can be
# loaded after the file is closed
self._mmap = None
self.closed = True
def _maybe_close_mmap(self, refcount_delta=0):
"""
When mmap is in use these objects hold a reference to the mmap of the
file (so there is only one, shared by all HDUs that reference this
file).
This will close the mmap if there are no arrays referencing it.
"""
if (self._mmap is not None and
sys.getrefcount(self._mmap) == 2 + refcount_delta):
self._mmap.close()
self._mmap = None
def _overwrite_existing(self, overwrite, fileobj, closed):
"""Overwrite an existing file if ``overwrite`` is ``True``, otherwise
raise an IOError. The exact behavior of this method depends on the
_File object state and is only meant for use within the ``_open_*``
internal methods.
"""
# The file will be overwritten...
if ((self.file_like and hasattr(fileobj, 'len') and fileobj.len > 0) or
(os.path.exists(self.name) and os.path.getsize(self.name) != 0)):
if overwrite:
if self.file_like and hasattr(fileobj, 'truncate'):
fileobj.truncate(0)
else:
if not closed:
fileobj.close()
os.remove(self.name)
else:
raise IOError("File {!r} already exists.".format(self.name))
def _open_fileobj(self, fileobj, mode, overwrite):
"""Open a FITS file from a file object or a GzipFile object."""
closed = fileobj_closed(fileobj)
fmode = fileobj_mode(fileobj) or IO_FITS_MODES[mode]
if mode == 'ostream':
self._overwrite_existing(overwrite, fileobj, closed)
if not closed:
# Although we have a specific mapping in IO_FITS_MODES from our
# custom file modes to raw file object modes, many of the latter
# can be used appropriately for the former. So determine whether
# the modes match up appropriately
if ((mode in ('readonly', 'denywrite', 'copyonwrite') and
not ('r' in fmode or '+' in fmode)) or
(mode == 'append' and fmode not in ('ab+', 'rb+')) or
(mode == 'ostream' and
not ('w' in fmode or 'a' in fmode or '+' in fmode)) or
(mode == 'update' and fmode not in ('rb+', 'wb+'))):
raise ValueError(
"Mode argument '{}' does not match mode of the input "
"file ({}).".format(mode, fmode))
self._file = fileobj
elif isfile(fileobj):
self._file = fileobj_open(self.name, IO_FITS_MODES[mode])
else:
self._file = gzip.open(self.name, IO_FITS_MODES[mode])
if fmode == 'ab+':
# Return to the beginning of the file--in Python 3 when opening in
# append mode the file pointer is at the end of the file
self._file.seek(0)
def _open_filelike(self, fileobj, mode, overwrite):
"""Open a FITS file from a file-like object, i.e. one that has
read and/or write methods.
"""
self.file_like = True
self._file = fileobj
if fileobj_closed(fileobj):
raise IOError("Cannot read from/write to a closed file-like "
"object ({!r}).".format(fileobj))
if isinstance(fileobj, zipfile.ZipFile):
self._open_zipfile(fileobj, mode)
self._file.seek(0)
# We can bypass any additional checks at this point since now
# self._file points to the temp file extracted from the zip
return
# If there are no seek or tell methods, then set the mode to
# output streaming.
if (not hasattr(self._file, 'seek') or
not hasattr(self._file, 'tell')):
self.mode = mode = 'ostream'
if mode == 'ostream':
self._overwrite_existing(overwrite, fileobj, False)
# Any "writeable" mode requires a write() method on the file object
if (self.mode in ('update', 'append', 'ostream') and
not hasattr(self._file, 'write')):
raise IOError("File-like object does not have a 'write' "
"method, required for mode '{}'.".format(self.mode))
# Any mode except for 'ostream' requires readability
if self.mode != 'ostream' and not hasattr(self._file, 'read'):
raise IOError("File-like object does not have a 'read' "
"method, required for mode {!r}.".format(self.mode))
def _open_filename(self, filename, mode, overwrite):
"""Open a FITS file from a filename string."""
if mode == 'ostream':
self._overwrite_existing(overwrite, None, True)
if os.path.exists(self.name):
with fileobj_open(self.name, 'rb') as f:
magic = f.read(4)
else:
magic = b('')
ext = os.path.splitext(self.name)[1]
if ext == '.gz' or magic.startswith(GZIP_MAGIC):
# Handle gzip files
self._file = gzip.open(self.name, IO_FITS_MODES[mode])
self.compression = 'gzip'
elif ext == '.zip' or magic.startswith(PKZIP_MAGIC):
# Handle zip files
self._open_zipfile(self.name, mode)
elif ext == '.bz2' or magic.startswith(BZIP2_MAGIC):
# Handle bzip2 files
if mode in ['update', 'append']:
raise IOError("update and append modes are not supported "
"with bzip2 files")
# bzip2 only supports 'w' and 'r' modes
bzip2_mode = 'w' if mode == 'ostream' else 'r'
self._file = bz2.BZ2File(self.name, bzip2_mode)
else:
self._file = fileobj_open(self.name, IO_FITS_MODES[mode])
# Make certain we're back at the beginning of the file
# BZ2File does not support seek when the file is open for writing, but
# when opening a file for write, bz2.BZ2File always truncates anyway.
if isinstance(self._file, bz2.BZ2File) and mode == 'ostream':
pass
else:
self._file.seek(0)
@classproperty(lazy=True)
def _mmap_available(cls):
"""Tests that mmap, and specifically mmap.flush works. This may
not be the case on some uncommon platforms (see
https://github.com/astropy/astropy/issues/968).
If mmap.flush is found not to work, ``self.memmap = False`` is
set and a warning is issued.
"""
tmpfd, tmpname = tempfile.mkstemp()
try:
# Windows does not allow mappings on empty files
os.write(tmpfd, b' ')
os.fsync(tmpfd)
try:
mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
except mmap.error as exc:
warnings.warn('Failed to create mmap: {}; mmap use will be '
'disabled'.format(str(exc)), AstropyUserWarning)
del exc
return False
try:
mm.flush()
except mmap.error:
warnings.warn('mmap.flush is unavailable on this platform; '
'using mmap in writeable mode will be disabled',
AstropyUserWarning)
return False
finally:
mm.close()
finally:
os.close(tmpfd)
os.remove(tmpname)
return True
def _open_zipfile(self, fileobj, mode):
"""Limited support for zipfile.ZipFile objects containing a single
file. Allows reading only for now by extracting the file to a
tempfile.
"""
if mode in ('update', 'append'):
raise IOError(
"Writing to zipped fits files is not currently "
"supported")
if not isinstance(fileobj, zipfile.ZipFile):
zfile = zipfile.ZipFile(fileobj)
close = True
else:
zfile = fileobj
close = False
namelist = zfile.namelist()
if len(namelist) != 1:
raise IOError(
"Zip files with multiple members are not supported.")
self._file = tempfile.NamedTemporaryFile(suffix='.fits')
self._file.write(zfile.read(namelist[0]))
if close:
zfile.close()
self.compression = 'zip'
def _is_random_access_file_backed(fileobj):
"""Returns `True` if fileobj is a `file` or `io.FileIO` object or a
`gzip.GzipFile` object.
Although reading from a zip file is supported, this does not include
support for random access, and we do not yet support reading directly
from an already opened `zipfile.ZipFile` object.
"""
return isfile(fileobj) or isinstance(fileobj, gzip.GzipFile)
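# Usage sketch (hypothetical path, shown only to illustrate the context-manager
# support provided by __enter__/__exit__ above):
#
#     with _File('example.fits', mode='readonly', memmap=False) as f:
#         first_block = f.read(2880)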
|
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
try:
# Python 2.x
import ConfigParser as configparser
except ImportError:
# Python 3.x
import configparser
import atexit
import logging
import logging.handlers
import os
import pwd
import socket
import sys
from mcrunner import __version__
from mcrunner.connection import ServerSocketConnection
from mcrunner.daemon import Daemon
from mcrunner.exceptions import (
ConfigException,
MCRunnerException,
ServerNotRunningException,
ServerStartException,
)
from mcrunner.server import MinecraftServer
from mcrunner.server_status import ServerStatus
logger = logging.getLogger(__name__)
MCRUNNERD_COMMAND_DELIMITER = '|+|'
class MCRunner(Daemon):
"""
MCRunner daemon class (mcrunnerd).
On startup, the mcrunnerd daemon creates a unix socket which facilitates communication
between the daemon process and MCRunner client frontends. MCRunner clients use the
socket for primitive communication to start and stop Minecraft
"""
CONFIG_DEFAULTS = {
'user': None
}
log_file = None
user = None
sock_file = None
servers = None
def __init__(self, *args, **kwargs):
self.config_file = kwargs.pop('config_file', '/etc/mcrunner/mcrunner.conf')
self.pid_file = kwargs.pop('pid_file', '/tmp/mcrunner.pid')
if not os.path.exists(self.config_file):
raise ConfigException('Config file missing: %s' % self.config_file)
self.load_config()
self.setup_logger()
self.set_uid()
super(MCRunner, self).__init__(self.pid_file, *args, **kwargs)
def load_config(self):
"""
Load config from file.
"""
self.servers = {}
config = configparser.ConfigParser(defaults=self.CONFIG_DEFAULTS)
config.read(self.config_file)
for section in config.sections():
if section == 'mcrunnerd':
self.log_file = config.get(section, 'logfile')
self.user = config.get(section, 'user')
elif section == 'mcrunner':
self.sock_file = config.get(section, 'url')
elif section.startswith('server:'):
_, name = section.split('server:')
items = config.items(section)
items_dict = dict(items)
# convert bool values
for k, v in items_dict.items():
if isinstance(v, str):
if v.lower() in ('false', 'no', 'off'):
items_dict[k] = False
elif v.lower() in ('true', 'yes', 'on'):
items_dict[k] = True
self.servers[name] = MinecraftServer(
name,
items_dict.pop('path'),
items_dict.pop('jar'),
items_dict.pop('opts'),
**items_dict
)
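    # Illustrative config layout parsed above (paths, the server name and the
    # option values are hypothetical examples, not defaults of this package):
    #
    #   [mcrunnerd]
    #   logfile = /var/log/mcrunner/mcrunnerd.log
    #   user = minecraft
    #
    #   [mcrunner]
    #   url = /tmp/mcrunner.sock
    #
    #   [server:survival]
    #   path = /opt/minecraft/survival
    #   jar = minecraft_server.jar
    #   opts = -Xmx2G -Xms1G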
def socket_server(self):
"""
Create and initialize unix socket at the path stored in configuration.
"""
try:
os.unlink(self.sock_file)
except OSError:
pass
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(self.sock_file)
sock.listen(1)
return sock
def setup_logger(self):
"""
Setup root logger for use in all modules.
"""
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler = logging.handlers.RotatingFileHandler(self.log_file, maxBytes=2000000, backupCount=10)
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(logging.INFO)
def get_status(self, connection):
"""
Return a string representation of all server statuses.
"""
response = []
for server_name, server in self.servers.items():
response.append('%s: %s' % (server_name, server.get_status().value))
connection.send_message('\n'.join(response))
def start_minecraft_server(self, name, connection=None):
"""
Attempt to start a server of a given name.
"""
server = self.servers.get(name)
if not server:
if connection:
connection.send_message('Minecraft server "%s" not defined.' % name)
return
try:
server.start(connection=connection)
except ServerStartException:
pass
def stop_minecraft_server(self, name, connection=None):
"""
Attempt to stop a server of a given name.
"""
server = self.servers.get(name)
if not server:
if connection:
connection.send_message('Minecraft server "%s" not defined' % name)
return
try:
server.stop(connection=connection)
except ServerNotRunningException:
pass
def send_command(self, name, command, connection):
"""
Send command string to server of a given name.
"""
server = self.servers.get(name)
if not server:
connection.send_message('Minecraft server "%s" not defined' % name)
return
logger.info('Sending command to server "%s": "%s"', name, command)
try:
server.run_command(command, connection=connection)
except ServerNotRunningException:
message = 'Minecraft server "%s" not running' % name
logger.warning(message)
connection.send_message(message)
else:
connection.send_message('Sent command to Minecraft server "%s": "%s"' % (name, command))
def handle_socket_data(self, data, connection):
"""
Handle socket data from an mcrunner client by dispatching the parsed
command to the appropriate handler. Any response is written back over
the given connection rather than returned.
"""
parts = data.split(MCRUNNERD_COMMAND_DELIMITER)
if parts[0] == 'status':
self.get_status(connection)
elif parts[0] == 'start':
self.start_minecraft_server(parts[1], connection=connection)
elif parts[0] == 'stop':
self.stop_minecraft_server(parts[1], connection=connection)
elif parts[0] == 'restart':
self.stop_minecraft_server(parts[1], connection=connection)
self.start_minecraft_server(parts[1], connection=connection)
elif parts[0] == 'command':
self.send_command(parts[1], parts[2], connection)
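    # Wire-format sketch: fields are joined with MCRUNNERD_COMMAND_DELIMITER
    # ('|+|'); the server name and command below are hypothetical examples.
    #   'status'                      -> get_status(connection)
    #   'start|+|survival'            -> start_minecraft_server('survival', ...)
    #   'command|+|survival|+|say hi' -> send_command('survival', 'say hi', ...)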
def on_exit(self):
"""
Exit signal handler, attempt to shut down all Minecraft servers.
"""
for server_name, server in self.servers.items():
if server.get_status() == ServerStatus.RUNNING:
self.stop_minecraft_server(server_name)
def set_uid(self):
"""
Set uid for daemon.
"""
if not self.user:
return
try:
pwnam = pwd.getpwnam(self.user)
except KeyError:
logger.error('User not found for setuid: %s' % self.user)
sys.exit(1)
uid = pwnam.pw_uid
current_uid = os.getuid()
if current_uid == uid:
# Already running as the correct user
return
if current_uid != 0:
logger.error('Can\'t setuid if not running as root')
sys.exit(1)
try:
os.setuid(uid)
except OSError:
logger.error('Could not switch to user %s' % self.user)
sys.exit(1)
def run(self):
"""
Main daemon runloop function. Handles receiving and responding to MCRunner
client commands.
"""
atexit.register(self.on_exit)
self._log_and_output('info', 'Starting mcrunnerd (%s)...' % __version__)
try:
sock = self.socket_server()
except Exception as e:
self._log_and_output('exception', 'Could not start mcrunnerd: %s' % str(e))
return
self._log_and_output('info', 'mcrunnerd (%s) started.' % __version__)
while True:
try:
logger.debug('Awaiting socket connection')
conn, client_address = sock.accept()
connection = ServerSocketConnection(conn)
logger.debug('Established socket connection')
try:
data = connection.receive_message()
logger.debug('Handling socket data')
self.handle_socket_data(data, connection)
logger.debug('Socket data handled')
finally:
logger.debug('Closing socket connection')
connection.close()
except socket.error:
self._log_and_output('exception', 'Error during socket connection')
except SystemExit:
self._log_and_output('info', 'Stopping mcrunnerd (%s)...' % __version__)
break
self._log_and_output('info', 'mcrunnerd (%s) stopped.' % __version__)
def _log_and_output(self, level, message):
if level in ['debug', 'info', 'warning', 'error', 'exception']:
getattr(logger, level)(message)
if level in ['error', 'exception']:
_error(message)
elif level != 'debug':
_output(message)
def _output(string):
sys.stdout.write('%s\n' % string)
def _error(string):
sys.stderr.write('%s\n' % string)
def main():
try:
daemon = MCRunner()
except MCRunnerException as e:
_error(str(e))
sys.exit(2)
if len(sys.argv) == 1:
_output("Usage: %s start|stop|restart" % sys.argv[0])
sys.exit(2)
first_arg = sys.argv[1]
if len(sys.argv) == 2:
if first_arg == 'start':
daemon.start()
elif first_arg == 'stop':
daemon.stop()
elif first_arg == 'restart':
daemon.restart()
else:
_output('Unknown command: %s' % first_arg)
sys.exit(2)
else:
_output("Usage: %s start|stop|restart" % sys.argv[0])
sys.exit(2)
if __name__ == "__main__":
main()
|
|
import re
from datetime import timedelta
from django import template
from django.utils import html, safestring
from django.template.defaultfilters import stringfilter
from math import ceil
register = template.Library()
@register.simple_tag
def sitenav():
return """<ul id="sitenav">
<li><a href="/" rel="sigrie" class="">Database</a></li>
<li>» <a href="/items" rel="items">Items</a></li>
<li>» <a href="/items/9" rel="items_9" class="">Recipe</a></li>
<li>» <a href="/items/9/2" rel="items_9_2">Tailoring</a></li>
</ul>
"""
def esc(text, autoescape):
if autoescape:
return html.conditional_escape(text)
return text
@register.filter
def colorinline(value, autoescape=None):
pattern = r"\|c([0-9a-f]{8})(.+)\|r"
sre = re.search(pattern, value, re.IGNORECASE)
if not sre:
return value
color, text = sre.groups()
output = '<span style="color:#%s;">%s</span>' % (color[2:], esc(text, autoescape))
output = "".join([value[:sre.start()], output, value[sre.end():]])
return safestring.mark_safe(output)
colorinline.needs_autoescape = True
@register.filter
def genderinline(value, autoescape=None):
if not value.find("$"):
return value
pattern = r"\$(G|g)\s?([^:]+):([^;]+);"
sre = re.search(pattern, value)
if not sre:
return value
char, male, female = sre.groups()
output = '<%s/%s>' % (esc(male.strip(), autoescape), esc(female.strip(), autoescape))
output = "".join([esc(value[:sre.start()], autoescape), output, esc(value[sre.end():], autoescape)])
return safestring.mark_safe(output)
genderinline.needs_autoescape = True
DURATIONS_DEFAULT = {
"second": "second",
"seconds": "seconds",
"minute": "minute",
"minutes": "minutes",
"hour": "hour",
"hours": "hours",
"day": "day",
"days": "days",
}
DURATIONS_SHORT = {
"second": "sec",
"seconds": "sec",
"minute": "min",
"minutes": "min",
"hour": "hour",
"hours": "hrs",
"day": "day",
"days": "days",
}
DURATIONS_SHORTCAP = {
"second": "Sec",
"seconds": "Sec",
"minute": "Min",
"minutes": "Min",
"hour": "Hr",
"hours": "Hr",
"day": "Day",
"days": "Days",
}
@register.filter
def duration(value, locales=DURATIONS_DEFAULT):
if not isinstance(value, timedelta):
if value < 0: value = 0
value = timedelta(microseconds=value)
if value == timedelta(seconds=1):
return "1 %s" % (locales["second"])
elif value < timedelta(minutes=1):
return "%.3g %s" % (value.seconds+float(value.microseconds)/1000000, locales["seconds"])
elif value < timedelta(hours=1):
return "%.3g %s" % (value.seconds / 60, value.seconds >= 120 and locales["minutes"] or locales["minute"])
elif value < timedelta(days=1):
return "%d %s" % (ceil(value.seconds / 3600.0), value.seconds > 3600 and locales["hours"] or locales["hour"])
else:
return "%.3g %s" % (value.days, value.days > 1 and locales["days"] or locales["day"])
duration.is_safe = True
@register.filter
def duration_short(value):
return duration(value, DURATIONS_SHORT)
@register.filter
def duration_shortcap(value):
return duration(value, DURATIONS_SHORTCAP)
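# Worked examples for the duration filter above (illustrative values):
# timedelta(seconds=1) renders as "1 second", timedelta(hours=5) falls into the
# "< 1 day" branch and renders as "5 hours" (ceil(18000 / 3600.0) == 5), and
# timedelta(days=3) renders as "3 days".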
PRICE_TEMPLATE = '<span class="%(letter)s">%(amt)i<span class="price-hidden">%(letter)s</span></span>'
@register.filter
def price(value, autoescape=None):
value = int(value)
if not value:
g, s, c = 0, 0, 0
else:
g = divmod(value, 10000)[0]
s = divmod(value, 100)[0] % 100
c = value % 100
output = '<span class="price">%s %s %s</span>' % (
g and PRICE_TEMPLATE % {"amt": g, "letter": "g", "alt": "Gold"} or "",
s and PRICE_TEMPLATE % {"amt": s, "letter": "s", "alt": "Silver"} or "",
c and PRICE_TEMPLATE % {"amt": c, "letter": "c", "alt": "Copper"} or "",
)
return safestring.mark_safe(output)
price.needs_autoescape = True
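# Worked example for the price filter: a raw copper value of 123456 splits into
# g = divmod(123456, 10000)[0] = 12, s = divmod(123456, 100)[0] % 100 = 34 and
# c = 123456 % 100 = 56, rendering as 12g 34s 56c.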
@register.filter
def mapify(locations, autoescape=None):
locations = locations.filter(x__gt=0, y__gt=0).select_related()
if not locations.count():
return ""
html_base = """
<div id="map-container"></div>
<script type="text/javascript">
%s
maplib.renderMaps([%s])
</script>
"""
html_vars = """
var %s = {
name: %r,
file: %r,
nodes: %r
}
"""
ret = {}
for location in locations.all():
key = "map_%i_%i" % (location.zone_id, abs(hash(location.zone.map)))
if key not in ret:
map = str(location.zone.map)
if location.floor:
map += str(location.floor)
ret[key] = (str(location.zone.name), map, [])
ret[key][2].append([location.x, location.y])
vars_list = []
for k in ret:
vars_list.append(html_vars % (k, ret[k][0], ret[k][1], ret[k][2]))
vars_html = "\n".join(vars_list)
return html_base % (vars_html, ",".join(ret.keys()))
mapify.needs_autoescape = True
@register.filter
def supermark(value):
if isinstance(value, float):
return "%+f" % value
else:
return "%+i" % int(value)
supermark.is_safe = True
@register.filter
def url(value, text="", autoescape=None):
url = hasattr(value, "get_absolute_url") and value.get_absolute_url()
if url:
classes = (hasattr(value, "get_htclasses") and ' class="%s"' % (value.get_htclasses())) or ""
html = '<a href="%s"%s>%s</a>' % (url, classes, esc(str(text or value), autoescape=True))
return safestring.mark_safe(html)
text = text or value
try:
return esc(str(text), autoescape=True)
except UnicodeError:
return text.encode("ascii", "ignore")
url.needs_autoescape = True
@register.filter
def icon(value, arg=64, autoescape=None):
try:
arg = int(arg)
except ValueError: # Invalid literal for int()
return value # Fail silently
BASE_URL = "http://db.mmo-champion.com"
url = hasattr(value, "get_absolute_url") and value.get_absolute_url()
if not url:
return safestring.mark_safe(value)
else:
icon = value.icon or "temp"
value = esc(str(value), autoescape)
return safestring.mark_safe('<a href="%s" class="iconinline"><img src="http://static.mmo-champion.com/db/img/icons/%s.png" alt="%s" width="%i" height="%i"/></a>' % (url, icon, value, arg, arg))
icon.needs_autoescape = True
@register.filter
def iconize(value, arg="small", autoescape=None):
if arg == "large":
size = 40
else:
size = 16
_icon = icon(value, size)
_url = url(value)
return safestring.mark_safe('<div class="iconized-%s"><div class="icon">%s</div> <span>%s</span></div>' % (arg, _icon, _url))
iconize.needs_autoescape = True
@register.filter
def screenshot(value, autoescape=None):
if not value:
return ""
screenshot = value[0]
url = screenshot.get_absolute_url()
# Don't give it a size as its dynamic
return safestring.mark_safe('<a id="screenshot-thumbnail" href="%s.jpg"><img src="%s.thumbnail.jpg" alt="%s"/></a>' % (url, url, screenshot.caption))
screenshot.needs_autoescape = True
@register.filter
def tooltip(obj, paperdoll, autoescape=None):
return safestring.mark_safe(obj.tooltip(paperdoll))
tooltip.needs_autoescape = True
@register.filter
def str_repr(value):
value = str(value)
return repr(value)
@register.filter
def verbose_name(cls):
return cls()._meta.verbose_name
@register.filter
def verbose_name_plural(cls):
return cls()._meta.verbose_name_plural
@register.filter
@stringfilter
def truncate(value, arg):
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
if len(value) > length:
value = value[:length]
if not value.endswith("..."):
value += "..."
return value
@register.tag
def sigrielisting(parser, token):
try:
cls, iterable = token.split_contents()[1:]
iterable = parser.compile_filter(iterable)
except ValueError:
raise template.TemplateSyntaxError("%s tag requires two arguments" % token.split_contents()[0])
return SigrieListing(cls, iterable)
class SigrieListing(template.Node):
def __init__(self, cls, iterable):
self.cls = cls
self.iterable = iterable
def render(self, context):
from sigrie.owdb import listings
cls = getattr(listings, self.cls)
iterable = self.iterable.resolve(context)
return cls(iterable).render()
|
|
# -*- coding: utf-8 -*-
from thumbnails.conf import settings
from thumbnails.errors import ThumbnailError
CROP_ALIASES = {
'x': {
'left': 0,
'center': 50,
'right': 100
},
'y': {
'top': 0,
'center': 50,
'bottom': 100
}
}
class BaseThumbnailEngine(object):
"""
A base class for Thumbnail engines. Any thumbnail engine should be a subclass of this and
implement all methods prefixed with ``engine``.
"""
def get_thumbnail(self, original, size, crop, options):
"""
Wrapper for .create() with cleanup.
:param original:
:param size:
:param crop:
:param options:
:return: An image object
"""
try:
image = self.create(original, size, crop, options)
except ThumbnailError:
image = None
finally:
self.cleanup(original)
return image
def create(self, original, size, crop, options=None):
"""
Creates a thumbnail. It loads the image, scales it and crops it.
:param original:
:param size:
:param crop:
:param options:
:return:
"""
if options is None:
options = self.evaluate_options()
image = self.engine_load_image(original)
image = self.scale(image, size, crop, options)
crop = self.parse_crop(crop, self.get_image_size(image), size)
image = self.crop(image, size, crop, options)
image = self.colormode(image, options)
return image
def scale(self, image, size, crop, options):
"""
Wrapper for ``engine_scale``, checks if the scaling factor is below one or that scale_up
option is set to True before calling ``engine_scale``.
:param image:
:param size:
:param crop:
:param options:
:return:
"""
original_size = self.get_image_size(image)
factor = self._calculate_scaling_factor(original_size, size, crop is not None)
if factor < 1 or options['scale_up']:
width = int(original_size[0] * factor)
height = int(original_size[1] * factor)
image = self.engine_scale(image, width, height)
return image
def crop(self, image, size, crop, options):
"""
Wrapper for ``engine_crop``, will return without calling ``engine_crop`` if crop is None.
:param image:
:param size:
:param crop:
:param options:
:return:
"""
if not crop:
return image
return self.engine_crop(image, size, crop, options)
def cleanup(self, original):
"""
Cleanup after thumbnail creation.
:param original:
"""
self.engine_cleanup(original)
def get_image_size(self, image):
"""
Wrapper for ``engine_image_size``
:param image:
:return: A tuple with width and height
:rtype: tuple
"""
return self.engine_image_size(image)
def raw_data(self, image, options):
"""
Wrapper for ``engine_raw_data``.
:param image:
:param options:
"""
return self.engine_raw_data(image, options)
def colormode(self, image, options):
"""
Wrapper for ``engine_colormode``.
:param image:
:param options:
:return:
"""
mode = options['colormode']
return self.engine_colormode(image, mode)
def _calculate_scaling_factor(self, original_size, size, has_crop):
factors = []
if size[0] is not None:
factors.append(float(size[0]) / original_size[0])
if size[1] is not None:
factors.append(float(size[1]) / original_size[1])
if has_crop:
return max(factors)
return min(factors)
def evaluate_options(self, options=None):
_options = options
options = self.default_options()
if _options:
options.update(_options)
return options
def default_options(self):
return {
'scale_up': settings.THUMBNAIL_SCALE_UP,
'quality': settings.THUMBNAIL_QUALITY,
'colormode': settings.THUMBNAIL_COLORMODE,
}
def get_format(self, image, options):
if 'format' in options:
return options['format']
if settings.THUMBNAIL_FORCE_FORMAT is not None:
return settings.THUMBNAIL_FORCE_FORMAT
try:
image_format = self.engine_get_format(image)
if image_format:
return image_format
except AttributeError:
pass
return settings.THUMBNAIL_FALLBACK_FORMAT
@staticmethod
def parse_size(size):
"""
Parses size string into a tuple
:param size: String on the form '100', 'x100 or '100x200'
:return: Tuple of two integers for width and height
:rtype: tuple
"""
if size.startswith('x'):
return None, int(size.replace('x', ''))
if 'x' in size:
return int(size.split('x')[0]), int(size.split('x')[1])
return int(size), None
def parse_crop(self, crop, original_size, size):
"""
Parses crop into a tuple usable by the crop function.
:param crop: String with the crop settings.
:param original_size: A tuple of size of the image that should be cropped.
:param size: A tuple of the wanted size.
:return: Tuple of two integers with crop settings
:rtype: tuple
"""
if crop is None:
return None
crop = crop.split(' ')
if len(crop) == 1:
crop = crop[0]
x_crop = 50
y_crop = 50
if crop in CROP_ALIASES['x']:
x_crop = CROP_ALIASES['x'][crop]
elif crop in CROP_ALIASES['y']:
y_crop = CROP_ALIASES['y'][crop]
x_offset = self.calculate_offset(x_crop, original_size[0], size[0])
y_offset = self.calculate_offset(y_crop, original_size[1], size[1])
return int(x_offset), int(y_offset)
@staticmethod
def calculate_offset(percent, original_length, length):
"""
Calculates crop offset based on percentage.
:param percent: A percentage representing the size of the offset.
:param original_length: The length the distance that should be cropped.
:param length: The desired length.
:return: The offset in pixels
:rtype: int
"""
return int(
max(
0,
min(percent * original_length / 100.0, original_length - length / 2) - length / 2)
)
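    # Worked example (illustrative numbers): cropping a 1000px dimension down
    # to 500px with a 'left' crop gives calculate_offset(0, 1000, 500)
    # == int(max(0, min(0, 750) - 250)) == 0, while a centered crop gives
    # calculate_offset(50, 1000, 500) == int(max(0, min(500, 750) - 250)) == 250.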
@staticmethod
def calculate_alternative_resolution_size(resolution, size):
if size[0] is not None:
resolution_size = int(size[0] * resolution),
else:
resolution_size = None,
if size[1] is not None:
resolution_size += int(size[1] * resolution),
else:
resolution_size += None,
return resolution_size
def engine_load_image(self, original):
"""
Engine specific loading of image, should be implemented by all subclasses.
:param original: The file that should be loaded.
:return: An image as an image object used by the engine.
"""
raise NotImplementedError
def engine_raw_data(self, image, options):
"""
Engine specific saving of image into a file object, should be implemented by all subclasses.
:param image: The image object that should be saved.
:param options: Options that should be used in order to save the image e.g. quality.
:return: File object with image contents
"""
raise NotImplementedError
def engine_image_size(self, image):
"""
Engine specific fetching of image size, should be implemented by all subclasses.
:param image: The image to check size of.
:return: A tuple of two integers with width and height
:rtype: tuple
"""
raise NotImplementedError
def engine_scale(self, image, width, height):
"""
Engine specific scaling, should be implemented by all subclasses.
:param image: The image object that should be scaled.
:param width: The wanted width
:param height: The wanted height
:return:
"""
raise NotImplementedError
def engine_crop(self, image, size, crop, options):
"""
Engine specific cropping, should be implemented by all subclasses.
:param image:
:param size:
:param crop:
:param options:
:return:
"""
raise NotImplementedError
def engine_cleanup(self, original):
"""
Engine specific cleanup, should be implemented by all subclasses.
:param original:
:return:
"""
raise NotImplementedError
def engine_colormode(self, image, colormode):
"""
Sets the correct colormode on the image.
:param image:
:param colormode:
:return:
"""
raise NotImplementedError
def engine_get_format(self, image):
"""
Reads the format of the image object passed into the arguments.
:param image: An image object from the engine.
:return: A string with the current format of the image.
"""
raise NotImplementedError
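# Minimal sketch of a concrete engine built on Pillow (illustrative only; the
# engines shipped with this package may differ, and the resizing/cropping below
# assumes `size` and `crop` arrive as fully resolved integer tuples). Kept as a
# comment so this module does not grow a hard Pillow dependency.
#
#     from io import BytesIO
#     from PIL import Image
#
#     class ExamplePillowEngine(BaseThumbnailEngine):
#         def engine_load_image(self, original):
#             return Image.open(original)
#
#         def engine_image_size(self, image):
#             return image.size
#
#         def engine_scale(self, image, width, height):
#             return image.resize((width, height))
#
#         def engine_crop(self, image, size, crop, options):
#             x, y = crop
#             return image.crop((x, y, x + size[0], y + size[1]))
#
#         def engine_colormode(self, image, colormode):
#             return image.convert(colormode)
#
#         def engine_raw_data(self, image, options):
#             buf = BytesIO()
#             image.save(buf, format=self.get_format(image, options),
#                        quality=options['quality'])
#             return buf.getvalue()
#
#         def engine_cleanup(self, original):
#             pass
#
#         def engine_get_format(self, image):
#             return image.format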
|
|
# -*- encoding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Haltu Oy, http://haltu.fi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import floppyforms as forms
from django.utils.translation import ugettext, ugettext_lazy as _
from django.utils.text import capfirst
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from selector.models import User, RegisterToken
class UserSelectWidget(forms.CheckboxSelectMultiple):
template_name = 'forms/inviteform_user.html'
class SearchForm(forms.Form):
municipality = forms.CharField()
school = forms.CharField()
group = forms.CharField(required=False)
class InviteForm(forms.Form):
users = forms.MultipleChoiceField(choices=set(), widget=UserSelectWidget)
def __init__(self, *args, **kwargs):
if 'users_choices' in kwargs:
users_choices = kwargs.pop('users_choices')
else:
users_choices = set()
if users_choices is None:
users_choices = set()
super(InviteForm, self).__init__(*args, **kwargs)
self.fields['users'].choices = users_choices
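# Illustrative sketch (not part of the original module): how a view might build the
# users_choices kwarg expected by InviteForm. The queryset argument is an assumption;
# any iterable of (value, label) pairs will do.
def _example_invite_form(users_queryset, data=None):
    users_choices = [(u.pk, u.username) for u in users_queryset]
    return InviteForm(data, users_choices=users_choices)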
class RegisterForm(forms.Form):
token = forms.CharField(label=_(u"Token"))
def clean_token(self):
try:
token = RegisterToken.objects.get(token=self.cleaned_data['token'], method=RegisterToken.EMAIL)
return token
except RegisterToken.DoesNotExist:
raise forms.ValidationError("Invalid token.")
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField(max_length=2048)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
The 'request' parameter is set for custom auth use by subclasses.
The form data comes in via the standard 'data' kwarg.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if self.fields['username'].label is None:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
"""
Controls whether the given User may log in. This is a policy setting,
independent of end-user authentication. This default behavior is to
allow login by active users, and reject login by inactive users.
If the given user cannot log in, this method should raise a
``forms.ValidationError``.
If the given user may log in, this method should return None.
"""
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'duplicate_username': _("A user with that username already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
username = forms.RegexField(label=_("Username"), max_length=2048,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 2048 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
class Meta:
model = User
fields = ("username",)
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(
self.error_messages['duplicate_username'],
code='duplicate_username',
)
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=commit)
user.set_unusable_password()
user.save()
return user
class UserChangeForm(forms.ModelForm):
username = forms.RegexField(
label=_("Username"), max_length=2048, regex=r"^[\w.@+-]+$",
help_text=_("Required. 2048 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password = ReadOnlyPasswordHashField(label=_("Password"),
help_text=_("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>."))
class Meta:
model = User
fields = '__all__'
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
# vim: tabstop=2 expandtab shiftwidth=2 softtabstop=2
|
|
import copy
from django.http import (
HttpResponse,
HttpResponseRedirect
)
from django.template import loader
from django.shortcuts import render
from django.views.generic import (
View,
ListView,
DetailView
)
from django.views.generic.edit import (
FormView,
UpdateView
)
from django.db.models import Q
import django.utils.http
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.urls import reverse_lazy
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from mailer.signals import project_created_by_user
from mailer.signals import allocation_request_created_by_user
from accounts.models import (
User,
RcLdapUser
)
from projects.models import (
Project,
Reference,
Allocation,
AllocationRequest
)
from projects.forms import (
ProjectForm,
ProjectEditForm,
ReferenceForm,
AllocationRequestForm
)
GENERAL_ACCOUNT_REQUEST_SUBJECT = "{general_account} account request: {username}"
GENERAL_ACCOUNT_REQUEST_BODY = "Please add me ({username}) to the {general_account} account. I will use it to [insert reason or activity here]."
class ProjectAccessMixin(object):
def is_manager(self,request_user,project):
return request_user in project.managers.all()
def get_manager_or_redirect(self,request_user,project,redirect_view='projects:project-detail'):
if not self.is_manager(request_user,project):
return redirect(redirect_view, pk=project.pk)
return request_user
class ProjectListView(ListView):
model = Project
template_name = 'project-list.html'
def get_queryset(self):
user = self.request.user
manager_on = user.manager_on.all()
collaborator_on = user.collaborator_on.all()
projects = (manager_on | collaborator_on).distinct()
return projects
def get_context_data(self, **kwargs):
context = super(ProjectListView,self).get_context_data(**kwargs)
user = self.request.user
general_account = '{}-general'.format(user.organization)
context['general_account'] = general_account
context['general_request_subject'] = GENERAL_ACCOUNT_REQUEST_SUBJECT.format(username=user.username, general_account=general_account)
context['general_request_body'] = GENERAL_ACCOUNT_REQUEST_BODY.format(username=user.username, general_account=general_account)
return context
class ProjectDetailView(DetailView):
model = Project
template_name = 'project-detail.html'
def get_context_data(self, **kwargs):
context = super(ProjectDetailView, self).get_context_data(**kwargs)
references = Reference.objects.filter(project=self.object)
allocations = Allocation.objects.filter(project=self.object)
allocation_requests = AllocationRequest.objects.filter(project=self.object)
context['references'] = references
context['allocations'] = allocations
context['allocation_requests'] = allocation_requests
return context
class ProjectCreateView(FormView):
template_name = 'project-create.html'
form_class = ProjectForm
def form_valid(self, form):
creator = self.request.user
project = form.save()
if not project.managers.filter(username=creator.username).exists():
project.managers.add(creator)
project.save()
project_created_by_user.send(sender=project.__class__, project=project)
self.success_url = reverse_lazy(
'projects:project-detail',
kwargs={'pk':project.pk},
)
# Avoid calling save() multiple times, so return response directly instead
# of calling super() and letting the FormMixin class do so.
return HttpResponseRedirect(self.success_url)
class ProjectEditView(UpdateView,ProjectAccessMixin):
template_name = 'project-edit.html'
model = Project
form_class = ProjectEditForm
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ProjectEditView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
pk = kwargs.get('pk')
self.object = get_object_or_404(Project,pk=pk)
manager = self.get_manager_or_redirect(request.user,self.object)
return super(ProjectEditView,self).get(request,*args,**kwargs)
def post(self, request, *args, **kwargs):
path_cmp = self.request.path.split('/')
pk = int(path_cmp[-2])
self.object = get_object_or_404(Project,pk=pk)
manager = self.get_manager_or_redirect(request.user,self.object)
return super(ProjectEditView,self).post(request,*args,**kwargs)
def get_initial(self):
initial = super(ProjectEditView,self).get_initial()
initial['pi_emails'] = ','.join(self.object.pi_emails)
return initial
def form_valid(self, form):
editor = self.request.user
project = form.save()
if not project.managers.filter(username=editor.username).exists():
project.managers.add(editor)
project.save()
self.success_url = reverse_lazy(
'projects:project-detail',
kwargs={'pk':self.object.pk}
)
return HttpResponseRedirect(self.success_url)
class ReferenceDetailView(DetailView):
model = Reference
template_name = 'reference-detail.html'
def get(self, request, *args, **kwargs):
project_pk = kwargs.get('project_pk')
self.project = get_object_or_404(Project,pk=project_pk)
return super(ReferenceDetailView,self).get(request,*args,**kwargs)
def get_context_data(self, **kwargs):
context = super(ReferenceDetailView,self).get_context_data(**kwargs)
context['project'] = self.project
return context
class ReferenceCreateView(FormView,ProjectAccessMixin):
template_name = 'reference-create.html'
form_class = ReferenceForm
def get(self, request, *args, **kwargs):
project_pk = kwargs.get('project_pk')
self.project = get_object_or_404(Project,pk=project_pk)
manager = self.get_manager_or_redirect(request.user,self.project)
return super(ReferenceCreateView,self).get(request,*args,**kwargs)
def post(self, request, *args, **kwargs):
path_cmp = self.request.path.split('/')
project_pk = int(path_cmp[-3])
self.project = get_object_or_404(Project,pk=project_pk)
manager = self.get_manager_or_redirect(request.user,self.project)
return super(ReferenceCreateView,self).post(request,*args,**kwargs)
def get_context_data(self, **kwargs):
context = super(ReferenceCreateView,self).get_context_data(**kwargs)
context['project'] = self.project
return context
def form_valid(self, form):
ref_dict = {
'project': self.project
}
ref_dict.update(form.cleaned_data)
ref = Reference.objects.create(**ref_dict)
self.success_url = reverse_lazy(
'projects:reference-detail',
kwargs={
'project_pk':self.project.pk,
'pk':ref.pk,
}
)
return super(ReferenceCreateView,self).form_valid(form)
class ReferenceEditView(FormView,ProjectAccessMixin):
template_name = 'reference-edit.html'
form_class = ReferenceForm
def get(self, request, *args, **kwargs):
project_pk = kwargs.get('project_pk')
ref_pk = kwargs.get('pk')
self.project = get_object_or_404(Project,pk=project_pk)
self.object = get_object_or_404(Reference,pk=ref_pk)
manager = self.get_manager_or_redirect(request.user,self.project)
return super(ReferenceEditView,self).get(request,*args,**kwargs)
def post(self, request, *args, **kwargs):
path_cmp = self.request.path.split('/')
project_pk = int(path_cmp[-4])
ref_pk = int(path_cmp[-2])
self.project = get_object_or_404(Project,pk=project_pk)
self.object = get_object_or_404(Reference,pk=ref_pk)
manager = self.get_manager_or_redirect(request.user,self.project)
return super(ReferenceEditView,self).post(request,*args,**kwargs)
def get_context_data(self, **kwargs):
context = super(ReferenceEditView,self).get_context_data(**kwargs)
context['project'] = self.project
context['object'] = self.object
return context
def get_initial(self):
initial = super(ReferenceEditView,self).get_initial()
initial['description'] = self.object.description
initial['link'] = self.object.link
return initial
def form_valid(self, form):
ref = Reference.objects.filter(
pk=self.object.pk
).update(
**form.cleaned_data
)
self.success_url = reverse_lazy(
'projects:reference-detail',
kwargs={
'project_pk':self.project.pk,
'pk':self.object.pk,
}
)
return super(ReferenceEditView,self).form_valid(form)
class AllocationRequestCreateView(FormView,ProjectAccessMixin):
template_name = 'allocation-request-create.html'
form_class = AllocationRequestForm
def get(self, request, *args, **kwargs):
project_pk = kwargs.get('project_pk')
self.project = get_object_or_404(Project,pk=project_pk)
manager = self.get_manager_or_redirect(request.user,self.project)
return super(AllocationRequestCreateView,self).get(request,*args,**kwargs)
def post(self, request, *args, **kwargs):
path_cmp = self.request.path.split('/')
project_pk = int(path_cmp[-3])
self.project = get_object_or_404(Project,pk=project_pk)
manager = self.get_manager_or_redirect(request.user,self.project)
return super(AllocationRequestCreateView,self).post(request,*args,**kwargs)
def get_context_data(self, **kwargs):
context = super(AllocationRequestCreateView,self).get_context_data(**kwargs)
context['project'] = self.project
return context
def form_valid(self, form):
ar_dict = {
'project': self.project,
'requester': self.request.user
}
ar_dict.update(form.cleaned_data)
ar = AllocationRequest.objects.create(**ar_dict)
requester = RcLdapUser.objects.get_user_from_suffixed_username(ar.requester.username)
allocation_request_created_by_user.send(sender=ar.__class__,allocation_request=ar,requester=requester)
self.success_url = reverse_lazy(
'projects:allocation-request-detail',
kwargs={
'project_pk':self.project.pk,
'pk':ar.pk,
}
)
return super(AllocationRequestCreateView,self).form_valid(form)
class AllocationRequestDetailView(DetailView):
model = AllocationRequest
template_name = 'allocation-request-detail.html'
def get(self, request, *args, **kwargs):
project_pk = kwargs.get('project_pk')
self.project = get_object_or_404(Project,pk=project_pk)
return super(AllocationRequestDetailView,self).get(request,*args,**kwargs)
def get_context_data(self, **kwargs):
context = super(AllocationRequestDetailView,self).get_context_data(**kwargs)
context['project'] = self.project
return context
|
|
from pylab import show, quiver, figure, gca, imshow, colorbar, draw
from matplotlib.patches import Ellipse
import numpy as np
from pyBA.classes import Bivarg, Bgmap
import matplotlib.pyplot as plt
import matplotlib.colors as cols
import matplotlib.cm as cmx
def draw_objects(objects=np.array( [Bivarg()] ), replot=False, alpha=0.2,figsize=(10,10),scale=1.0,
show=False,colors="b",label=False):
"""
scale : allows you to blow the ellipses up or down (default: 1.0)
alpha: transparency for each ellipse (a scalar, or a vector with one value per object)
"""
ells = [Ellipse(xy=O.mu,
width=scale*2.0*np.sqrt(O.E[0,0]),
height=scale*2.0*np.sqrt(O.E[1,1]),
angle=O.theta)
for O in objects]
jet = plt.get_cmap('jet')
# Decide if plot is to be on top of whatever is already plotted
if not replot:
fig = figure(figsize=figsize)
ax = fig.add_subplot(111, aspect='equal')
else:
ax = gca()
if isinstance(alpha,float) or isinstance(alpha,int):
alphas = np.ones(len(ells))*alpha
else:
if len(alpha) != len(ells):
alphas = np.ones(len(ells))*alpha[0]
else:
alphas = alpha
if not isinstance(colors,list):
colors = [colors for x in range(len(ells))]
if len(colors) != len(ells):
colors = [colors[0] for x in range(len(ells))]
cNorm = cols.Normalize(vmin=colors[0], vmax=colors[-1])
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
for i,e in enumerate(ells):
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(alphas[i])
e.set_facecolor(scalarMap.to_rgba(colors[i]))
if label:
ax.set_xlabel("RA offset arcsec")
ax.set_ylabel("DEC offset arcsec")
#ax.autoscale(enable=None, axis='both', tight=True)
draw()
if show:
plt.show()  # the 'show' argument shadows pylab's show(); call it via plt explicitly
return ells
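# Illustrative usage sketch (default-constructed Bivargs, as in the signature above):
#
#   objects = np.array([Bivarg(), Bivarg()])
#   ells = draw_objects(objects, alpha=0.3, scale=2.0)   # returns the list of Ellipse artists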
def make_grid(objects = np.array([ Bivarg() ]), res=30):
"""Makes evenly-space two-dimensional grid of
object locations of given resolution. Uses input object
list to define ranges.
Input: objects - list or nparray of Bivargs
res - scalar, density of grid
"""
xmin = min([o.mu[0] for o in objects])
xmax = max([o.mu[0] for o in objects])
ymin = min([o.mu[1] for o in objects])
ymax = max([o.mu[1] for o in objects])
xs = np.linspace(xmin,xmax,res)
ys = np.linspace(ymin,ymax,res)
x,y = np.meshgrid(xs,ys)
return x,y
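# Illustrative usage sketch: the grid spans the bounding box of the object centres.
#
#   gx, gy = make_grid(objects, res=10)   # gx.shape == gy.shape == (10, 10)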
def draw_MAP_background(objectsA = np.array([ Bivarg() ]),
objectsB = np.array([ Bivarg() ]),
P = Bgmap(),
res = 30):
""" Plot the background parametric mapping between frames (the mean function
for the gaussian process) on a grid of given resolution. Overplot observed
displacements from lists of tie objects.
"""
from pyBA.distortion import compute_displacements, astrometry_mean
from numpy import array, sqrt
# Grid for regression
x,y = make_grid(objectsA,res=res)
# Perform evaluation of background function on grid
xy = np.array([x.flatten(),y.flatten()]).T
vxy = astrometry_mean(xy, P)
vx, vy = vxy[:,0], vxy[:,1]
# Compute empirical displacements
xobs, yobs, vxobs, vyobs, sxobs, syobs = compute_displacements(objectsA, objectsB)
# Matplotlib plotting
fig = figure(figsize=(16,16))
ax = fig.add_subplot(111, aspect='equal')
quiver(x,y,vx,vy,scale_units='width',scale=res*res)
quiver(xobs,yobs,vxobs,vyobs,color='r',scale_units='width',scale=res*res)
ax.autoscale(enable=None, axis='both', tight=True)
# Also plot error ellipses on interpolated points
#ellipses = array([ Bivarg( mu = array([xarr[i,0] + vx[i], xarr[i,1] + vy[i]]),
# sigma = array([ sx[i], sy[i] ]) )
# for i in range(len(xarr)) ])
#draw_objects(ellipses, replot='yes')
show()
return
def draw_MAP_residuals(objectsA, objectsB, P, scaled='no'):
from pyBA.distortion import compute_displacements, compute_residual
from numpy import array
# Compute displacements between frames for tie objects
xobs, yobs, vxobs, vyobs, sxobs, syobs = compute_displacements(objectsA, objectsB)
# Compute residual
dx, dy = compute_residual(objectsA, objectsB, P)
# Draw residuals
fig = figure(figsize=(16,16))
ax = fig.add_subplot(111, aspect='equal')
if scaled == 'yes':
# Allow relative scaling of arrows
quiver(xobs,yobs,dx,dy)
else:
# Show residuals in absolute size (often very tiny), with uncertainties
# Also plot error ellipses
ellipses = array([ Bivarg( mu = array([xobs[i] + dx[i], yobs[i] + dy[i]]),
sigma = objectsA[i].sigma + objectsB[i].sigma )
for i in range(len(objectsA)) ])
draw_objects(ellipses, replot='yes')
# Residuals
quiver(xobs,yobs,dx,dy,color='r', angles='xy', scale_units='xy', scale=1)
ax.autoscale(enable=None, axis='both', tight=True)
show()
def draw_realisation(objectsA, objectsB, P, scale, amp, chol, res = 30):
# Grid for regression
x,y = make_grid(objectsA,res=res)
xyarr = np.array([x.flatten(),y.flatten()]).T
# If no Cholesky matrix is provided, assume that we are
# drawing a realisation on the grid without using the observed data
if chol is None:
from pyBA.distortion import realise
vx, vy = realise(xyarr, P, scale, amp)
sx, sy = None, None
# Otherwise, use cholesky data to perform regression
else:
from pyBA.distortion import regression
vx, vy, sx, sy = regression(objectsA, objectsB, xyarr, P,
scale, amp, chol)
# Get xy coordinates of base of vectors
from pyBA.distortion import compute_displacements
xobs, yobs, vxobs, vyobs, _, _ = compute_displacements(objectsA, objectsB)
# Matplotlib plotting
fig = figure(figsize=(16,16))
ax = fig.add_subplot(111, aspect='equal')
quiver(x,y,vx,vy,scale_units='width',scale=res*res)
# If uncertainties are provided, plot them as a background image and colour the
# data vectors in white
if sx is not None:
quiver(xobs,yobs,vxobs,vyobs,color='w',scale_units='width',scale=res*res)
sarr = np.array(sx + sy).reshape( x.shape )
imshow(np.sqrt(sarr), origin='upper', extent=(x.min(), x.max(), y.min(), y.max()),
interpolation=None)
colorbar()
else:
# Otherwise, no background and data vectors in red
quiver(xobs,yobs,vxobs,vyobs,color='r',scale_units='width',scale=res*res)
ax.autoscale(enable=None, axis='both', tight=True)
show()
return
|
|
#!/usr/local/bin/python3.3
# pymysql : https://github.com/PyMySQL/PyMySQL
# mutagenx : https://github.com/LordSputnik/mutagen
# ==============================================================================
import os
import sys
import time
import uuid
import shutil
import mutagen as ID3
import pymysql as SQL
# ==============================================================================
PATH_ROOT = "/Music"
PATH_COVERS = "/Music/.covers"
PATH_SERVED = "/music"
DATABASE_HOST = 'localhost'
DATABASE_USER = 'USER'
DATABASE_PASS = 'PASS'
DATABASE_STORE = 'musicapp'
INDEX_ARTIST = 1
INDEX_ALBUM = 0
INDEX_SONG = 0
# ==============================================================================
class MP3():
def __init__(self, path):
global PATH_ROOT
global PATH_SERVED
fin = ID3.File(path)
self.artist = str(fin.tags['TPE1'].text[0])
self.album = str(fin.tags['TALB'].text[0])
self.title = str(fin.tags['TIT2'].text[0])
self.genre = str(fin.tags['TCON'].text[0]) if ('TCON' in fin.tags) else ''
self.track = int(fin.tags['TRCK'].text[0].split('/')[0]) if ('TRCK' in fin.tags) else 0
self.year = int(fin.tags['TDRC'].text[0].year) if ('TDRC' in fin.tags) else 1982
self.uuid = str(uuid.uuid4()).upper()
self.path = ''.join([PATH_SERVED, path[len(PATH_ROOT):]])
self.cover = None
for tag in fin.tags:
if (len(tag) > 4) and (tag[0:5] == 'APIC:'):
self.cover = fin.tags[tag]
filename = [self.uuid]
if self.cover.desc.lower().endswith('.png'):
filename.append('.png')
elif self.cover.desc.lower().endswith('.jpg'):
filename.append('.jpg')
elif self.cover.data[0:4] == b'\x89\x50\x4E\x47':
filename.append('.png')
elif self.cover.data[0:3] == b'\xFF\xD8\xFF':
filename.append('.jpg')
self.cover.filename = ''.join(filename)
break
def write_cover(self, path):
if self.cover is None: return
path = os.path.join(path, self.cover.filename)
fout = open(path, 'wb')
fout.write(self.cover.data)
fout.close()
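# Illustrative usage sketch (the path below is a placeholder):
#
#   mp3 = MP3("/Music/Some Artist/Some Album/01 Track.mp3")
#   print(mp3.artist, mp3.album, mp3.title, mp3.track, mp3.year)
#   mp3.write_cover(PATH_COVERS)   # no-op when the file has no embedded cover art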
# ==============================================================================
# artist { indexArtist, name, genre, tags, note }
# album { indexAlbum, indexArtist, title, year, cover, tags, note }
# song { indexSong, indexArtist, indexAlbum, title, track, tags, note, file }
# list { indexList, title, tags, note }
# listSong { indexList, indexSong, track, file } # file used for refresh/rebuild
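# Illustrative sketch (not part of the original script, never called by it): table
# definitions consistent with the column lists above and the INSERT statements below.
# Column types and sizes are assumptions, not taken from the real schema; only
# artist/album/song are created because those are the tables this script touches.
def database_create_tables(db):
    database_execute(db, "CREATE TABLE IF NOT EXISTS artist ("
                         "indexArtist INT PRIMARY KEY, name VARCHAR(255), "
                         "genre VARCHAR(255), tags TEXT, note TEXT)")
    database_execute(db, "CREATE TABLE IF NOT EXISTS album ("
                         "indexAlbum INT PRIMARY KEY, indexArtist INT, title VARCHAR(255), "
                         "year INT, cover VARCHAR(255), tags TEXT, note TEXT)")
    database_execute(db, "CREATE TABLE IF NOT EXISTS song ("
                         "indexSong INT PRIMARY KEY, indexArtist INT, indexAlbum INT, "
                         "title VARCHAR(255), track INT, tags TEXT, note TEXT, file VARCHAR(1024))")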
def database_connect():
global DATABASE_HOST
global DATABASE_USER
global DATABASE_PASS
global DATABASE_STORE
db = SQL.connect(host=DATABASE_HOST, user=DATABASE_USER, passwd=DATABASE_PASS, db=DATABASE_STORE)
database_execute(db, 'SET NAMES utf8')
db.encoding = 'UTF-8'
return db
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def database_execute(db, query, args=None):
cur = db.cursor()
if args is not None:
args = [db.escape(x) for x in args]
query = query.replace('?','{}').format(*args)
cur.execute(query)
result = cur.fetchall() or []
cur.close()
return result
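# Illustrative usage sketch: '?' placeholders are replaced in order after being
# escaped with db.escape(), so values are passed through the args list.
#
#   rows = database_execute(db, "SELECT name FROM artist WHERE indexArtist = ?", [5])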
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def database_table_clear(db, table):
cur = db.cursor()
cur.execute('TRUNCATE TABLE {}'.format(table))
cur.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def database_clear_all(db):
global PATH_COVERS
directory_clear(PATH_COVERS)
database_table_clear(db, 'artist')
database_table_clear(db, 'album')
database_table_clear(db, 'song')
# ==============================================================================
def directory_clear(path):
for entry in os.listdir(path):
if entry.lower() == 'missing.png': continue
full = os.path.join(path, entry)
if os.path.isfile(full):
os.unlink(full)
elif os.path.isdir(full):
shutil.rmtree(full)
# ==============================================================================
def library_refresh(db, path):
database_clear_all(db)
database_execute(db, "INSERT INTO artist (indexArtist, name, genre) VALUES (0, 'Compilation', 'Compilation')")
library_scan_dir(db, path)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def library_scan_file(db, path):
global INDEX_SONG
mp3 = MP3(path)
mp3.index_song = INDEX_SONG
INDEX_SONG += 1
if "/Compilations/" in path:
mp3.index_artist = 0
else:
mp3.index_artist = entry_artist_id(db, mp3)
mp3.index_album = entry_album_id(db, mp3)
entry_insert_song(db, mp3)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def library_scan_dir(db, path):
# truncate or pad the displayed path to exactly 80 columns
write_path = path[:80].ljust(80)
sys.stdout.write('\b' * 80)
sys.stdout.write(write_path)
sys.stdout.flush()
for entry in os.listdir(path):
if entry[0] == '.': continue
full = os.path.join(path, entry)
if os.path.isfile(full):
if entry.lower().endswith('.mp3'):
library_scan_file(db, full)
else:
library_scan_dir(db, full)
# ==============================================================================
def entry_artist_id(db, mp3):
global INDEX_ARTIST
result = database_execute(db, "SELECT artist.indexArtist FROM artist WHERE (artist.name = ?)", [mp3.artist])
if len(result) == 0:
ret_index = INDEX_ARTIST
database_execute(db, "INSERT INTO artist (indexArtist, name, genre) VALUES (?,?,?)", [ret_index, mp3.artist, mp3.genre])
INDEX_ARTIST += 1
return ret_index
else:
return result[0][0]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def entry_album_id(db, mp3):
global INDEX_ALBUM
global PATH_COVERS
result = database_execute(db, "SELECT album.indexAlbum FROM album WHERE ((album.indexArtist = ?) AND (album.title = ?))",
[mp3.index_artist, mp3.album])
if len(result) == 0:
ret_index = INDEX_ALBUM
if mp3.cover is not None:
mp3.write_cover(PATH_COVERS)
database_execute(db, "INSERT INTO album (indexAlbum, indexArtist, title, year, cover) VALUES (?,?,?,?,?)",
[ret_index, mp3.index_artist, mp3.album, mp3.year,
mp3.cover.filename if (mp3.cover is not None) else 'missing.png'])
INDEX_ALBUM += 1
return ret_index
else:
return result[0][0]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def entry_insert_song(db, mp3):
database_execute(db, "INSERT INTO song (indexSong, indexArtist, indexAlbum, title, track, file) VALUES (?,?,?,?,?,?)",
[mp3.index_song, mp3.index_artist, mp3.index_album, mp3.title, mp3.track, mp3.path])
# ==============================================================================
sys.stdout.write(' ' * 80)
began = time.monotonic()
db = database_connect()
library_refresh(db, PATH_ROOT)
elapsed = int(time.monotonic() - began)
sys.stdout.write('\b' * 80)
print("Processed {} artists, {} albums, {} songs in {} seconds.".format(INDEX_ARTIST-1, INDEX_ALBUM, INDEX_SONG, elapsed))
# ==============================================================================
# ------------------------------------------------------------------------------
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
|
import os
import sys
import urwid
import subprocess
from multiprocessing import Pool
from globalBase import *
import urwidHelper as ur
import myutil
import tool
from tool import git, system, systemSafe, systemRet, programPath
"""
itemList = list of (terminal, attr)
"""
def refreshBtnListTerminal(terminalItemList, listBox, onClick):
del listBox.body[:]
listBox.itemCount = len(terminalItemList)
if listBox.itemCount == 0:
terminalItemList = [("< Nothing > ", None)]
listBox.body += ur.btnListMakeTerminal(terminalItemList, onClick)
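# Illustrative usage sketch: itemList entries are (terminal-markup, attr) tuples,
# e.g. as produced by git.statusFileList() further below; the values here are placeholders.
#
#   items = [(" M foo.py", "c"), ("A  bar.py", "s")]
#   refreshBtnListTerminal(items, someListBox, lambda btn: None)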
class DlgGitCommit(ur.cDialog):
themes = [("greenfg", "greenfg_f"), ("std", "std_f")]
def __init__(self, onExit):
super().__init__()
self.selectFileName = ""
self.onExit = onExit
self.edInput = ur.editGen("Input commit message => ", "", lambda edit, text: self.onMsgChanged(edit, text))
self.widgetFileList = ur.mListBox \
(urwid.SimpleFocusListWalker(ur.btnListMakeTerminal([("< No files >", None)], None)))
self.widgetContent = ur.mListBox(urwid.SimpleListWalker(ur.textListMakeTerminal(["< Nothing to display >"])))
self.headerText = urwid.Text(">> Commit - f11/f12(Prev/Next file) f4(cancel operation)")
self.widgetFrame = urwid.Pile \
([("pack", self.edInput), (8, urwid.AttrMap(self.widgetFileList, 'std')), ('pack', urwid.Divider('-')), self.widgetContent])
self.mainWidget = urwid.Frame(self.widgetFrame, header=self.headerText)
self.refreshFileList()
self.widgetFrame.set_focus(self.edInput)
def onMsgChanged(self, edit, text):
pass
def _applyFileColorTheme(self, widget, isFocus=0):
theme = self.themes[0 if widget.original_widget.attr == "s" else 1]
widget.original_widget.set_label((theme[isFocus], widget.original_widget.origTxt))
def onFileSelected(self, btn):
# note: btn.get_label() is not available on the wrapped button; go through original_widget
label = btn.original_widget.get_label()
self.selectFileName = label
# g.headerText.set_text("file - " + label)
# display
btnType = btn.original_widget.attr
pp = os.path.join(g.relRoot, self.selectFileName)
try:
ss = system("git diff --color %s \"%s\"" % ("" if btnType == "c" else "--staged", pp))
except subprocess.CalledProcessError as e:
ss = "failed to print diff for %s\n %s" % (pp, e)
ss = ss.replace("\t", " ")
del self.widgetContent.body[:]
self.widgetContent.body += ur.textListMakeTerminal(ss.split("\n"))
self.widgetFrame.set_focus(self.widgetContent)
def refreshFileContentCur(self):
self.onFileSelected(self.widgetFileList.focus)
def refreshFileList(self):
del self.widgetFileList.body[:]
# staged file list
fileList = system("git diff --name-only --cached")
itemList = [ (self.themes[0][0], x, "s") for x in fileList.split("\n") if x.strip() != "" ]
self.widgetFileList.body += ur.btnListMakeMarkup(itemList, lambda btn: self.onFileSelected(btn))
# general file list
fileList = system("git diff --name-only")
itemList = [ (self.themes[1][0], x, "c") for x in fileList.split("\n") if x.strip() != "" ]
self.widgetFileList.body += ur.btnListMakeMarkup(itemList, lambda btn: self.onFileSelected(btn), False)
# for widget in self.widgetFileList.body:
# self._applyFileColorTheme(widget, 0)
if len(self.widgetFileList.body) == 0:
self.widgetFileList.body += ur.btnListMakeTerminal([("< Nothing >", None)], None, False)
# self.onFileFocusChanged(self.widgetFileList.focus_position)
self.onFileSelected(self.widgetFileList.focus) # auto display
def inputFilter(self, keys, raw):
if g.loop.widget != g.dialog.mainWidget:
return keys
if ur.filterKey(keys, "down"):
self.widgetContent.scrollDown()
if ur.filterKey(keys, "up"):
self.widgetContent.scrollUp()
return keys
def unhandled(self, key):
if key == "q" or key == "Q" or key == "f4":
self.close()
elif key == 'k':
self.widgetContent.scrollUp()
elif key == 'j':
self.widgetContent.scrollDown()
elif key == "left" or key == "[" or key == "f11" or key == "h":
self.widgetFileList.focusPrevious()
self.refreshFileContentCur()
if key == "f11":
self.widgetFrame.set_focus(self.edInput)
elif key == "right" or key == "]" or key == "f12" or key == "l":
self.widgetFileList.focusNext()
self.refreshFileContentCur()
if key == "f12":
self.widgetFrame.set_focus(self.edInput)
elif key == "A":
def onAdd():
system("git add \"%s\"" % fname)
self.refreshFileList()
def onPrompt():
g.loop.stop()
systemRet("git add -p \"%s\"" % fname)
g.loop.start()
self.refreshFileList()
btn = self.widgetFileList.focus
fname = myutil.gitFileBtnName(btn)
ur.popupAsk3("Git add", "Do you want to add a file[%s]?" % fname, "Add", "Prompt", "Cancel", onAdd, onPrompt)
elif key == "R":
def onReset():
system("git reset \"%s\"" % fname)
self.refreshFileList()
btn = self.widgetFileList.focus
fname = myutil.gitFileBtnName(btn)
ur.popupAsk("Git reset", "Do you want to reset a file[%s]?" % fname, onReset)
elif key == "D":
def onDrop():
system("git checkout -- \"%s\"" % fname)
self.refreshFileList()
btn = self.widgetFileList.focus
fname = myutil.gitFileBtnName(btn)
ur.popupAsk("Git reset(f)", "Do you want to drop file[%s]s modification?" % fname, onDrop)
elif key == "E":
btn = self.widgetFileList.focus
fname = myutil.gitFileBtnName(btn)
g.loop.stop()
systemRet("vim %s" % fname)
g.loop.start()
self.refreshFileContentCur()
elif key == "esc":
self.widgetFrame.set_focus(self.edInput)
elif key == "ctrl a":
# commit all
def onCommit():
tt = self.edInput.get_edit_text()
ss = system("git commit -a -m \"%s\"" % tt[:-1])
self.close()
ur.popupAsk("Git Commit", "Do you want to commit all modification?", onCommit)
elif key == "enter":
# commit
tt = self.edInput.get_edit_text()
ss = system("git commit -m \"%s\"" % tt)
# print(ss)
self.close()
elif key == "C":
def onCommit():
g.loop.stop()
systemRet("git commit -a")
g.loop.start()
self.refreshFileList()
ur.popupAsk("Git commit(all)", "Do you want to commit all content?", onCommit)
#elif key == "h":
# ur.popupMsg("Dc help", "Felix Felix Felix Felix\nFelix Felix")
class DlgGitStatus(ur.cDialog):
def __init__(self, onExit=None):
super().__init__()
self.onExit = onExit
self.selectFileName = ""
self.widgetFileList = ur.mListBox(
urwid.SimpleFocusListWalker(ur.btnListMakeTerminal([("< No files >", None)], None)))
self.widgetContent = ur.mListBox(urwid.SimpleListWalker(ur.textListMakeTerminal(["< Nothing to display >"])))
self.headerText = urwid.Text(
">> dc stage - q/F4(Quit) h/l(Prev/Next file) j/k(scroll) A(Add) P(Prompt) R(Reset) D(drop) C(Commit) I(Ignore)")
self.widgetFrame = urwid.Pile(
[(8, urwid.AttrMap(self.widgetFileList, 'std')), ('pack', urwid.Divider('-')), self.widgetContent])
self.mainWidget = urwid.Frame(self.widgetFrame, header=self.headerText)
try:
g.gitRoot = system("git rev-parse --show-toplevel")
except subprocess.CalledProcessError:
print("Current folder is no git repo")
raise urwid.ExitMainLoop
g.curPath = os.getcwd()
g.relRoot = "./"
if g.gitRoot != g.curPath:
g.relRoot = os.path.relpath(g.gitRoot, g.curPath)
def init(self):
if not self.refreshFileList():
print("No modified or untracked files")
return False
return True
def onFileSelected(self, btn):
# note: btn.get_label() is not available on the wrapped button; go through original_widget
label = btn.original_widget.get_label()
# self.selectFileName = gitFileBtnName(btn)
self.selectFileName = myutil.gitFileLastName(btn)
# g.headerText.set_text("file - " + label)
# display
if label == "< Nothing >":
ss = label
elif label.startswith("?? "):
if os.path.isdir(self.selectFileName):
ss = "%s is folder" % self.selectFileName
else:
try:
with open(self.selectFileName, "r", encoding="UTF-8") as fp:
ss = fp.read()
except UnicodeDecodeError:
# ur.popupMsg("Encoding", "Encoding error[%s]" % self.selectFileName);
ss = "No utf8 file[size:%d]" % os.path.getsize(self.selectFileName)
else:
try:
ss = system("git diff --color \"%s\"" % self.selectFileName)
except subprocess.CalledProcessError as e:
ss = "failed to print diff for %s\n %s" % (self.selectFileName, e)
ss = ss.replace("\t", " ")
del self.widgetContent.body[:]
self.widgetContent.body += ur.textListMakeTerminal(ss.splitlines())
self.widgetFrame.set_focus(self.widgetContent)
def refreshFileContentCur(self):
self.onFileSelected(self.widgetFileList.focus)
def refreshFileList(self, focusMove=0):
itemList = git.statusFileList()
if len(itemList) <= 0:
return False
focusIdx = self.widgetFileList.focus_position
refreshBtnListTerminal(itemList, self.widgetFileList, lambda btn: self.onFileSelected(btn))
size = len(self.widgetFileList.body)
focusIdx += focusMove
if focusIdx >= size:
focusIdx = size - 1
# self.widgetFileList.focus_position = focusIdx
self.widgetFileList.set_focus(focusIdx)
self.onFileSelected(self.widgetFileList.focus) # auto display
return True
def gitGetStagedCount(self):
cnt = 0
for item in self.widgetFileList.body:
if "s" in item.original_widget.attr: # greenfg
cnt += 1
return cnt
def inputFilter(self, keys, raw):
if g.loop.widget != g.dialog.mainWidget:
return keys
if ur.filterKey(keys, "down"):
self.widgetContent.scrollDown()
if ur.filterKey(keys, "up"):
self.widgetContent.scrollUp()
return keys
def unhandled(self, key):
if key == 'f4' or key == "q":
self.close()
elif key == 'k':
self.widgetContent.scrollUp()
elif key == 'j':
self.widgetContent.scrollDown()
elif key == "left" or key == "[" or key == "f11" or key == "h":
self.widgetFileList.focusPrevious()
self.refreshFileContentCur()
elif key == "right" or key == "]" or key == "f12" or key == "l":
self.widgetFileList.focusNext()
self.refreshFileContentCur()
elif key == "A":
btn = self.widgetFileList.focus
# fname = gitFileBtnName(btn)
fname = myutil.gitFileLastName(btn)
system("git add \"%s\"" % fname)
self.refreshFileList(1)
elif key == "P":
def onPrompt():
g.loop.stop()
systemRet("git add -p \"%s\"" % fname)
g.loop.start()
self.refreshFileList()
btn = self.widgetFileList.focus
fname = myutil.gitFileBtnName(btn)
ur.popupAsk("Git add", "Do you want to add a file via prompt[%s]?" % fname, onPrompt)
elif key == "R":
btn = self.widgetFileList.focus
fname = myutil.gitFileBtnName(btn)
system("git reset \"%s\"" % fname)
self.refreshFileList()
elif key == "D":
def onDrop():
system("git checkout -- \"%s\"" % fname)
self.refreshFileList()
def onDelete():
os.remove(fname)
self.refreshFileList()
btn = self.widgetFileList.focus
fname = myutil.gitFileBtnName(btn)
if myutil.gitFileBtnType(btn) == "??":
ur.popupAsk("Git reset(f)", "Do you want to delete file[%s]?" % fname, onDelete)
else:
ur.popupAsk("Git reset(f)", "Do you want to drop file[%s]s modification?" % fname, onDrop)
elif key == "E":
btn = self.widgetFileList.focus
fname = myutil.gitFileBtnName(btn)
g.loop.stop()
systemRet("vim %s" % fname)
g.loop.start()
self.refreshFileContentCur()
elif key == "c_old":
def onCommit():
g.loop.stop()
systemRet("git commit")
g.loop.start()
self.refreshFileList()
ur.popupAsk("Git commit", "Do you want to commit?", onCommit)
elif key == "C":
def onExit():
if not self.refreshFileList():
if getattr(self, "onExit") and self.onExit is not None:
self.close()
return
else:
g.loop.stop()
print("No modified or untracked files")
sys.exit(0)
g.doSetMain(self)
# check staged data
n = self.gitGetStagedCount()
if n == 0:
ur.popupMsg("Alert", "There is no staged file to commit")
return
dlg = DlgGitCommit(onExit)
g.doSetMain(dlg)
elif key == "h":
ur.popupMsg("Dc help", "Felix Felix Felix Felix\nFelix Felix")
|
|
"""
Naming convention:
* item - the visual element in MikiTree
* page - denoted by item hierarchy e.g. `foo/bar` is a subpage of `foo`
* file - the actual file on disk
"""
import os
import datetime
from PyQt5.QtCore import Qt
from PyQt5 import QtCore, QtGui, QtWidgets
"""
from PyQt4.QtCore import Qt, QDir, QFile, QIODevice, QSize, QTextStream
from PyQt4.QtGui import (QAbstractItemView, QCursor, QMenu, QMessageBox, QTreeWidget, QTreeWidgetItem)
"""
from whoosh.index import open_dir
from whoosh.qparser import QueryParser
from whoosh.writing import AsyncWriter
from .config import Setting
from .utils import LineEditDialog, TTPL_COL_DATA, TTPL_COL_EXTRA_DATA
from . import mikitemplate
class MikiTree(QtWidgets.QTreeWidget):
def __init__(self, parent=None):
super(MikiTree, self).__init__(parent)
self.parent = parent
self.settings = parent.settings
self.notePath = self.settings.notePath
self.header().close()
self.setAcceptDrops(True)
self.setDragEnabled(True)
# self.setDropIndicatorShown(True)
self.setDragDropOverwriteMode(True)
self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
# self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.contextMenu)
self.nvwCallback = lambda item: None
self.nvwtCallback = lambda item: None
def itemToPage(self, item):
""" get item hierarchy from item """
page = ''
if not hasattr(item, 'text'):
return page
page = item.text(0)
parent = item.parent()
while parent is not None:
page = parent.text(0) + '/' + page
parent = parent.parent()
return page
def pageToItem(self, page):
""" get item from item hierarchy """
# if page is empty return current item
if page == '':
return self.currentItem()
# strip the beginning and ending '/' character
if page[0] == '/':
page = page[1:]
if page[-1] == '/':
page = page[:-1]
# find all items named pieces[-1], then match the page name.
pieces = page.split('/')
itemList = self.findItems(
pieces[-1], Qt.MatchExactly|Qt.MatchRecursive)
if len(itemList) == 1:
return itemList[0]
for item in itemList:
if page == self.itemToPage(item):
return item
def itemToFile(self, item):
return self.pageToFile(self.itemToPage(item))
def pageToFile(self, page):
""" get filepath from page
filepath = notePath + page + fileExt
fileExt is stored in notebook.conf
"""
# When foo.md, foo.mkd and foo.markdown all exist,
# the one with defExt will be returned
extName = ['.md', '.mkd', '.markdown']
defExt = self.settings.fileExt
if defExt in extName:
extName.remove(defExt)
else:
print("Warning: detected file extension name is", defExt)
print(" Your config file is located in", self.notePath + "/notebook.conf")
extName.insert(0, defExt)
for ext in extName:
filepath = os.path.join(self.notePath, page + ext)
if QtCore.QFile.exists(filepath):
return filepath
# return the filename with the default extension even if the file does not exist.
return os.path.join(self.notePath, page + defExt)
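# Illustrative round trip (hypothetical page name; the extension comes from
# self.settings.fileExt, '.md' is assumed here):
#   item = tree.pageToItem('foo/bar')      # find the tree item for a page path
#   tree.itemToPage(item)                  # -> 'foo/bar'
#   tree.pageToFile('foo/bar')             # -> os.path.join(notePath, 'foo/bar.md')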
def itemToHtmlFile(self, item):
""" The corresponding html file path """
page = self.itemToPage(item)
return os.path.join(self.settings.htmlPath, page + ".html")
def itemToAttachmentDir(self, item):
""" The corresponding attachment directory
dirName is constructed from pageName and md5(page), so that no nesting
is needed and manipulation becomes easy
"""
page = self.itemToPage(item)
#path = os.path.join(self.settings.attachmentPath, page)
path = self.settings.attachmentPath+"/"+page
return path
def currentPage(self):
return self.itemToPage(self.currentItem())
def mousePressEvent(self, event):
if event.button() == Qt.RightButton:
return
else:
QtWidgets.QTreeWidget.mousePressEvent(self, event)
def contextMenu(self, qpoint):
""" contextMenu shown when right click the mouse """
item = self.itemAt(qpoint)
menu = QtWidgets.QMenu()
if item is None or item.parent() is None:
menu.addAction(self.tr("New Page..."), lambda: self.newPageCore(self, None))
else:
menu.addAction(self.tr("New Page..."), lambda: self.newPageCore(item.parent(), None))
if item is None:
menu.addAction(self.tr("New Subpage..."), lambda: self.newPageCore(self, None))
else:
menu.addAction(self.tr("New Subpage..."), lambda: self.newPageCore(item, None))
if item is None or item.parent() is None:
menu.addAction(self.tr("New page from template..."), lambda: self.newPageCore(self, None, useTemplate=True))
else:
menu.addAction(self.tr("New page from template..."), lambda: self.newPageCore(item.parent(), None, useTemplate=True))
if item is None:
menu.addAction(self.tr("New subpage from template..."), lambda: self.newPageCore(self, None, useTemplate=True))
else:
menu.addAction(self.tr("New subpage from template..."), lambda: self.newPageCore(item, None, useTemplate=True))
menu.addAction(self.tr("View separately"), lambda: self.nvwCallback(item))
menu.addAction(self.tr("View separately (plain text)"), lambda: self.nvwtCallback(item))
menu.addSeparator()
menu.addAction(self.tr("Collapse This Note Tree"),
lambda: self.recurseCollapse(item))
menu.addAction(self.tr("Uncollapse This Note Tree"),
lambda: self.recurseExpand(item))
menu.addAction(self.tr("Collapse All"), self.collapseAll)
menu.addAction(self.tr("Uncollapse All"), self.expandAll)
menu.addSeparator()
menu.addAction(self.tr('Rename Page...'), lambda: self.renamePage(item))
menu.addAction(self.tr("Delete Page"), lambda: self.delPage(item))
menu.exec_(self.mapToGlobal(qpoint))
def newPage(self, name=None):
if self.currentItem() is None:
self.newPageCore(self, name)
else:
parent = self.currentItem().parent()
if parent is not None:
self.newPageCore(parent, name)
else:
self.newPageCore(self, name)
def newSubpage(self, name=None):
item = self.currentItem()
self.newPageCore(item, name)
def newPageCore(self, item, newPageName, useTemplate=False, templateTitle=None, templateBody=None):
pagePath = os.path.join(self.notePath, self.itemToPage(item)).replace(os.sep, '/')
if not newPageName:
if useTemplate:
dialog = mikitemplate.PickTemplateDialog(pagePath, self.settings, parent=self)
if dialog.exec_():
curTitleIdx = dialog.titleTemplates.currentIndex()
curBodyIdx = dialog.bodyTemplates.currentIndex()
dtnow = datetime.datetime.now()
if curTitleIdx > -1:
titleItem = dialog.titleTemplates.model().item(curTitleIdx)
titleItemContent = titleItem.data(TTPL_COL_DATA)
titleItemType = titleItem.data(TTPL_COL_EXTRA_DATA)
titleParameter = dialog.titleTemplateParameter.text()
newPageName = mikitemplate.makeTemplateTitle(titleItemType,
titleItemContent, dtnow=dtnow, userinput=titleParameter)
if curBodyIdx > -1:
bodyItemIdx = dialog.bodyTemplates.rootModelIndex().child(curBodyIdx, 0)
bodyFPath = dialog.bodyTemplates.model().filePath(bodyItemIdx)
else:
bodyFPath = None
else:
dialog = LineEditDialog(pagePath, self)
if dialog.exec_():
newPageName = dialog.editor.text()
prevparitem = None
if newPageName:
if hasattr(item, 'text'):
pagePath = os.path.join(self.notePath,
pagePath + '/').replace(os.sep, '/')
if not QtCore.QDir(pagePath).exists():
QtCore.QDir(self.notePath).mkdir(pagePath)
if not QtCore.QDir(os.path.dirname(newPageName)).exists():
curdirname = os.path.dirname(newPageName)
needed_parents = []
while curdirname != '':
needed_parents.append(curdirname)
curdirname = os.path.dirname(curdirname)
#create the needed hierarchy in reverse order
for i, needed_parent in enumerate(needed_parents[::-1]):
paritem = self.pageToItem(needed_parent)
if paritem is None:
if i == 0:
self.newPageCore(item, os.path.basename(needed_parent))
else:
self.newPageCore(prevparitem, os.path.basename(needed_parent))
QtCore.QDir(pagePath).mkdir(needed_parent)
elif not QtCore.QDir(os.path.join(self.notePath, needed_parent).replace(os.sep, '/')).exists():
QtCore.QDir(pagePath).mkdir(needed_parent)
if paritem is not None:
prevparitem = paritem
else:
prevparitem = self.pageToItem(needed_parent)
fileName = pagePath + newPageName + self.settings.fileExt
fh = QtCore.QFile(fileName)
fh.open(QtCore.QIODevice.WriteOnly)
savestream = QtCore.QTextStream(fh)
if useTemplate and bodyFPath is not None:
with open(bodyFPath, 'r', encoding='utf-8') as templatef:
savestream << mikitemplate.makeTemplateBody(
os.path.basename(newPageName), dtnow=dtnow,
dt_in_body_txt=self.tr("Created {}"),
body=templatef.read())
else:
savestream << mikitemplate.makeDefaultBody(os.path.basename(newPageName), self.tr("Created {}"))
fh.close()
if prevparitem is not None:
QtWidgets.QTreeWidgetItem(prevparitem, [os.path.basename(newPageName)])
else:
QtWidgets.QTreeWidgetItem(item, [os.path.basename(newPageName)])
newItem = self.pageToItem(pagePath + newPageName)
self.sortItems(0, Qt.AscendingOrder)
self.setCurrentItem(newItem)
if hasattr(item, 'text'):
self.expandItem(item)
# create attachment folder if not exist
attDir = self.itemToAttachmentDir(newItem)
if not QtCore.QDir(attDir).exists():
QtCore.QDir().mkpath(attDir)
# TODO improvement needed, can be reused somehow
with open(fileName, 'r') as fileobj:
content = fileobj.read()
self.ix = open_dir(self.settings.indexdir)
#writer = self.ix.writer()
writer = AsyncWriter(self.ix)
writer.add_document(path=pagePath+newPageName, content=content)
writer.commit()
#self.ix.close()
def dropEvent(self, event):
""" A note is related to four parts:
note file, note folder containing child note, parent note folder, attachment folder.
When drag/drop, should take care of:
1. rename note file ("rename" is just another way of saying "move")
2. rename note folder
3. if parent note has no more child, remove parent note folder
4. rename attachment folder
"""
# construct file/folder names before and after drag/drop
sourceItem = self.currentItem()
sourcePage = self.itemToPage(sourceItem)
oldAttDir = self.itemToAttachmentDir(sourceItem)
targetItem = self.itemAt(event.pos())
targetPage = self.itemToPage(targetItem)
oldFile = self.itemToFile(sourceItem)
newFile = os.path.join(targetPage,
sourceItem.text(0) + self.settings.fileExt)
oldDir = sourcePage
newDir = os.path.join(targetPage, sourceItem.text(0))
if QtCore.QFile.exists(newFile):
QtWidgets.QMessageBox.warning(self, self.tr("Error"),
self.tr("File already exists: %s") % newFile)
return
# rename file/folder, remove parent note folder if necessary
if targetPage != '':
QtCore.QDir(self.notePath).mkpath(targetPage)
QtCore.QDir(self.notePath).rename(oldFile, newFile)
if sourceItem.childCount() != 0:
QtCore.QDir(self.notePath).rename(oldDir, newDir)
if sourceItem.parent() is not None:
parentItem = sourceItem.parent()
parentPage = self.itemToPage(parentItem)
if parentItem.childCount() == 1:
QtCore.QDir(self.notePath).rmdir(parentPage)
# pass the event to default implementation
QtWidgets.QTreeWidget.dropEvent(self, event)
self.sortItems(0, Qt.AscendingOrder)
if hasattr(targetItem, 'text'):
self.expandItem(targetItem)
# if attachment folder exists, rename it
if QtCore.QDir().exists(oldAttDir):
# make sure target folder exists
QtCore.QDir().mkpath(self.itemToAttachmentDir(targetItem))
newAttDir = self.itemToAttachmentDir(sourceItem)
QtCore.QDir().rename(oldAttDir, newAttDir)
self.parent.updateAttachmentView()
def renamePage(self, item):
oldAttDir = self.itemToAttachmentDir(item)
parent = item.parent()
parentPage = self.itemToPage(parent)
parentPath = os.path.join(self.notePath, parentPage)
dialog = LineEditDialog(parentPath, self)
dialog.setText(item.text(0))
if dialog.exec_():
newPageName = dialog.editor.text()
# if hasattr(item, 'text'): # if item is not QTreeWidget
if parentPage != '':
parentPage = parentPage + '/'
oldFile = self.itemToFile(item)
newFile = parentPage + newPageName + self.settings.fileExt
QtCore.QDir(self.notePath).rename(oldFile, newFile)
if item.childCount() != 0:
oldDir = parentPage + item.text(0)
newDir = parentPage + newPageName
QtCore.QDir(self.notePath).rename(oldDir, newDir)
item.setText(0, newPageName)
self.sortItems(0, Qt.AscendingOrder)
# if attachment folder exists, rename it
if QtCore.QDir().exists(oldAttDir):
newAttDir = self.itemToAttachmentDir(item)
QtCore.QDir().rename(oldAttDir, newAttDir)
self.parent.updateAttachmentView()
def pageExists(self, noteFullName):
return QtCore.QFile.exists(self.pageToFile(noteFullName))
def delPageWrapper(self):
item = self.currentItem()
self.delPage(item)
def delPage(self, item):
index = item.childCount()
while index > 0:
index = index - 1
self.dirname = item.child(index).text(0)
self.delPage(item.child(index))
# remove attachment folder
attDir = self.itemToAttachmentDir(item)
for info in QtCore.QDir(attDir).entryInfoList():
QtCore.QDir().remove(info.absoluteFilePath())
QtCore.QDir().rmdir(attDir)
pagePath = self.itemToPage(item)
self.ix = open_dir(self.settings.indexdir)
query = QueryParser('path', self.ix.schema).parse(pagePath)
#writer = self.ix.writer()
writer = AsyncWriter(self.ix)
n = writer.delete_by_query(query)
# n = writer.delete_by_term('path', pagePath)
writer.commit()
#self.ix.close()
b = QtCore.QDir(self.notePath).remove(self.pageToFile(pagePath))
parent = item.parent()
parentPage = self.itemToPage(parent)
if parent is not None:
index = parent.indexOfChild(item)
parent.takeChild(index)
if parent.childCount() == 0: # if no child, dir not needed
QtCore.QDir(self.notePath).rmdir(parentPage)
else:
index = self.indexOfTopLevelItem(item)
self.takeTopLevelItem(index)
QtCore.QDir(self.notePath).rmdir(pagePath)
def sizeHint(self):
return QtCore.QSize(200, 0)
def recurseCollapse(self, item):
for i in range(item.childCount()):
a_item = item.child(i)
self.recurseCollapse(a_item)
self.collapseItem(item)
def recurseExpand(self, item):
self.expandItem(item)
for i in range(item.childCount()):
a_item = item.child(i)
self.recurseExpand(a_item)
class TocTree(QtWidgets.QTreeWidget):
def __init__(self, parent=None):
super().__init__(parent=parent)
self.header().close()
def updateToc(self, root, entries):
self.clear()
item = QtWidgets.QTreeWidgetItem(self, [root, '0'])
curLevel = 0
for (level, h, p, a) in entries:
val = [h, str(p), a]
if level == curLevel:
item = QtWidgets.QTreeWidgetItem(item.parent(), val)
elif level < curLevel:
item = QtWidgets.QTreeWidgetItem(item.parent().parent(), val)
curLevel = level
else:
item = QtWidgets.QTreeWidgetItem(item, val)
curLevel = level
self.expandAll()
def sizeHint(self):
return QtCore.QSize(200, 0)
|
|
from __future__ import division, print_function, absolute_import
import pylab as plt
import amitgroup.plot as gr
import numpy as np
import amitgroup as ag
import os
import pnet
import matplotlib.pylab as plot
from pnet.cyfuncs import index_map_pooling
from queue import Queue
def extract(ims,allLayers):
#print(allLayers)
curX = ims
for layer in allLayers:
#print('-------------')
#print(layer)
curX = layer.extract(curX)
#print(np.array(curX).shape)
#print('------------------')
return curX
def partsPool(originalPartsRegion, numParts):
partsGrid = np.zeros((1,1,numParts))
for i in range(originalPartsRegion.shape[0]):
for j in range(originalPartsRegion.shape[1]):
if(originalPartsRegion[i,j]!=-1):
partsGrid[0,0,originalPartsRegion[i,j]] = 1
return partsGrid
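# Illustrative sketch: partsPool turns a window of coded part indices (-1 meaning
# "no part coded") into a (1, 1, numParts) binary occupancy grid.
#
#   region = np.array([[0, -1], [2, 0]])
#   partsPool(region, 4)[0, 0]   # -> array([1., 0., 1., 0.])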
def test(ims,labels,net):
yhat = net.classify((ims,1000))
return yhat == labels
#def trainPOP():
if pnet.parallel.main(__name__):
#X = np.load("testMay151.npy")
X = np.load("_3_100*6*6_1000*1*1_Jun_16_danny.npy")
model = X.item()
# get num of Parts
numParts = model['layers'][1]['num_parts']
net = pnet.PartsNet.load_from_dict(model)
allLayer = net.layers
ims,labels = ag.io.load_mnist('training')
trainingDataNum = 1000
firstLayerShape = 6
extractedFeature = extract(ims[0:trainingDataNum],allLayer[0:2])[0]
print(extractedFeature.shape)
extractedFeature = extractedFeature.reshape(extractedFeature.shape[0:3])
partsPlot = np.zeros((numParts,firstLayerShape,firstLayerShape))
partsCodedNumber = np.zeros(numParts)
imgRegion= [[] for x in range(numParts)]
partsRegion = [[] for x in range(numParts)]
for i in range(trainingDataNum):
codeParts = extractedFeature[i]
for m in range(29 - firstLayerShape):
for n in range(29 - firstLayerShape):
if(codeParts[m,n]!=-1):
partsPlot[codeParts[m,n]]+=ims[i,m:m+firstLayerShape,n:n+firstLayerShape]
partsCodedNumber[codeParts[m,n]]+=1
for j in range(numParts):
partsPlot[j] = partsPlot[j]/partsCodedNumber[j]
secondLayerCodedNumber = 0
secondLayerShape = 12
frame = (secondLayerShape - firstLayerShape)/2
frame = int(frame)
totalRange = 29 - firstLayerShape
if 1:
for i in range(trainingDataNum):
codeParts = extractedFeature[i]
for m in range(totalRange)[frame:totalRange - frame]:
for n in range(totalRange)[frame:totalRange - frame]:
if(codeParts[m,n]!=-1):
imgRegion[codeParts[m,n]].append(ims[i, m - frame:m + secondLayerShape - frame,n - frame:n + secondLayerShape - frame])
secondLayerCodedNumber+=1
partsGrid = partsPool(codeParts[m-frame:m+frame + 1,n-frame:n+frame + 1],numParts)
partsRegion[codeParts[m,n]].append(partsGrid)
##second-layer parts
numSecondLayerParts = 10
allPartsLayer = [[pnet.PartsLayer(numSecondLayerParts,(1,1),
settings=dict(outer_frame = 0,
threshold = 5,
sample_per_image = 1,
max_samples=10000,
min_prob = 0.005))]
for i in range(numParts)]
allPartsLayerImg = np.zeros((numParts,numSecondLayerParts,secondLayerShape,secondLayerShape))
allPartsLayerImgNumber = np.zeros((numParts,numSecondLayerParts))
zeroParts = 0
imgRegionPool = [[] for i in range(numParts * numSecondLayerParts)]
for i in range(numParts):
if(not partsRegion[i]):
continue
allPartsLayer[i][0].train_from_samples(np.array(partsRegion[i]),None)
extractedFeaturePart = extract(np.array(partsRegion[i],dtype = np.uint8),allPartsLayer[i])[0]
print(extractedFeaturePart.shape)
for j in range(len(partsRegion[i])):
if(extractedFeaturePart[j,0,0,0]!=-1):
partIndex = extractedFeaturePart[j,0,0,0]
allPartsLayerImg[i,partIndex]+=imgRegion[i][j]
imgRegionPool[i * numSecondLayerParts + partIndex].append(imgRegion[i][j])
allPartsLayerImgNumber[i,partIndex]+=1
else:
zeroParts+=1
for i in range(numParts):
for j in range(numSecondLayerParts):
if(allPartsLayerImgNumber[i,j]):
allPartsLayerImg[i,j] = allPartsLayerImg[i,j]/allPartsLayerImgNumber[i,j]
"""
Visualize the SuperParts
"""
settings = {'interpolation':'nearest','cmap':plot.cm.gray,}
settings['vmin'] = 0
settings['vmax'] = 1
plotData = np.ones(((2 + secondLayerShape)*100+2,(2+secondLayerShape)*(numSecondLayerParts + 1)+2))*0.8
visualShiftParts = 0
if 0:
allPartsPlot = np.zeros((20,numSecondLayerParts + 1,12,12))
gr.images(partsPlot.reshape(numParts,6,6),zero_to_one=False,vmin = 0, vmax = 1)
allPartsPlot[:,0] = 0.5
allPartsPlot[:,0,3:9,3:9] = partsPlot[20:40]
allPartsPlot[:,1:,:,:] = allPartsLayerImg[20:40]
gr.images(allPartsPlot.reshape(20 * (numSecondLayerParts + 1),12,12),zero_to_one=False, vmin = 0, vmax =1)
elif 1:
for i in range(numSecondLayerParts + 1):
for j in range(100):
if i == 0:
plotData[5 + j * (2 + secondLayerShape):5+firstLayerShape + j * (2 + secondLayerShape), 5 + i * (2 + secondLayerShape): 5+firstLayerShape + i * (2 + secondLayerShape)] = partsPlot[j+visualShiftParts]
else:
plotData[2 + j * (2 + secondLayerShape):2 + secondLayerShape+ j * (2 + secondLayerShape),2 + i * (2 + secondLayerShape): 2+ secondLayerShape + i * (2 + secondLayerShape)] = allPartsLayerImg[j+visualShiftParts,i-1]
plot.figure(figsize=(10,40))
plot.axis('off')
plot.imshow(plotData, **settings)
plot.savefig('test3.pdf',format='pdf',dpi=900)
else:
pass
"""
Train A Class-Model Layer
    Building a parts layer consisting of all the components from all groups
"""
secondLayerPartsLayer = [pnet.PartsLayer(numSecondLayerParts * numParts,(1,1),settings = dict(outer_frame = 0, threshold = 5, sample_per_image = 1, max_samples=10000, min_prob = 0.005))]
secondLayerPartsLayer[0]._parts = np.asarray([allPartsLayer[i][0]._parts for i in range(numParts)])
secondLayerPartsLayer[0]._parts = secondLayerPartsLayer[0]._parts.reshape((numSecondLayerParts * numParts,)+secondLayerPartsLayer[0]._parts.shape[2:])
digits = range(10)
sup_ims = []
sup_labels = []
classificationTrainingNum = 100
for d in digits:
ims0 = ag.io.load_mnist('training', [d], selection = slice(classificationTrainingNum), return_labels = False)
sup_ims.append(ims0)
sup_labels.append(d * np.ones(len(ims0),dtype = np.int64))
sup_ims = np.concatenate(sup_ims, axis = 0)
sup_labels = np.concatenate(sup_labels,axis = 0)
curX = extract(sup_ims,allLayer[0:2])[0]
#print(curX.shape)
curX = curX.reshape(curX.shape[0:3])
secondLevelCurx = np.zeros((10 * classificationTrainingNum,29 - secondLayerShape,29 - secondLayerShape,1,1,numParts))
secondLevelCurxCenter = np.zeros((10 * classificationTrainingNum,29- secondLayerShape,29 - secondLayerShape))
#for i in range(10 * classificationTrainingNum):
# codeParts = curX[i]
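    # index_map_pooling (from pnet) is used below, presumably to collapse each
    # (2*frame+1) x (2*frame+1) window of first-layer part codes into a single
    # pooled numParts-long indicator vector per centre position; the raw centre
    # code is kept separately in secondLevelCurxCenter.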
for m in range(totalRange)[frame:totalRange-frame]:
for n in range(totalRange)[frame:totalRange-frame]:
secondLevelCurx[:,m-frame,n-frame] = index_map_pooling(curX[:,m-frame:m+frame+1,n-frame:n+frame+1],numParts,(2 * frame + 1,2 * frame + 1),(2 * frame + 1,2 * frame + 1))
secondLevelCurxCenter[:,m-frame,n-frame] = curX[:,m,n]
secondLevelCurx = np.asarray(secondLevelCurx.reshape(secondLevelCurx.shape[0],29- secondLayerShape,29-secondLayerShape,numParts),dtype = np.uint8)
thirdLevelCurx = np.zeros((10 * classificationTrainingNum, 29 - secondLayerShape,29 - secondLayerShape))
thirdLevelCurx = extract(secondLevelCurx,secondLayerPartsLayer)[0]
print(thirdLevelCurx.shape)
print("+++++++++++++++++++++++++++++++++++++++++++++")
if 1:
classificationLayers = [
pnet.PoolingLayer(shape = (4,4),strides = (4,4)),
#pnet.MixtureClassificationLayer(n_components = 5, min_prob = 1e-7, block_size = 20)
pnet.SVMClassificationLayer(C=1.0)
]
classificationNet = pnet.PartsNet(classificationLayers)
classificationNet.train((np.array(thirdLevelCurx,dtype = np.int64),int(numParts * numSecondLayerParts)),sup_labels[:])
print("Training Success!!")
if 1:
testImg,testLabels = ag.io.load_mnist('testing')
testingNum = testLabels.shape[0]
print("training extract Begin")
curTestX = extract(testImg, allLayer[0:2])[0]
print("training extract End")
curTestX = curTestX.reshape(curTestX.shape[0:3])
secondLevelCurTestX = np.zeros((testingNum, 29 - secondLayerShape,29 - secondLayerShape,1,1,numParts))
secondLevelCurTestXCenter = np.zeros((testingNum, 29 - secondLayerShape,29 - secondLayerShape))
import time
start = time.time()
#for i in range(testingNum):
# codeParts = curTestX[i]
for m in range(totalRange)[frame:totalRange - frame]:
for n in range(totalRange)[frame:totalRange-frame]:
secondLevelCurTestX[:,m-frame,n-frame] = index_map_pooling(curTestX[:,m-frame:m+frame + 1,n-frame:n+frame + 1],numParts,(2 * frame + 1,2 * frame + 1),(2 * frame + 1,2 * frame + 1))
secondLevelCurTestXCenter[:,m-frame,n-frame] = curTestX[:,m,n]
afterPool = time.time()
print(afterPool - start)
secondLevelCurTestX = np.asarray(secondLevelCurTestX.reshape(secondLevelCurTestX.shape[0],29 - secondLayerShape, 29 - secondLayerShape, numParts),dtype = np.uint8)
thirdLevelCurTestX = np.zeros((testingNum, 29 - secondLayerShape, 29 - secondLayerShape))
featureMap = [[] for i in range(numParts)]
thirdLevelCurTestX = extract(secondLevelCurTestX,secondLayerPartsLayer)[0]
end = time.time()
print(end-afterPool)
print(thirdLevelCurTestX.shape)
testImg_Input = np.array(thirdLevelCurTestX,dtype = np.int64)
testImg_batches = np.array_split(testImg_Input,200)
testLabels_batches = np.array_split(testLabels, 200)
args = [tup + (classificationNet,) for tup in zip(testImg_batches,testLabels_batches)]
corrects = 0
total = 0
def format_error_rate(pr):
return "{:.2f}%".format(100 * (1-pr))
print("Testing Starting...")
for i, res in enumerate(pnet.parallel.starmap_unordered(test,args)):
if i !=0 and i % 20 ==0:
print("{0:05}/{1:05} Error rate: {2}".format(total, len(ims),format_error_rate(pr)))
corrects += res.sum()
total += res.size
pr = corrects / total
print("Final error rate:", format_error_rate(pr))
for i in range(numParts):
print(np.asarray(partsRegion[i]).shape)
|
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.designate import utils
from rally.task import validation
"""Basic scenarios for Designate."""
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup": ["designate"]},
name="DesignateBasic.create_and_list_domains",
platform="openstack")
class CreateAndListDomains(utils.DesignateScenario):
def run(self):
"""Create a domain and list all domains.
Measure the "designate domain-list" command performance.
If you have only 1 user in your context, you will
add 1 domain on every iteration. So you will have more
        and more domains and will be able to measure the
performance of the "designate domain-list" command depending on
the number of domains owned by users.
"""
domain = self._create_domain()
msg = "Domain isn't created"
self.assertTrue(domain, msg)
list_domains = self._list_domains()
self.assertIn(domain, list_domains)
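# Illustrative only: a scenario like the one above is referenced from a Rally
# task file by its configured name. The runner/context values below are
# assumptions, shown purely as an example of the task input format:
#
#   {
#       "DesignateBasic.create_and_list_domains": [
#           {
#               "runner": {"type": "constant", "times": 10, "concurrency": 2},
#               "context": {"users": {"tenants": 2, "users_per_tenant": 2}}
#           }
#       ]
#   }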
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="DesignateBasic.list_domains",
platform="openstack")
class ListDomains(utils.DesignateScenario):
def run(self):
"""List Designate domains.
This simple scenario tests the designate domain-list command by listing
all the domains.
        Suppose we have 2 users in the context and each has 2 domains
        uploaded for them; then we can test the performance of the
        designate domain-list command in this case.
"""
self._list_domains()
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup": ["designate"]},
name="DesignateBasic.create_and_delete_domain",
platform="openstack")
class CreateAndDeleteDomain(utils.DesignateScenario):
def run(self):
"""Create and then delete a domain.
Measure the performance of creating and deleting domains
with different level of load.
"""
domain = self._create_domain()
self._delete_domain(domain["id"])
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup": ["designate"]},
name="DesignateBasic.create_and_update_domain",
platform="openstack")
class CreateAndUpdateDomain(utils.DesignateScenario):
def run(self):
"""Create and then update a domain.
Measure the performance of creating and updating domains
with different level of load.
"""
domain = self._create_domain()
self._update_domain(domain)
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup": ["designate"]},
name="DesignateBasic.create_and_delete_records",
platform="openstack")
class CreateAndDeleteRecords(utils.DesignateScenario):
def run(self, records_per_domain=5):
"""Create and then delete records.
Measure the performance of creating and deleting records
with different level of load.
        :param records_per_domain: Records to create per domain.
"""
domain = self._create_domain()
records = []
for i in range(records_per_domain):
record = self._create_record(domain)
records.append(record)
for record in records:
self._delete_record(
domain["id"], record["id"])
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="DesignateBasic.list_records",
platform="openstack")
class ListRecords(utils.DesignateScenario):
def run(self, domain_id):
"""List Designate records.
This simple scenario tests the designate record-list command by listing
all the records in a domain.
        Suppose we have 2 users in the context and each has 2 domains
        uploaded for them; then we can test the performance of the
        designate record-list command in this case.
:param domain_id: Domain ID
"""
self._list_records(domain_id)
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup": ["designate"]},
name="DesignateBasic.create_and_list_records",
platform="openstack")
class CreateAndListRecords(utils.DesignateScenario):
def run(self, records_per_domain=5):
"""Create and then list records.
If you have only 1 user in your context, you will
add 1 record on every iteration. So you will have more
and more records and will be able to measure the
performance of the "designate record-list" command depending on
the number of domains/records owned by users.
        :param records_per_domain: Records to create per domain.
"""
domain = self._create_domain()
for i in range(records_per_domain):
self._create_record(domain)
self._list_records(domain["id"])
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup": ["designate"]},
name="DesignateBasic.create_and_list_servers",
platform="openstack")
class CreateAndListServers(utils.DesignateScenario):
def run(self):
"""Create a Designate server and list all servers.
If you have only 1 user in your context, you will
add 1 server on every iteration. So you will have more
        and more servers and will be able to measure the
performance of the "designate server-list" command depending on
the number of servers owned by users.
"""
server = self._create_server()
self.assertTrue(server)
list_servers = self._list_servers()
self.assertIn(server, list_servers)
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup": ["designate"]},
name="DesignateBasic.create_and_delete_server",
platform="openstack")
class CreateAndDeleteServer(utils.DesignateScenario):
def run(self):
"""Create and then delete a server.
Measure the performance of creating and deleting servers
with different level of load.
"""
server = self._create_server()
self._delete_server(server["id"])
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="DesignateBasic.list_servers", platform="openstack")
class ListServers(utils.DesignateScenario):
def run(self):
"""List Designate servers.
This simple scenario tests the designate server-list command by listing
all the servers.
"""
self._list_servers()
# NOTE: API V2
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup": ["designate"]},
name="DesignateBasic.create_and_list_zones",
platform="openstack")
class CreateAndListZones(utils.DesignateScenario):
def run(self):
"""Create a zone and list all zones.
Measure the "openstack zone list" command performance.
If you have only 1 user in your context, you will
add 1 zone on every iteration. So you will have more
        and more zones and will be able to measure the
performance of the "openstack zone list" command depending on
the number of zones owned by users.
"""
zone = self._create_zone()
self.assertTrue(zone)
list_zones = self._list_zones()
self.assertIn(zone, list_zones)
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="DesignateBasic.list_zones", platform="openstack")
class ListZones(utils.DesignateScenario):
def run(self):
"""List Designate zones.
This simple scenario tests the openstack zone list command by listing
all the zones.
"""
self._list_zones()
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup": ["designate"]},
name="DesignateBasic.create_and_delete_zone",
platform="openstack")
class CreateAndDeleteZone(utils.DesignateScenario):
def run(self):
"""Create and then delete a zone.
Measure the performance of creating and deleting zones
with different level of load.
"""
zone = self._create_zone()
self._delete_zone(zone["id"])
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="DesignateBasic.list_recordsets",
platform="openstack")
class ListRecordsets(utils.DesignateScenario):
def run(self, zone_id):
"""List Designate recordsets.
This simple scenario tests the openstack recordset list command by
listing all the recordsets in a zone.
:param zone_id: Zone ID
"""
self._list_recordsets(zone_id)
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=("zones"))
@scenario.configure(context={"cleanup": ["designate"]},
name="DesignateBasic.create_and_delete_recordsets",
platform="openstack")
class CreateAndDeleteRecordsets(utils.DesignateScenario):
def run(self, recordsets_per_zone=5):
"""Create and then delete recordsets.
Measure the performance of creating and deleting recordsets
with different level of load.
        :param recordsets_per_zone: Recordsets to create per zone.
"""
zone = random.choice(self.context["tenant"]["zones"])
recordsets = []
for i in range(recordsets_per_zone):
recordset = self._create_recordset(zone)
recordsets.append(recordset)
for recordset in recordsets:
self._delete_recordset(
zone["id"], recordset["id"])
@validation.add("required_services",
services=[consts.Service.DESIGNATE])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=("zones"))
@scenario.configure(context={"cleanup": ["designate"]},
name="DesignateBasic.create_and_list_recordsets",
platform="openstack")
class CreateAndListRecordsets(utils.DesignateScenario):
def run(self, recordsets_per_zone=5):
"""Create and then list recordsets.
If you have only 1 user in your context, you will
add 1 recordset on every iteration. So you will have more
and more recordsets and will be able to measure the
performance of the "openstack recordset list" command depending on
the number of zones/recordsets owned by users.
        :param recordsets_per_zone: Recordsets to create per zone.
"""
zone = random.choice(self.context["tenant"]["zones"])
for i in range(recordsets_per_zone):
self._create_recordset(zone)
self._list_recordsets(zone["id"])
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deterministic input pipeline."""
import dataclasses
from typing import Callable, Dict, Optional, Union
from clu import deterministic_data
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
# Register spherical_mnist so that tfds.load works.
import spin_spherical_cnns.spherical_mnist.spherical_mnist # pylint: disable=unused-import
Tensor = Union[tf.Tensor, tf.SparseTensor, tf.RaggedTensor]
Features = Dict[str, Tensor]
# Dataset creation functions return info, train, validation and test sets.
@dataclasses.dataclass
class DatasetSplits:
info: tfds.core.DatasetInfo
train: tf.data.Dataset
validation: tf.data.Dataset
test: tf.data.Dataset
def create_datasets(config,
data_rng):
"""Create datasets for training and evaluation.
For the same data_rng and config this will return the same datasets. The
datasets only contain stateless operations.
Args:
config: Configuration to use.
data_rng: PRNGKey for seeding operations in the training dataset.
Returns:
A DatasetSplits object containing the dataset info, and the train,
validation, and test splits.
"""
if config.dataset == "tiny_dummy":
return _create_dataset_tiny_dummy(config)
if config.dataset in ["spherical_mnist/rotated", "spherical_mnist/canonical"]:
return _create_dataset_spherical_mnist(config, data_rng)
else:
raise ValueError(f"Dataset {config.dataset} not supported.")
def _create_dataset_tiny_dummy(
config):
"""Create low-resolution dataset for testing. See create_datasets()."""
size = 100
resolution = 8
n_spins = 1
n_channels = 1
num_classes = 10
shape = (size, resolution, resolution, n_spins, n_channels)
entries = np.linspace(-1, 1, np.prod(shape), dtype=np.float32).reshape(shape)
labels = np.resize(np.arange(num_classes), [size])
train_dataset = tf.data.Dataset.from_tensor_slices({"input": entries,
"label": labels})
train_dataset = train_dataset.batch(config.per_device_batch_size,
drop_remainder=True)
train_dataset = train_dataset.batch(jax.local_device_count(),
drop_remainder=True)
features = tfds.features.FeaturesDict(
{"label": tfds.features.ClassLabel(num_classes=num_classes)})
builder = tfds.testing.DummyDataset()
dataset_info = tfds.core.DatasetInfo(builder=builder, features=features)
# We don't really care about the difference between train, validation and test
  # for dummy data.
return DatasetSplits(info=dataset_info,
train=train_dataset,
validation=train_dataset.take(5),
test=train_dataset.take(5))
def _preprocess_spherical_mnist(features):
features["input"] = tf.cast(features["image"], tf.float32) / 255.0
# Add dummy spin dimension.
features["input"] = features["input"][Ellipsis, None, :]
features.pop("image")
return features
def create_train_dataset(
config,
dataset_builder,
split,
data_rng,
preprocess_fn = None,
):
"""Create train dataset."""
# This ensures determinism in distributed setting.
train_split = deterministic_data.get_read_instruction_for_host(
split, dataset_info=dataset_builder.info)
train_dataset = deterministic_data.create_dataset(
dataset_builder,
split=train_split,
rng=data_rng,
preprocess_fn=preprocess_fn,
shuffle_buffer_size=config.shuffle_buffer_size,
batch_dims=[jax.local_device_count(), config.per_device_batch_size],
num_epochs=config.num_epochs,
shuffle=True,
)
options = tf.data.Options()
options.experimental_external_state_policy = (
tf.data.experimental.ExternalStatePolicy.WARN)
train_dataset = train_dataset.with_options(options)
return train_dataset
def create_eval_dataset(
config,
dataset_builder,
split,
preprocess_fn = None,
):
"""Create evaluation dataset (validation or test sets)."""
# This ensures the correct number of elements in the validation sets.
num_validation_examples = (
dataset_builder.info.splits[split].num_examples)
eval_split = deterministic_data.get_read_instruction_for_host(
split, dataset_info=dataset_builder.info, drop_remainder=False)
eval_num_batches = None
if config.eval_pad_last_batch:
# This is doing some extra work to get exactly all examples in the
# validation split. Without this the dataset would first be split between
# the different hosts and then into batches (both times dropping the
# remainder). If you don't mind dropping a few extra examples you can omit
# the `pad_up_to_batches` argument.
eval_batch_size = jax.local_device_count() * config.per_device_batch_size
eval_num_batches = int(np.ceil(num_validation_examples /
eval_batch_size /
jax.process_count()))
return deterministic_data.create_dataset(
dataset_builder,
split=eval_split,
# Only cache dataset in distributed setup to avoid consuming a lot of
# memory in Colab and unit tests.
cache=jax.process_count() > 1,
batch_dims=[jax.local_device_count(), config.per_device_batch_size],
num_epochs=1,
shuffle=False,
preprocess_fn=preprocess_fn,
pad_up_to_batches=eval_num_batches,
)
def _create_dataset_spherical_mnist(
config,
data_rng):
"""Create Spherical MNIST. See create_datasets()."""
  dataset_builder = tfds.builder("spherical_mnist")
if config.dataset == "spherical_mnist/rotated":
train_split = "train_rotated"
validation_split = "validation_rotated"
test_split = "test_rotated"
elif config.dataset == "spherical_mnist/canonical":
train_split = "train_canonical"
validation_split = "validation_canonical"
test_split = "test_canonical"
else:
raise ValueError(f"Unrecognized dataset: {config.dataset}")
if config.combine_train_val_and_eval_on_test:
train_split = f"{train_split} + {validation_split}"
train_dataset = create_train_dataset(
config,
dataset_builder,
train_split,
data_rng,
preprocess_fn=_preprocess_spherical_mnist)
validation_dataset = create_eval_dataset(
config,
dataset_builder,
validation_split,
preprocess_fn=_preprocess_spherical_mnist)
test_dataset = create_eval_dataset(
config,
dataset_builder,
test_split,
preprocess_fn=_preprocess_spherical_mnist)
return DatasetSplits(info=dataset_builder.info,
train=train_dataset,
validation=validation_dataset,
test=test_dataset)
|
|
#! /usr/bin/env ipython
'''
this will load in info/data relevant for a specific cluster
'''
#adam-note# a derivative of adam_plot_radial_shear_profile.py
from readtxtfile import readtxtfile
import astropy
import astropy.io.fits
pyfits=astropy.io.fits
from astropy.io import ascii
import sys,os,re,string,time
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
import ldac
import commands #commands.getoutput
sys.path.append('/u/ki/awright/quick/pythons')
import imagetools, cattools, pipelinetools
#
ns=globals()
#adam-useful# document this later, it's helpful!
myfgas_worklist = readtxtfile('/u/ki/awright/gravitas/maxlikelensing/FgasThesis_sample_mine.list')
myfgas_clusters = np.array([x[0] for x in myfgas_worklist])
myfgas_filters = np.array([x[1] for x in myfgas_worklist])
myfgas_lenses = np.array([x[2] for x in myfgas_worklist])
othersfgas_worklist = readtxtfile('/u/ki/awright/gravitas/maxlikelensing/FgasThesis_sample_not_mine.list')
othersfgas_clusters = np.array([x[0] for x in othersfgas_worklist])
othersfgas_filters = np.array([x[1] for x in othersfgas_worklist])
othersfgas_lenses = np.array([x[2] for x in othersfgas_worklist])
allfgas_clusters= np.append(myfgas_clusters, othersfgas_clusters)
allfgas_filters= np.append(myfgas_filters, othersfgas_filters)
allfgas_lenses= np.append(myfgas_lenses, othersfgas_lenses)
gpfsSUBARUDIR='/gpfs/slac/kipac/fs1/u/awright/SUBARU/'
ki05SUBARUDIR='/nfs/slac/g/ki/ki05/anja/SUBARU/'
ki18SUBARUDIR='/u/ki/awright/data/'
## define some things for making latex tables:
filter2tex={ 'W-J-B':'{\it B}$_{\rm J}$', 'W-J-V':'{\it V}$_{\rm J}$', 'W-C-RC':'{\it R}$_{\rm C}$', 'W-C-IC':'{\it I}$_{\rm C}$', 'W-S-I+':'{\it i}$^{+}$','W-S-Z+':'{\it z}$^{+}$'}
MegaPrime_filter2tex={ 'u':'{\it u}$^{\star}$', 'g':'{\it g}$^{\star}$', 'r':'{\it r}$^{\star}$', 'i':'{\it i}$^{\star}$', 'z':'{\it z}$^{\star}$'}
filters_BVRIZ=['W-J-B', 'W-J-V', 'W-C-RC', 'W-C-IC', 'W-S-I+', 'W-S-Z+']
my_clusters_ra_dec={
'Zw2089': ('9:00:36.882','+20:53:40.361'),
'MACS0429-02': ( '04:29:36.001', '-02:53:05.63' ),
'MACS1115+01': ( '11:15:51.881', '01:29:54.98' ),
'RXJ2129': ( '21:29:39.727', '00:05:18.15' )
}
cl_short2long_names={
## my clusters
"MACS1115+01": "MACSJ1115.8+0129",
"Zw2089":"Zw2089",
'RXJ2129':"RXJ2129.6+0005",
'MACS0429-02':"MACSJ0429.6-0253",
## fgas_others
"MACS1423+24":"MACSJ1423.8+2404",
"MACS1532+30":"RXJ1532.9+3021", #"MACSJ1532.8+3021",
"MACS1621+38":"MACSJ1621.3+3810",
"MACS1720+35":"MACSJ1720.2+3536",
"MACS2140-23":"MS2137.3-2353",
"MACS1347-11":"RXJ1347.5-1145"
}
#MACSJ2140.2-2339
#MACSJ1347.5-1144
##Post-thesis:
#MACS1427+44:"MACSJ1427.2+4407"
#a1835.lens
#a2204.lens
##megaprime only
#3C295
##missing bands
#MACS0159-08
#Zw2701
class my_cluster(object):
try:
''' get the properties of this cluster '''
## here we define global files (lookup tables, for example) as Class Attributes (as opposed to instance attributes starting with self.)
fl_redshift="/gpfs/slac/kipac/fs1/u/awright/SUBARU/clusters.redshifts"
fl_lensmag='/u/ki/awright/gravitas/ldaclensing/lensing_coadd_type_filter.list'
#dir_pzwork = gpfsSUBARUDIR + "/fgas_pz_masses/catalog_backup_thesis_2019-05-27/"
dir_pzwork = '/gpfs/slac/kipac/fs1/u/awright/SUBARU/fgas_thesis_backup_catalogs_06-03-2019/'
pixscale = 0.2
## naming conventions:
#dir_* : directory name
#fl_* : filename
#fo_* : file object
#cat_* : catalog file object
#clprop_* : cluster property
#lensprop_* : lensing image header property
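        # e.g. self.fl_cut_lensing is the filename of the lensing cut catalog,
        # self.fo_image_lens is the opened FITS file object for the lensing
        # coadd, self.cat_lens is the opened ldac catalog, and self.clprop_z is
        # the cluster redshift read from fl_redshift.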
def __init__(self,cluster,filter,lens,dir_pzwork=dir_pzwork):
self.cluster = cluster
self.filter = filter
self.lens = lens
self.props=dict(cluster = cluster,filter = filter,lens = lens,SUBARUDIR=gpfsSUBARUDIR,dir_pzwork=self.dir_pzwork)
self.clquick='.'.join([cluster,filter,lens]) #tag='_old_unmatched'
tag=''
self.dir_cluster= "/%(SUBARUDIR)s/%(cluster)s/" % (self.props)
self.adam=1
if not os.path.isdir(self.dir_cluster):
#if self.cluster in othersfgas_clusters:
self.adam=0
#adam-Warning# I think dir_pzwork will have to change:
self.props=dict(cluster = cluster,filter = filter,lens = lens,SUBARUDIR=ki05SUBARUDIR,dir_pzwork=self.dir_pzwork)
self.dir_lens_coadd = "/%(SUBARUDIR)s/%(cluster)s/%(filter)s/SCIENCE/coadd_%(cluster)s_%(lens)s/" % (self.props)
self.dir_photom = "/%(SUBARUDIR)s/%(cluster)s/PHOTOMETRY_%(filter)s_aper/" % (self.props)
self.dir_lens = "/%(SUBARUDIR)s/%(cluster)s/LENSING_%(filter)s_%(filter)s_aper/%(lens)s/" % (self.props)
assert(os.path.isdir(self.dir_photom))
assert(os.path.isdir(self.dir_lens))
self.fl_image_lens = "/%(SUBARUDIR)s/%(cluster)s/%(filter)s/SCIENCE/coadd_%(cluster)s_%(lens)s/coadd.fits" % (self.props)
self.fo_image_lens = astropy.io.fits.open(self.fl_image_lens)[0]
self.header_lens = self.fo_image_lens.header
self.lensprop_exptime=self.header_lens['EXPTIME']
self.lensprop_seeing=self.header_lens['SEEING']
self.fl_cut_lensing = self.dir_lens + "cut_lensing.cat" +tag
self.fl_cut_lensing_step = self.dir_lens + "cut_lensing.cat_step" +tag
self.fl_prof = self.dir_lens + "%(cluster)s_rm1.5_ri0.75_ro3.0_c4.out.prof" % (self.props) +tag
# '/u/ki/dapple/subaru/doug/publication/ccmass_2012-07-31'
self.fl_mass = self.dir_lens + "%(cluster)s_rm1.5_ri0.75_ro3.0_c4.out" % (self.props) +tag
if not self.adam:
# use like compare_masses.readAnjaMasses('/u/ki/dapple/subaru/doug/publication/ccmass_2012-07-31/')
self.fl_mass = "/u/ki/dapple/subaru/doug/publication/ccmass_2012-07-31/%(cluster)s.%(filter)s.%(lens)s.out" % (self.props) +tag
## load in things from ~/gravitas/maxlikelensing/adam_outline.sh
self.fl_cosmos_rscut = self.dir_lens + '/cosmos_rscut.cat'
self.fl_cat_rs = self.dir_lens + "%(cluster)s_redsequence.cat" % (self.props) +tag
self.fl_cat_lensbase = self.dir_lens + "coadd_photo.cat" +tag
if self.adam:
self.fl_photom = self.dir_photom + "%(cluster)s.calibrated_PureStarCalib.cat" % (self.props)
self.fl_photom_alter = self.dir_photom + "%(cluster)s.calibrated_PureStarCalib.alter.cat" % (self.props)
else:
self.fl_photom = self.dir_photom + "%(cluster)s.slr.cat" % (self.props)
self.fl_photom_alter = self.dir_photom + "%(cluster)s.slr.alter.cat" % (self.props)
if self.adam:
self.seeing_mean_rh_entry = commands.getoutput('grep "%(cluster)s" %(dir_pzwork)s/cluster.seeing.dat' % (self.props))
self.seeing_mean_rh = float(self.seeing_mean_rh_entry.split(lens)[-1])
self.seeing_mean_rh_arcsec = self.pixscale * (2.0 * self.seeing_mean_rh)
self.fl_outputpdz= "%(dir_pzwork)s/%(cluster)s.%(filter)s.pdz.cat" % (self.props)
self.fl_bpzcat= "%(dir_pzwork)s/%(cluster)s.%(filter)s.bpz.tab" % (self.props)
# get lensing mag
fo_lensmag=open(self.fl_lensmag)
lensmag_lines=fo_lensmag.readlines()
for line in lensmag_lines:
if line.startswith(self.cluster) and self.filter in line and self.lens in line:
line=line.strip()
lensmag=line.split(' ')[-1]
if self.filter in lensmag:
self.lensmag=lensmag.strip()
break
else:
raise Exception('line with our self.cluster in '+self.fl_lensmag+' doesnt match expected format')
####################################
## get redshift
####################################
fo_redshift=open(self.fl_redshift)
redshift_lines=fo_redshift.readlines()
for zl in redshift_lines:
if zl.startswith(self.cluster):
zline=zl.strip()
znum=zline.split(' ')[-1]
if imagetools.isfloat(znum):
self.clprop_z=float(znum)
break
else:
raise Exception('line with our self.cluster in '+self.fl_redshift+' doesnt match expected format')
try:
print self.cluster,'at redshift',self.clprop_z
self.z_cluster=self.clprop_z #make it backwards compatible after name convention change
fo_redshift.close()
except:
raise Exception('line with our self.cluster is missing from '+self.fl_redshift)
        ## this gets the cut_lensing.cat file opened if it's needed
def __getattr__(self,key):
'''
            What we can do here is get the value when the key doesn't already exist.
There are two attribute access methods in python classes: __getattribute__ and __getattr__. The first is called every time an attribute lookup is attempted, the second is called only when the normal attribute lookup system fails (including lookups in superclasses).
'''
print key
if key=='_cat_lens':
self.set_cat_lens()
return self.get_cat_lens()
def get_cat_lens(self):
return self._cat_lens
def set_cat_lens(self):
print("opening "+self.fl_cut_lensing)
self._cat_lens=ldac.openObjectFile(self.fl_cut_lensing)
cat_lens = property(get_cat_lens,set_cat_lens)
def get_cuts(self):
print '''
import apply_cuts
mycuts=apply_cuts.ApplyCuts(cluster,filter,lens,workdir)
datacuts=mycuts.dataCuts()
photozcuts=mycuts.photozCuts()
'''
def get_latex_table_line(self):
# Cluster & $z_{\rm Cl}$ & R.A. & Dec. & Filter Bands & Lensing Band \\
# & & (J2000) & (J2000) & & (exp. time [s], seeing [\arcsec]) \\
cluster_line = cl_short2long_names[self.cluster]
z_line = '%.3f' % (self.clprop_z)
if self.adam:
self.ra , self.dec = my_clusters_ra_dec[self.cluster]
if not self.__dict__.has_key('k_bands'):
self.get_best_mags()
filters_line = ''.join([filter2tex[k_band] for k_band in self.k_bands])
lens_filter = filter2tex[self.filter]
lens_filter_line='%s (%s,%.2f)' % (lens_filter,int(self.lensprop_exptime),self.lensprop_seeing)
cols=[cluster_line,z_line, self.ra , self.dec, filters_line, lens_filter_line]
print repr(' & '.join(cols))
#print self.cluster, ': seeing_mean_rh_arcsec=',self.seeing_mean_rh_arcsec, 'lensprop_seeing=',self.lensprop_seeing
return cols
else:
oldline = commands.getoutput('grep "%s" doug_thesis_latex_table.tex' % (cluster_line))
oldline2 = commands.getoutput('grep "%s" doug_fgas4_table.tex' % (cluster_line))
cluster_nums_only=re.sub("\D", "", self.cluster)
oldline3 = commands.getoutput('grep "%s" doug_fgas4_table.tex' % (cluster_nums_only))
if oldline2:
print oldline2
return oldline2
elif oldline:
print oldline
return oldline
elif oldline3:
print oldline3
return oldline3
else:
print 'problem with cluster:',self.cluster,' cluster_line=',cluster_line
return cluster_line
def get_CCmass_props(self):
'''
get properties in the cc mass (aka ldaclensing/) script outputs
'''
#cattools.ldaccat_to_ds9(incat=self.cat_lens,outcat=self.fl_cut_lensing+'_ra_dec.tsv',keys=['ALPHA_J2000','DELTA_J2000'])
#MACS1126: z=0.436
####################################
## get measurements from mass output file
####################################
fo_mass=open(self.fl_mass)
self.mass_lines=fo_mass.readlines()
for ml in self.mass_lines:
mline=ml.strip()
if mline.startswith('bootstrap median mass'):
mnum=mline.split(': ')[-1]
if imagetools.isfloat(mnum):
self.clprop_ccmass=float(mnum)
else:
raise Exception('line with bootstrap median mass in it doesnt match needed format, it looks like this:\n'+ml)
# Eventually do something that will get this:
# '
# 'bootstrap 84th percentile: 1.338070e+15 (median +2.383156e+14)\n',
elif mline.startswith('bootstrap 16th percentile'):
mnum_unit=mline.split(': ')[-1]
mnum = mnum_unit.split(' (')[0]
if imagetools.isfloat(mnum):
self.clprop_ccmass_errlow=float(mnum)
else:
raise Exception('line with bootstrap 16th percentile in it doesnt match needed format, it looks like this:\n'+ml)
elif mline.startswith('bootstrap 84th percentile'):
mnum_unit=mline.split(': ')[-1]
mnum = mnum_unit.split(' (')[0]
if imagetools.isfloat(mnum):
self.clprop_ccmass_errhigh = float(mnum)
else:
raise Exception('line with bootstrap 84th percentile in it doesnt match needed format, it looks like this:\n'+ml)
elif mline.startswith('x_cluster'):
mnum_unit=mline.split(': ')[-1]
mnum = mnum_unit.split('[')[0]
if imagetools.isfloat(mnum):
self.clprop_x_cluster=float(mnum)
else:
raise Exception('line with x_cluster in it doesnt match needed format, it looks like this:\n'+ml)
elif mline.startswith('y_cluster'):
mnum_unit=mline.split(': ')[-1]
mnum = mnum_unit.split('[')[0]
if imagetools.isfloat(mnum):
self.clprop_y_cluster=float(mnum)
else:
raise Exception('line with y_cluster in it doesnt match needed format, it looks like this:\n'+ml)
elif 'r_s' in ml:
mnum=mline.split(': ')[-1]
if imagetools.isfloat(mnum):
self.clprop_r_s=float(mnum)
else:
raise Exception('line with r_s in it doesnt match needed format, it looks like this:\n'+ml)
elif 'D_d' in ml:
mnum=mline.split(': ')[-1]
if imagetools.isfloat(mnum):
self.clprop_D_d=float(mnum)
else:
raise Exception('line with D_d in it doesnt match needed format, it looks like this:\n'+ml)
elif 'beta_inf' in ml:
mnum=mline.split(': ')[-1]
if imagetools.isfloat(mnum):
self.clprop_beta_inf=float(mnum)
else:
raise Exception('line with beta_inf in it doesnt match needed format, it looks like this:\n'+ml)
elif '<beta_s>' in ml:
mnum=mline.split(': ')[-1]
if imagetools.isfloat(mnum):
self.clprop_beta_s=float(mnum)
else:
raise Exception('line with beta_s in it doesnt match needed format, it looks like this:\n'+ml)
elif '<beta_s^2>' in ml:
mnum=mline.split(': ')[-1]
if imagetools.isfloat(mnum):
self.clprop_beta_s_sq=float(mnum)
else:
raise Exception('line with beta_s^2 in it doesnt match needed format, it looks like this:\n'+ml)
try:
print ' beta_inf=',self.clprop_beta_inf , ' <beta_s>=',self.clprop_beta_s , ' <beta_s^2>=',self.clprop_beta_s_sq ,' D_d=',self.clprop_D_d , ' r_s=',self.clprop_r_s
self.beta_inf=self.clprop_beta_inf
self.beta_s=self.clprop_beta_s
self.beta_s_sq=self.clprop_beta_s_sq
self.D_d=self.clprop_D_d
self.r_s=self.clprop_r_s
except:
ns.update(locals())
raise Exception('problem recovering all properties from '+self.fl_mass)
def get_best_mags(self):
self.cat_mag=ldac.openObjectFile(self.fl_photom)
self.k_mags=[]
for k in self.cat_mag.keys():
if k.startswith('FLUX_APER1-SUBARU-9') or k.startswith('FLUX_APER1-SUBARU-10'):
if '10_' in k and '-1-' not in k:
print 'skipping over: ',k
continue
if 'COADD' in k: continue
if 'bpz_inputs' in k: continue
magk=self.cat_mag[k]
if magk.max() > magk.min():
#adam-improve# you could improve this by using the extrema to get bins showing the full width!
self.k_mags.append(k)
print 'all mags: ',self.k_mags
self.k_bands,self.k_mags=pipelinetools.get_best_mags(self.fl_photom)
print 'best mags: ',self.k_mags
return self.k_mags
def PZmass_outputs(self,ml_base):
self.ml_base = ml_base
self.fl_pccat = self.ml_base+'-post_cuts.cat'
#pc_cat=ldac.openObjectFile(pc_catfl)
self.fl_pz_out_base = ml_base #"%(dir_pzwork)s/%(cluster)s.%(filter)s.%(lens)s.maxlikelensing" % (self.props)
def CCmass_cosmo_calcs(self):
## get radial profile from CCmass info
self.prof_table=ascii.read(self.fl_prof,names=["radial_arcmin","shear_ave","shear_err"])
self.prof_rad_arcmin=self.prof_table["radial_arcmin"].data
self.prof_gt=self.prof_table["shear_ave"].data
self.prof_gt_err=self.prof_table["shear_err"].data
####################################
## assumed cosmology and define H(z)
####################################
self.clprop_conc=4.0
u_convert=(1.0*astropy.units.arcmin).to(astropy.units.rad)
self.prof_rad_radians = self.prof_rad_arcmin * u_convert.value #r in radians
self.prof_rad_mpc=self.clprop_D_d*self.prof_rad_radians #Mpcs from center of self.cluster
self.prof_rad_x = self.prof_rad_mpc / self.clprop_r_s # dimensionless radial distance
H0=70.0 #km/s/Mpc
#1 megaparsec = 3.09x10**19 km
#H0_SI=70.0*(1/3.09e19)
#Hubble time in years: (70.0*(1/3.09e19))**-1*(3600*24*365.25)**-1
Om=.3
Occ=.7
H = lambda z : H0*np.sqrt(Om*(1+z)**3+Occ)
H_at_z=H(self.clprop_z)*(1/3.09e19)
#G_SI=6.67e-11 #SI units (m^3 kg^-1 s^-2)
#G_const_Mpc_Msun_s = 4.51737014558e-48
G = 4.51737014558e-48 #cosmo units (Mpc^3 Msun^-1 s^-2)
c_light_SI = 3.0e8
#1 megaparsec = 3.09x10**22 m
c_light = c_light_SI *(1/3.09e22) #cosmo units (Mpc s^-1)
self.rho_c=3*H_at_z**2/(8*np.pi*G) #yes these units are consistent [Msun / Mpc^3]
self.delta_c=(200.0/3.0)*(self.clprop_conc**3/(np.log(1+self.clprop_conc)-self.clprop_conc/(1+self.clprop_conc)))
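            # delta_c above is the standard NFW characteristic density contrast,
            # (200/3) * c^3 / (ln(1+c) - c/(1+c)), evaluated at the assumed
            # concentration clprop_conc = 4.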
####################################
## calculate relevant lensing values
####################################
self.clprop_beta = self.clprop_beta_s * self.clprop_beta_inf #beta=D_ds/D_s == beta_s * beta_inf and D_s/(D_d*D_ds)=1/(beta*D_d) eqn #9
self.clprop_Sigma_c = c_light**2/(4*np.pi*G)* 1/(self.clprop_beta*self.clprop_D_d) #1/(beta*D_d) == D_s/(D_d*D_ds) eqn #9
self.beta=self.clprop_beta
self.Sigma_c=self.clprop_Sigma_c
except:
ns.update(locals())
raise
def get_mycls_from_list(clusterlist_fl='/u/ki/awright/gravitas/maxlikelensing/FgasThesis_sample_mine.list'):
worklist = readtxtfile(clusterlist_fl)
#worklist = readtxtfile('FgasThesis_sample_not_mine.list')
#worklist = readtxtfile('FgasThesis_sample_mine.list')
clusters = [x[0] for x in worklist]
filters = [x[1] for x in worklist]
lenses = [x[2] for x in worklist]
print 'mycls=[my_cluster(cluster,filter,lens) for cluster in %s] ' % (repr(clusterlist_fl))
mycls=[]
for cluster,filter,lens in zip(clusters,filters,lenses):
mycl=my_cluster(cluster,filter,lens)
mycls.append(mycl)
return mycls
def get_a_cluster_filter_lens(cluster):
'''don't overuse this, it's going to re-make an instance of my_cluster each time it's called'''
if cluster in allfgas_clusters:
theone=allfgas_clusters==cluster
filter=allfgas_filters[theone][0]
lens=allfgas_lenses[theone][0]
return (cluster,filter,lens)
else:
raise Exception('This is not in the `allfgas_clusters` list!')
def get_a_cluster(cluster):
'''don't overuse this, it's going to re-make an instance of my_cluster each time it's called'''
if cluster in allfgas_clusters:
theone=allfgas_clusters==cluster
filter=allfgas_filters[theone][0]
lens=allfgas_lenses[theone][0]
mycl=my_cluster(cluster,filter,lens)
return mycl
else:
raise Exception('This is not in the `allfgas_clusters` list!')
if __name__=='__main__':
try:
cluster=os.environ['cluster']
filter=os.environ['filter']
lens=os.environ['lens']
mycl=my_cluster(cluster,filter,lens)
print 'cluster set by env vars'
print 'mycl=my_cluster('+cluster+','+filter+','+lens+')'
except KeyError:
#worklist = readtxtfile('FgasThesis_sample.list')
import imagetools, adam_quicktools_ArgCleaner
args=adam_quicktools_ArgCleaner.ArgCleaner(sys.argv)
try:
clusterlist_fl=args[0]
print 'cluster list set by input arg: ',clusterlist_fl
except:
clusterlist_fl='FgasThesis_sample_mine.list'
print 'cluster list set by default: ',clusterlist_fl
#mycls=get_mycls_from_list(clusterlist_fl)
#for mycl in mycls:
# mycl.get_best_mags()
#for mycl in mycls:
# oo=mycl.get_latex_table_line()
#sys.exit()
#for mycl in mycls:
# print ' '.join(['cp',mycl.fl_cut_lensing_step,mycl.fl_pz_out_base.replace('maxlikelensing','cut_lensing.cat')])
# print out stuff for the latex table!
#mycls=get_mycls_from_list('/u/ki/awright/gravitas/maxlikelensing/FgasThesis_sample_mine.list')
#for mycl in mycls: mycl.get_best_mags()
mycls2=get_mycls_from_list('/u/ki/awright/gravitas/maxlikelensing/FgasThesis_sample_not_mine.list')
for mycl in mycls2:
cc_cuts3fl=mycl.dir_lens+'cc_cuts3.dat'
print 'grep MAG %s | grep > 22' % (cc_cuts3fl)
for mycl in mycls2:
#mycl.get_best_mags()
print mycl.cluster, mycl.lens , mycl.filter , mycl.lensmag
sys.exit()
#for mycl in mycls:
#print repr(' & '.join(oo))
for mycl in mycls2:
#mycl.get_best_mags()
oo=mycl.get_latex_table_line()
#print repr(' & '.join(oo))
# mag_fls = glob.glob("/nfs/slac/g/ki/ki05/anja/SUBARU/%s/PHOTOMETRY_W-*_aper/%s.slr.cat" % (cl,cl))
# if cl=='MACS1621+38':
# mag_fl='/nfs/slac/g/ki/ki05/anja/SUBARU/MACS1621+38/PHOTOMETRY_W-C-IC_aper/MACS1621+38.slr.cat'
# elif cl=='A383':
# mag_fl='/nfs/slac/g/ki/ki05/anja/SUBARU/A383/PHOTOMETRY_W-S-I+_aper/A383.slr.cat'
# else:
# mag_fl=mag_fls[0]
|
|
import operator
from django.db.models import *
from django.contrib.auth.models import User, UserManager
from fate.custom.batch_select import batch_select
from fate.game.managers import *
test_types = [(x,x) for x in ("Short answer", "MC", "Argument")]
test_features = [(x,x) for x in ("Map", "Quotation", "Cartoon", "Chart")]
# Tag is a base class for Region, Period, and Theme
class Tag(Model):
obj = objects = BaseManager()
name = CharField(max_length=30, unique=True)
def __unicode__(self):
return self.name
class Meta:
abstract = True
class Region(Tag):
abbr = CharField(max_length=30, unique=True)
class Period(Tag):
pass
class Theme(Tag):
pass
class Card(Model):
obj = objects = CardManager()
region_number = IntegerField()
region = ForeignKey(Region)
period = ForeignKey(Period, null=True)
theme = ForeignKey(Theme)
points = IntegerField(default=1)
question = CharField(max_length=500)
answers = TextField()
right_answer = IntegerField()
def __unicode__(self):
return u"%s: %d, %s" % (self.region, self.region_number, self.theme)
def answer_list(self):
return self.answers.strip().split("\n\n")
class Meta:
unique_together = (("region", "region_number"))
class ResourceFile(Model):
obj = objects = BaseManager()
region = ForeignKey(Region, blank=True, null=True)
period = ForeignKey(Period, blank=True, null=True)
theme = ForeignKey(Theme, blank=True, null=True)
file = FileField(upload_to="resources/", max_length=80)
description = TextField()
def __unicode__(self):
return u"%s, %s, %s, file: %s" % (self.region, self.period, self.theme, self.file.name)
class Badge(Model):
obj = objects = BadgeManager()
name = CharField(max_length=100, unique=True)
cards = ManyToManyField(Card)
themes = ManyToManyField(Theme, blank=True, null=True)
regions = ManyToManyField(Region, blank=True, null=True)
periods = ManyToManyField(Period, blank=True, null=True)
def __unicode__(self):
return self.name
class Classroom(Model):
obj = objects = BaseManager()
name = CharField(max_length=100, blank=True)
teacher = ForeignKey(User)
# teacher = ManyToManyField(Teacher, through="TeacherAssignment")
def __unicode__(self):
return "%s: %s" % (self.teacher, self.name)
def get_students(self):
return Student.objects.filter(classroom = self)
def get_total_score(self):
answers = LastAnswer.objects.filter(answer__test__student__classroom = self)
return sum( answers.values_list('answer__card__points', flat=True) )
def calc_games_played(self):
return Game.objects.filter(student__classroom = self).filter(won = True).distinct().count()
def get_badge_total(self):
return Badge.objects.filter(studentbadge__student__classroom=self).distinct().count()
def get_points_data(self):
# return Score.objects.values_list('game__date').annotate(pts = Sum('card__points')).filter(game__student__classroom = self).order_by('game__date')
answers = LastAnswer.objects.values_list('answer__test__taken')
return answers.annotate(pts = Sum('answer__card__points')).filter(
answer__test__student__classroom = self).order_by('answer__test__taken')
def get_recent_careers(self):
return StudentCareer.objects.filter(student__classroom=self).distinct().order_by('-date').select_related('student','career')[:10]
    def get_best_qs(self, category):
        qs = Card.objects.get_count_qs(classroom=self.id, category=category)
        sorted_x = sorted(qs.iteritems(), key=operator.itemgetter(1), reverse=True)
        if not sorted_x: return '0'
        per = "%.1f" % sorted_x[0][1]
        return sorted_x[0][0] + " (" + per + "%)"
def get_worst_qs(self, category):
qs = Card.objects.get_count_qs(classroom=self.id, category=category)
sorted_x = sorted(qs.iteritems(), key=operator.itemgetter(1))
if not sorted_x: return '0'
per = "%.1f" % sorted_x[0][1]
return sorted_x[0][0] + " (" + per + "%)"
# still haven't figured out the best way to call these from a template. custom templatetags I suppose.
def get_best_theme(self):
return self.get_best_qs(category='theme')
def get_worst_theme(self):
return self.get_worst_qs(category='theme')
def get_best_period(self):
return self.get_best_qs(category='period')
def get_worst_period(self):
return self.get_worst_qs(category='period')
def get_best_region(self):
return self.get_best_qs(category='region')
def get_worst_region(self):
return self.get_worst_qs(category='region')
class Student(Model):
obj = objects = StudentManager()
first_name = CharField(max_length=100)
last_name = CharField(max_length=100)
classroom = ForeignKey(Classroom)
user = ForeignKey(User, blank=True, null=True)
# classroom = ManyToManyField(Classroom, through="StudentSchedule")
def __unicode__(self):
return "%s %s" % (self.first_name, self.last_name)
def get_total_mastery(self):
"""UNUSED?"""
cards = Card.objects.filter(score__game__student = self)
# cards = Card.objects.filter(answer__last_answer__student = self)
earned = cards.values_list('id',flat=True).distinct().count()
return 100 * ( earned * 1.0 / Card.objects.count() )
def get_total_points(self):
"""UNUSED?"""
return sum( list( Score.objects.filter(game__student = self).values_list('card__points', flat=True) ) )
def get_badges(self):
badges = Badge.objects.filter(studentbadge__student=self).distinct().batch_select('themes','regions','periods')
for b in badges:
b.tooltip, extra = "", []
# group if all present
if len(b.themes_all) == 5:
b.themes_all = []
extra.append('All Themes')
if len(b.regions_all) == 4:
b.regions_all = []
extra.append(' All Regions')
if len(b.periods_all) == 5:
b.periods_all = []
extra.append('All Periods')
tags = b.themes_all + b.periods_all + b.regions_all
b.tooltip = ", ".join([ t.name for t in tags ])
if extra:
b.tooltip += ', ' + ", ".join(extra)
return badges
def get_badge_total(self):
return Badge.objects.filter(studentbadge__student=self).distinct().count()
def get_careers(self):
return Career.objects.filter(studentcareer__student=self).distinct()
def get_career_total(self):
return self.get_careers().count()
def get_points_data(self):
"""UNUSED?"""
qs = Score.objects.values_list('game__date').annotate(pts = Sum('card__points')).filter(game__student = self).order_by('game__date')
new_list = []
total = 0
for tup in qs:
total += tup[1]
new_list.append( (tup[0], total) )
return new_list
class Meta:
unique_together = (("first_name", "last_name", "classroom"))
class StudentBadge(Model):
obj = objects = BaseManager()
student = ForeignKey(Student)
badge = ForeignKey(Badge)
date = DateField(auto_now_add=True)
class Career(Model):
obj = objects = CareerManager()
name = CharField(max_length=100, unique=True)
badges = ManyToManyField(Badge)
badge_count = IntegerField(default=1)
def __unicode__(self):
return self.name
class StudentCareer(Model):
obj = objects = BaseManager()
student = ForeignKey(Student)
career = ForeignKey(Career)
date = DateField(auto_now_add=True)
class Game(Model):
obj = objects = BaseManager()
student = ForeignKey(Student)
career = ForeignKey(Career, blank=True, null=True)
region = ForeignKey(Region, blank=True, null=True)
date = DateField(auto_now_add=True)
won = BooleanField()
# def __unicode__(self):
# return u''
class Score(Model):
obj = objects = BaseManager()
game = ForeignKey(Game)
card = ForeignKey(Card)
notes = CharField(max_length=200, blank=True)
class Meta:
unique_together = ( ("game", "card") )
class TestPreset(Model):
obj = objects = BaseManager()
region = ForeignKey(Region, blank=True, null=True)
period = ForeignKey(Period, blank=True, null=True)
theme = ForeignKey(Theme, blank=True, null=True)
def __unicode__(self):
return u"%s, %s, %s" % (self.region, self.period, self.theme)
class TestSetup(Model):
obj = objects = BaseManager()
classroom = ForeignKey(Classroom)
test_preset = ForeignKey(TestPreset, help_text="You can choose a test preset OR the options below", blank=True, null=True)
match_all = BooleanField(help_text="Include questions that match all selected categories", blank=True)
region = ManyToManyField(Region, blank=True, null=True)
period = ManyToManyField(Period, blank=True, null=True)
theme = ManyToManyField(Theme, blank=True, null=True)
type = CharField(max_length=20, blank=True, null=True, choices=test_types)
feature = CharField(max_length=20, blank=True, null=True, choices=test_features)
def __unicode__(self):
return u"%s: %s, %s, %s" % (self.classroom, self.region, self.period, self.theme)
class Test(Model):
obj = objects = BaseManager()
student = ForeignKey(Student)
taken = DateTimeField(auto_now_add=True)
score = IntegerField(default=0, blank=True)
done = BooleanField(default=False)
def calculate_score(self):
answers = self.answers.all()
correct = len([a for a in answers if a.correct])
self.score = int(round( float(correct) / answers.count() * 100 ) )
return self.score
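        # e.g. 7 correct answers out of 10 gives int(round(7/10.0 * 100)) == 70.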
def cards(self):
return set(a.card for a in self.answers.all())
def __unicode__(self):
return u"%s, %s%s" % (self.student, self.score, ", done" if self.done else '')
class Answer(Model):
# student = ForeignKey(Student, related_name="answers")
obj = objects = BaseManager()
card = ForeignKey(Card)
test = ForeignKey(Test, related_name="answers")
answer = CharField(max_length=200, blank=True, null=True)
correct = IntegerField(help_text="0:unset, 1:correct, 2:incorrect", default=0)
def __unicode__(self):
return u"%s, %s" % (self.test.student, self.correct)
class LastAnswer(Model):
obj = objects = BaseManager()
answer = ForeignKey(Answer)
# note: can't use related_name because of annotations in get_sorted_students()
student = ForeignKey(Student)
#class StudentSchedule(Model):
# student = ForeignKey(Student)
# classroom = ForeignKey(Classroom)
# start_date = DateField()
# stop_date = DateField()
#class TeacherAssignment(Model):
# teacher = ForeignKey(User)
# classroom = ForeignKey(Classroom)
# start_date = DateField()
# stop_date = DateField()
|
|
import logging
from redbreast.core import WFException
from redbreast.core.spec import *
from uuid import uuid4
import time
LOG = logging.getLogger(__name__)
class Task(object):
ACTIVE = 1
READY = 2
EXECUTING = 4
EXECUTED = 8
COMPLETED = 16
# active --> ready() --> ready
# ready --> execute()
# if async ---> executing
# async-callback --> executed
# if sync --> executed --> route() --> completed
# executed --> transfer() ---> completed
state_names = {
ACTIVE: 'ACTIVE',
READY: 'READY',
EXECUTING: 'EXECUTING',
EXECUTED: 'EXECUTED',
COMPLETED: 'COMPLETED',
}
state_fire_event_names = {
ACTIVE: 'task:enter',
READY: 'task:ready',
EXECUTING: 'task:executing',
EXECUTED: 'task:executed',
COMPLETED: 'task:completed',
}
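    # Illustrative walk-through of the lifecycle described above (the workflow
    # and task_spec objects are assumed to come from redbreast.core.spec; shown
    # as a comment only):
    #
    #   task = Task(workflow, task_spec)        # state == ACTIVE on creation
    #   task.ready()                            # delegates to spec -> READY
    #   task.execute()                          # sync specs end up EXECUTED,
    #                                           # then route() -> COMPLETED
    #   task.deliver(message=msg, async=False)  # resume an async task; deliver()
    #                                           # re-executes with transfer=True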
class Iterator(object):
def __init__(self, current, filter=None):
self.filter = filter
self.path = [current]
def __iter__(self):
return self
def _next(self):
if len(self.path) == 0:
raise StopIteration()
current = self.path[-1]
if current.children:
self.path.append(current.children[0])
if self.filter is not None and current.state & self.filter == 0:
return None
return current
while True:
old_child = self.path.pop(-1)
if len(self.path) == 0:
                    break
parent = self.path[-1]
pos = parent.children.index(old_child)
if len(parent.children) > (pos + 1):
self.path.append(parent.children[pos + 1])
break
if self.filter is not None and current.state & self.filter == 0:
return None
return current
def next(self):
while True:
next = self._next()
if next is not None:
return next
def __init__(self, workflow, task_spec, parent=None, state=ACTIVE, **kwargs):
self.uuid = uuid4()
self.workflow = workflow
self.spec = task_spec
self.parents = []
if parent:
self.parents.append(parent)
self.data = {}
#data for deliver
self.deliver_msg = None
self.deliver_from_msg = kwargs.get('message', None)
self.operator = kwargs.get('operator', None)
self.next_tasks = []
#state
self._state = None
self.state_history = []
self.state = state
self.children = []
if parent is not None:
for p in self.parents:
p.add_child(self)
def __iter__(self):
return Task.Iterator(self)
def _getstate(self):
return self._state
def _setstate(self, value):
if self._state == value:
return
old = self.get_state_name()
self._state = value
self.state_history.append(value)
self.last_state_change = time.time()
#pubsub
event_type = self.state_fire_event_names.get(self.state, None)
if event_type:
# spec pubsub
self.workflow.spec.fire(event_type, task=self, workflow=self.workflow)
# instance pubsub
self.workflow.fire(event_type, task=self, workflow=self.workflow)
# extra instance pubsub
self.workflow.fire("task:state_changed", task=self, workflow=self.workflow)
LOG.debug("Moving '%s' from %s to %s" %
(self.get_name(), old, self.get_state_name()))
def _delstate(self):
del self._state
state = property(_getstate, _setstate, _delstate, "State property.")
def get_alldata(self):
return self.data
def get_data(self, name, default=None):
return self.data.get(name, default)
def set_data(self, name, value=None):
if isinstance(name, dict):
for key in name:
self.data[key] = name[key]
elif isinstance(name, str):
self.data[name] = value
#self.fire("workflow:data_changed", workflow=self)
def get_spec_alldata(self):
return self.spec.get_alldata()
def get_spec_data(self, name, default=None):
return self.spec.get_data(name, default)
def set_spec_data(self, name, value=None):
return self.spec.set_spec_data(name, value)
    def get_level(self):
        # Tasks keep a list of parents; follow the first-parent chain to
        # compute the depth.
        level = 0
        task = self.parents[0] if self.parents else None
        while task is not None:
            level += 1
            task = task.parents[0] if task.parents else None
        return level
def get_unique_id(self):
return self.uuid
def get_state_name(self):
return self.state_names.get(self.state, None)
def get_name(self):
return self.spec.name
def get_spec_name(self):
return self.spec.get_spec_name()
def get_desc(self):
return self.spec.get_desc()
def get_next_tasks(self):
return [(task.name, task.get_desc()) for task in self.spec.outputs]
def set_next_tasks(self, *args):
self.next_tasks = [task for task in args]
def set_next_task(self, task):
self.next_tasks = [task]
def add_parent(self, parent):
self.parents.append(parent)
def add_child(self, child):
self.children.append(child)
self.workflow.fire("task:connect", from_task=self, to_task=child, workflow=self.workflow,)
def remove_parent(self, parent):
self.parents.remove(parent)
def remove_child(self, child):
self.children.remove(child)
self.workflow.fire("task:disconnect", from_task=self, to_task=child, workflow=self.workflow,)
def kill(self):
for p in self.parents:
p.remove_child(self)
self.parents = []
def ready(self):
return self.spec.ready(self, self.workflow)
def execute(self, transfer=False):
return self.spec.execute(self, self.workflow, transfer=transfer)
def deliver(self, message=None, next_tasks=[], async=True):
self.deliver_msg = message
self.operator = self.workflow.operator
self.next_tasks = next_tasks
self.state = Task.READY
if async == False:
return self.execute(transfer=True)
return True
    def is_descendant_of(self, parent):
        if not self.parents:
            return False
        for p in self.parents:
            if parent == p:
                return True
        for p in self.parents:
            if p.is_descendant_of(parent):
                return True
        return False
def find(self, task_spec):
if self.spec == task_spec:
return self
for child in self:
if child.spec != task_spec:
continue
return child
return None
def __repr__(self):
return '<Task (%s) in state %s at %s>' % (
self.spec.name,
self.get_state_name(),
hex(id(self)))
def get_dump(self, indent=0, recursive=True):
dbg = (' ' * indent * 2)
dbg += ' %s ' % (self.get_name())
dbg += ' (%s)' % self.get_state_name()
dbg += ' Children: %s' % len(self.children)
if recursive:
for child in self.children:
dbg += '\n' + child.get_dump(indent + 1)
return dbg
    def dump(self, indent=0):
        print(self.get_dump(indent))
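# Usage sketch (hypothetical; assumes `wf` is a workflow instance and `spec` a
# task spec already attached to it -- neither is defined in this module):
#
#     task = Task(wf, spec)                 # created in the default ACTIVE state
#     task.set_data('customer_id', 42)      # attach per-instance data
#     task.deliver(message={'ok': True}, async=False)  # -> READY, then execute()
#
# Every state change appends to ``state_history`` and fires the state-specific
# event on both the spec and the workflow, plus a generic "task:state_changed"
# event on the workflow instance.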
|
|
'''
Kivy framework
==============
Kivy is an open source library for developing multi-touch applications. It is
cross-platform (Linux/OSX/Windows/Android/iOS) and released under
the terms of the `MIT License <https://en.wikipedia.org/wiki/MIT_License>`_.
It comes with native support for many multi-touch input devices, a growing
library of multi-touch aware widgets and hardware accelerated OpenGL drawing.
Kivy is designed to let you focus on building custom and highly interactive
applications as quickly and easily as possible.
With Kivy, you can take full advantage of the dynamic nature of Python. There
are thousands of high-quality, free libraries that can be integrated in your
application. At the same time, performance-critical parts are implemented
using `Cython <http://cython.org/>`_.
See http://kivy.org for more information.
'''
__all__ = (
'require', 'parse_kivy_version',
'kivy_configure', 'kivy_register_post_configuration',
'kivy_options', 'kivy_base_dir',
'kivy_modules_dir', 'kivy_data_dir', 'kivy_shader_dir',
'kivy_icons_dir', 'kivy_home_dir',
'kivy_config_fn', 'kivy_usermodules_dir',
)
import sys
import shutil
from getopt import getopt, GetoptError
from os import environ, mkdir
from os.path import dirname, join, basename, exists, expanduser
import pkgutil
import re
from kivy.logger import Logger, LOG_LEVELS
from kivy.utils import platform
MAJOR = 2
MINOR = 0
MICRO = 0
RELEASE = False
__version__ = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
if not RELEASE:
    # if it's a rcX release, it's not preceded by a period. If it is a
    # devX release, it must start with a period
__version__ += '.dev0'
try:
from kivy.version import __hash__, __date__
__hash__ = __hash__[:7]
except ImportError:
__hash__ = __date__ = ''
# internals for post-configuration
__kivy_post_configuration = []
if platform == 'macosx' and sys.maxsize < 9223372036854775807:
r = '''Unsupported Python version detected!:
Kivy requires a 64 bit version of Python to run on OS X. We strongly
advise you to use the version of Python that is provided by Apple
(don't use ports, fink or homebrew unless you know what you're
doing).
See http://kivy.org/docs/installation/installation-macosx.html for
details.
'''
Logger.critical(r)
if sys.version_info[0] == 2:
Logger.critical(
'Unsupported Python version detected!: Kivy 2.0.0 and higher does not '
'support Python 2. Please upgrade to Python 3, or downgrade Kivy to '
'1.11.0 - the last Kivy release that still supports Python 2.')
def parse_kivy_version(version):
"""Parses the kivy version as described in :func:`require` into a 3-tuple
of ([x, y, z], 'rc|a|b|dev|post', 'N') where N is the tag revision. The
last two elements may be None.
"""
m = re.match(
'^([0-9]+)\\.([0-9]+)\\.([0-9]+?)(rc|a|b|\\.dev|\\.post)?([0-9]+)?$',
version)
if m is None:
raise Exception('Revision format must be X.Y.Z[-tag]')
major, minor, micro, tag, tagrev = m.groups()
if tag == '.dev':
tag = 'dev'
if tag == '.post':
tag = 'post'
return [int(major), int(minor), int(micro)], tag, tagrev
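# For example, the tuples returned for a few version strings (the values follow
# directly from the regular expression above):
#
#     parse_kivy_version('2.0.0')       -> ([2, 0, 0], None, None)
#     parse_kivy_version('2.0.0rc1')    -> ([2, 0, 0], 'rc', '1')
#     parse_kivy_version('2.0.0.dev0')  -> ([2, 0, 0], 'dev', '0')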
def require(version):
'''Require can be used to check the minimum version required to run a Kivy
application. For example, you can start your application code like this::
import kivy
kivy.require('1.0.1')
If a user attempts to run your application with a version of Kivy that is
older than the specified version, an Exception is raised.
The Kivy version string is built like this::
X.Y.Z[tag[tagrevision]]
X is the major version
Y is the minor version
Z is the bugfixes revision
The tag is optional, but may be one of '.dev', '.post', 'a', 'b', or 'rc'.
The tagrevision is the revision number of the tag.
.. warning::
You must not ask for a version with a tag, except -dev. Asking for a
'dev' version will just warn the user if the current Kivy
version is not a -dev, but it will never raise an exception.
You must not ask for a version with a tagrevision.
'''
# user version
revision, tag, tagrev = parse_kivy_version(version)
# current version
sysrevision, systag, systagrev = parse_kivy_version(__version__)
if tag and not systag:
Logger.warning('Application requested a dev version of Kivy. '
'(You have %s, but the application requires %s)' % (
__version__, version))
    # no tagrevision (e.g. rc1, .dev3) is allowed in the requested version.
if tagrev is not None:
raise Exception('Revision format must not contain any tagrevision')
# finally, checking revision
if sysrevision < revision:
raise Exception('The version of Kivy installed on this system '
'is too old. '
'(You have %s, but the application requires %s)' % (
__version__, version))
def kivy_configure():
'''Call post-configuration of Kivy.
This function must be called if you create the window yourself.
'''
for callback in __kivy_post_configuration:
callback()
def get_includes():
'''Retrieves the directories containing includes needed to build new Cython
modules with Kivy as a dependency. Currently returns the location of the
kivy.graphics module.
.. versionadded:: 1.9.1
'''
root_dir = dirname(__file__)
return [join(root_dir, 'graphics'), join(root_dir, 'tools', 'gles_compat'),
join(root_dir, 'include')]
def kivy_register_post_configuration(callback):
'''Register a function to be called when kivy_configure() is called.
.. warning::
Internal use only.
'''
__kivy_post_configuration.append(callback)
def kivy_usage():
'''Kivy Usage: %s [OPTION...]::
-h, --help
Prints this help message.
-d, --debug
Shows debug log.
-a, --auto-fullscreen
Force 'auto' fullscreen mode (no resolution change).
Uses your display's resolution. This is most likely what you want.
-c, --config section:key[:value]
Set a custom [section] key=value in the configuration object.
-f, --fullscreen
Force running in fullscreen mode.
-k, --fake-fullscreen
Force 'fake' fullscreen mode (no window border/decoration).
Uses the resolution specified by width and height in your config.
-w, --windowed
Force running in a window.
-p, --provider id:provider[,options]
Add an input provider (eg: ccvtable1:tuio,192.168.0.1:3333).
-m mod, --module=mod
Activate a module (use "list" to get a list of available modules).
-r, --rotation
Rotate the window's contents (0, 90, 180, 270).
-s, --save
Save current Kivy configuration.
--size=640x480
Size of window geometry.
--dpi=96
        Manually override the Window DPI (for testing only).
'''
print(kivy_usage.__doc__ % (basename(sys.argv[0])))
#: Global settings options for kivy
kivy_options = {
'window': ('egl_rpi', 'sdl2', 'pygame', 'sdl', 'x11'),
'text': ('pil', 'sdl2', 'pygame', 'sdlttf'),
'video': (
'gstplayer', 'ffmpeg', 'ffpyplayer', 'null'),
'audio': (
'gstplayer', 'pygame', 'ffpyplayer', 'sdl2',
'avplayer'),
'image': ('tex', 'imageio', 'dds', 'sdl2', 'pygame', 'pil', 'ffpy', 'gif'),
'camera': ('opencv', 'gi', 'avfoundation',
'android', 'picamera'),
'spelling': ('enchant', 'osxappkit', ),
'clipboard': (
'android', 'winctypes', 'xsel', 'xclip', 'dbusklipper', 'nspaste',
'sdl2', 'pygame', 'dummy', 'gtk3', )}
# Read environment
for option in kivy_options:
key = 'KIVY_%s' % option.upper()
if key in environ:
try:
if type(kivy_options[option]) in (list, tuple):
kivy_options[option] = environ[key].split(',')
else:
kivy_options[option] = environ[key].lower() in \
('true', '1', 'yes')
except Exception:
Logger.warning('Core: Wrong value for %s environment key' % key)
Logger.exception('')
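# For example, setting ``KIVY_WINDOW=sdl2,pygame`` in the environment replaces
# the 'window' tuple above with ['sdl2', 'pygame'], restricting (and ordering)
# the providers Kivy will try.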
# Extract all the needed paths in kivy
#: Kivy directory
kivy_base_dir = dirname(sys.modules[__name__].__file__)
#: Kivy modules directory
kivy_modules_dir = environ.get('KIVY_MODULES_DIR',
join(kivy_base_dir, 'modules'))
#: Kivy data directory
kivy_data_dir = environ.get('KIVY_DATA_DIR',
join(kivy_base_dir, 'data'))
#: Kivy binary deps directory
kivy_binary_deps_dir = environ.get('KIVY_BINARY_DEPS',
join(kivy_base_dir, 'binary_deps'))
#: Kivy glsl shader directory
kivy_shader_dir = join(kivy_data_dir, 'glsl')
#: Kivy icons config path (don't remove the last '')
kivy_icons_dir = join(kivy_data_dir, 'icons', '')
#: Kivy user-home storage directory
kivy_home_dir = ''
#: Kivy configuration filename
kivy_config_fn = ''
#: Kivy user modules directory
kivy_usermodules_dir = ''
# if there are deps, import them so they can do their magic.
import kivy.deps
_packages = []
for importer, modname, ispkg in pkgutil.iter_modules(kivy.deps.__path__):
if not ispkg:
continue
if modname.startswith('gst'):
_packages.insert(0, (importer, modname, 'kivy.deps'))
else:
_packages.append((importer, modname, 'kivy.deps'))
try:
import kivy_deps
for importer, modname, ispkg in pkgutil.iter_modules(kivy_deps.__path__):
if not ispkg:
continue
if modname.startswith('gst'):
_packages.insert(0, (importer, modname, 'kivy_deps'))
else:
_packages.append((importer, modname, 'kivy_deps'))
except ImportError:
pass
_logging_msgs = []
for importer, modname, package in _packages:
try:
mod = importer.find_module(modname).load_module(modname)
version = ''
if hasattr(mod, '__version__'):
version = ' {}'.format(mod.__version__)
_logging_msgs.append(
'deps: Successfully imported "{}.{}"{}'.
format(package, modname, version))
except ImportError as e:
Logger.warning(
'deps: Error importing dependency "{}.{}": {}'.
format(package, modname, str(e)))
# Don't go further if we generate documentation
if any(name in sys.argv[0] for name in ('sphinx-build', 'autobuild.py')):
environ['KIVY_DOC'] = '1'
if 'sphinx-build' in sys.argv[0]:
environ['KIVY_DOC_INCLUDE'] = '1'
if any(('nosetests' in arg or 'pytest' in arg) for arg in sys.argv):
environ['KIVY_UNITTEST'] = '1'
if any('pyinstaller' in arg.lower() for arg in sys.argv):
environ['KIVY_PACKAGING'] = '1'
if not environ.get('KIVY_DOC_INCLUDE'):
# Configuration management
if 'KIVY_HOME' in environ:
kivy_home_dir = expanduser(environ['KIVY_HOME'])
else:
user_home_dir = expanduser('~')
if platform == 'android':
user_home_dir = environ['ANDROID_APP_PATH']
elif platform == 'ios':
user_home_dir = join(expanduser('~'), 'Documents')
kivy_home_dir = join(user_home_dir, '.kivy')
kivy_config_fn = join(kivy_home_dir, 'config.ini')
kivy_usermodules_dir = join(kivy_home_dir, 'mods')
icon_dir = join(kivy_home_dir, 'icon')
if 'KIVY_NO_CONFIG' not in environ:
if not exists(kivy_home_dir):
mkdir(kivy_home_dir)
if not exists(kivy_usermodules_dir):
mkdir(kivy_usermodules_dir)
if not exists(icon_dir):
try:
shutil.copytree(join(kivy_data_dir, 'logo'), icon_dir)
except:
Logger.exception('Error when copying logo directory')
# configuration
from kivy.config import Config
# Set level of logger
level = LOG_LEVELS.get(Config.get('kivy', 'log_level'))
Logger.setLevel(level=level)
    # Can be overridden on the command line
if ('KIVY_UNITTEST' not in environ and
'KIVY_PACKAGING' not in environ and
'KIVY_NO_ARGS' not in environ):
        # save sys.argv, otherwise gstreamer uses it and displays its own help.
sys_argv = sys.argv
sys.argv = sys.argv[:1]
try:
opts, args = getopt(sys_argv[1:], 'hp:fkawFem:sr:dc:', [
'help', 'fullscreen', 'windowed', 'fps', 'event',
'module=', 'save', 'fake-fullscreen', 'auto-fullscreen',
'multiprocessing-fork', 'display=', 'size=', 'rotate=',
'config=', 'debug', 'dpi='])
except GetoptError as err:
Logger.error('Core: %s' % str(err))
kivy_usage()
sys.exit(2)
mp_fork = None
try:
for opt, arg in opts:
if opt == '--multiprocessing-fork':
mp_fork = True
break
except:
pass
# set argv to the non-read args
sys.argv = sys_argv[0:1] + args
if mp_fork is not None:
# Needs to be first opt for support_freeze to work
sys.argv.insert(1, '--multiprocessing-fork')
else:
opts = []
args = []
need_save = False
for opt, arg in opts:
if opt in ('-h', '--help'):
kivy_usage()
sys.exit(0)
elif opt in ('-p', '--provider'):
try:
pid, args = arg.split(':', 1)
Config.set('input', pid, args)
except ValueError:
# when we are doing an executable on macosx with
# pyinstaller, they are passing information with -p. so
# it will conflict with our current -p option. since the
# format is not the same, just avoid it.
pass
elif opt in ('-a', '--auto-fullscreen'):
Config.set('graphics', 'fullscreen', 'auto')
elif opt in ('-c', '--config'):
ol = arg.split(':', 2)
if len(ol) == 2:
Config.set(ol[0], ol[1], '')
elif len(ol) == 3:
Config.set(ol[0], ol[1], ol[2])
else:
raise Exception('Invalid --config value')
if ol[0] == 'kivy' and ol[1] == 'log_level':
level = LOG_LEVELS.get(Config.get('kivy', 'log_level'))
Logger.setLevel(level=level)
elif opt in ('-k', '--fake-fullscreen'):
Config.set('graphics', 'fullscreen', 'fake')
elif opt in ('-f', '--fullscreen'):
Config.set('graphics', 'fullscreen', '1')
elif opt in ('-w', '--windowed'):
Config.set('graphics', 'fullscreen', '0')
elif opt in ('--size', ):
w, h = str(arg).split('x')
Config.set('graphics', 'width', w)
Config.set('graphics', 'height', h)
elif opt in ('--display', ):
Config.set('graphics', 'display', str(arg))
elif opt in ('-m', '--module'):
if str(arg) == 'list':
from kivy.modules import Modules
Modules.usage_list()
sys.exit(0)
args = arg.split(':', 1)
if len(args) == 1:
args += ['']
Config.set('modules', args[0], args[1])
elif opt in ('-s', '--save'):
need_save = True
elif opt in ('-r', '--rotation'):
Config.set('graphics', 'rotation', arg)
elif opt in ('-d', '--debug'):
level = LOG_LEVELS.get('debug')
Logger.setLevel(level=level)
elif opt == '--dpi':
environ['KIVY_DPI'] = arg
if need_save and 'KIVY_NO_CONFIG' not in environ:
try:
with open(kivy_config_fn, 'w') as fd:
Config.write(fd)
except Exception as e:
            Logger.exception('Core: error while saving default '
                             'configuration file: %s', str(e))
Logger.info('Core: Kivy configuration saved.')
sys.exit(0)
# configure all activated modules
from kivy.modules import Modules
Modules.configure()
# android hooks: force fullscreen and add android touch input provider
if platform in ('android', 'ios'):
from kivy.config import Config
Config.set('graphics', 'fullscreen', 'auto')
Config.remove_section('input')
Config.add_section('input')
if platform == 'android':
Config.set('input', 'androidtouch', 'android')
for msg in _logging_msgs:
Logger.info(msg)
if RELEASE:
Logger.info('Kivy: v%s' % __version__)
elif not RELEASE and __hash__ and __date__:
Logger.info('Kivy: v%s, git-%s, %s' % (__version__, __hash__, __date__))
Logger.info('Kivy: Installed at "{}"'.format(__file__))
Logger.info('Python: v{}'.format(sys.version))
Logger.info('Python: Interpreter at "{}"'.format(sys.executable))
from kivy.logger import file_log_handler
if file_log_handler is not None:
file_log_handler.purge_logs()
|
|
'''
The MIT License (MIT)
Copyright (c) 2016 Bonggun Shin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from gensim import utils
from gensim.models import Doc2Vec
from gensim.models.doc2vec import TaggedDocument
from sklearn.linear_model import LogisticRegression
# shuffle
from random import shuffle
# logging
import logging
import os.path
import sys
import time
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
class Timer(object):
def __init__(self, name=None, logger=None):
self.logger = logger
self.name = name
def __enter__(self):
self.tstart = time.time()
    def __exit__(self, type, value, traceback):
        elapsed = time.time() - self.tstart
        if self.logger is None:
            if self.name:
                print('[%s] Elapsed: %s' % (self.name, elapsed))
            else:
                print('Elapsed: %s' % elapsed)
        else:
            if self.name:
                self.logger.info("[%s] Elapsed: %s" % (self.name, elapsed))
            else:
                self.logger.info('Elapsed: %s' % elapsed)
class LabeledLineSentence(object):
def __init__(self, sources):
self.sources = sources
flipped = {}
        # make sure that the prefixes (the dict values) are unique
for key, value in sources.items():
if value not in flipped:
flipped[value] = [key]
else:
raise Exception('Non-unique prefix encountered')
def __iter__(self):
for source, prefix in self.sources.items():
with utils.smart_open(source) as fin:
for item_no, line in enumerate(fin):
yield TaggedDocument(utils.to_unicode(line).split(), [prefix + '_%s' % item_no])
def to_array(self):
self.sentences = []
for source, prefix in self.sources.items():
with utils.smart_open(source) as fin:
for item_no, line in enumerate(fin):
self.sentences.append(TaggedDocument(
utils.to_unicode(line).split(), [prefix + '_%s' % item_no]))
return self.sentences
def sentences_perm(self):
shuffle(self.sentences)
return self.sentences
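# Example of what the iterator yields (hypothetical file contents): with
# sources = {'train-pos.txt': 'TRAIN_POS'}, line 0 of train-pos.txt becomes
#
#     TaggedDocument(words=['great', 'movie', ...], tags=['TRAIN_POS_0'])
#
# to_array() materialises the same documents into self.sentences so that
# sentences_perm() can reshuffle them between training epochs.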
class SentimentAnalyzer(object):
def __init__(self):
program = os.path.basename(sys.argv[0])
self.logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
def evaluate(self):
classifier = LogisticRegression()
classifier.fit(self.train_arrays, self.train_labels)
score = classifier.score(self.test_arrays, self.test_labels)
self.logger.info("Score is %s!!" % str(score))
def evaluate_nn(self):
nn_model = Sequential()
nn_model.add(Embedding(self.dim, 200))
nn_model.add(LSTM(200)) # try using a GRU instead, for fun
nn_model.add(Dropout(0.5))
nn_model.add(Dense(1))
nn_model.add(Activation('sigmoid'))
nn_model.compile(loss='binary_crossentropy',
optimizer='adam',
class_mode="binary")
print("Train...")
nn_model.fit(self.train_arrays, self.train_labels, batch_size=32, nb_epoch=3,
validation_data=(self.test_arrays, self.test_labels), show_accuracy=True)
score, acc = nn_model.evaluate(self.test_arrays, self.test_labels,
batch_size=32,
show_accuracy=True)
print('Test score:', score)
print('Test accuracy:', acc)
def make_word_data(self):
# self.train_word_data, self.test_word_data = self._get_data(self.train_sources, self.test_sources)
train_sentences = LabeledLineSentence(self.train_sources)
self.train_word_data = train_sentences.to_array()
self.n_train_neg = sum(1 for d in self.train_word_data if "TRAIN_NEG" in d[1][0])
self.n_train_pos = sum(1 for d in self.train_word_data if "TRAIN_POS" in d[1][0])
self.n_train = self.n_train_neg+self.n_train_pos
test_sentences = LabeledLineSentence(self.test_sources)
self.test_word_data = test_sentences.to_array()
self.n_test_neg = sum(1 for d in self.test_word_data if "TEST_NEG" in d[1][0])
self.n_test_pos = sum(1 for d in self.test_word_data if "TEST_POS" in d[1][0])
self.n_test = self.n_test_neg+self.n_test_pos
class SentimentBoW(SentimentAnalyzer):
def __init__(self, train_sources, test_sources, vocab_sources):
super(SentimentBoW, self).__init__()
self.logger.info("running %s" % ' '.join(sys.argv))
self.train_sources = train_sources
self.test_sources = test_sources
self.vocab_sources = vocab_sources
self.vocab = None
self.train_word_data = None
self.test_word_data = None
def make_vocab(self):
sentences = LabeledLineSentence(self.vocab_sources)
model = Doc2Vec(min_count=1, window=10, size=100, sample=1e-4, negative=5, workers=7)
model.build_vocab(sentences.to_array())
self.vocab = model.vocab
self.vocab_len = len(self.vocab)
def to_sparse_vector(self, words):
sparse_vec = np.zeros([self.vocab_len,], dtype=np.int8)
for w in words:
try:
sparse_vec[self.vocab[w].index] = 1
except KeyError:
pass
return sparse_vec
def make_dataset(self):
if self.vocab is None:
with Timer("make_vocab", self.logger):
self.make_vocab()
if self.train_word_data is None or self.test_word_data is None:
with Timer("make_word_data", self.logger):
self.make_word_data()
with Timer("make dataset", self.logger):
self.train_arrays = np.zeros((self.n_train, self.vocab_len))
self.train_labels = np.zeros(self.n_train)
for i in range(self.n_train):
if "TRAIN_NEG" in self.train_word_data[i][1][0]:
self.train_arrays[i] = self.to_sparse_vector(self.train_word_data[i][0])
self.train_labels[i] = 0
else: # "TRAIN_NEG"
self.train_arrays[i] = self.to_sparse_vector(self.train_word_data[i][0])
self.train_labels[i] = 1
if i%(self.n_train/10)==0:
self.logger.info("making train %s done" % str(i/(self.n_train/100)+10))
del self.train_word_data
self.test_arrays = np.zeros((self.n_test, self.vocab_len))
self.test_labels = np.zeros(self.n_test)
for i in range(self.n_test):
if "TEST_NEG" in self.test_word_data[i][1][0]:
self.test_arrays[i] = self.to_sparse_vector(self.test_word_data[i][0])
self.test_labels[i] = 0
else: # "TEST_NEG"
self.test_arrays[i] = self.to_sparse_vector(self.test_word_data[i][0])
self.test_labels[i] = 1
if i%(self.n_test/10)==0:
self.logger.info("making test %s done" % str(i/(self.n_test/100)+10))
del self.test_word_data
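# Note: SentimentBoW.to_sparse_vector() builds a fixed-length 0/1 bag-of-words
# vector indexed by the Doc2Vec vocabulary positions (self.vocab[w].index);
# words missing from the vocabulary are silently skipped via the KeyError guard.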
class SentimentDoc2Vec(SentimentAnalyzer):
def __init__(self, train_sources, test_sources, vocab_sources, dim=50):
super(SentimentDoc2Vec, self).__init__()
self.logger.info("running %s" % ' '.join(sys.argv))
self.train_sources = train_sources
self.test_sources = test_sources
self.vocab_sources = vocab_sources
self.dim = dim
self.train_word_data = None
self.test_word_data = None
self.model = None
def make_model(self, fname):
if os.path.isfile(fname):
with Timer("Load model from a file", self.logger):
                self.model = Doc2Vec.load(fname)
self.dim = self.model.vector_size
else:
with Timer("build model from documents", self.logger):
sentences = LabeledLineSentence(self.vocab_sources)
model = Doc2Vec(min_count=1, window=10, size=self.dim, sample=1e-4, negative=5, workers=7)
model.build_vocab(sentences.to_array())
for epoch in range(50):
self.logger.info('Epoch %d' % epoch)
model.train(sentences.sentences_perm())
model.save(fname)
self.model = model
def make_dataset(self):
if self.train_word_data is None or self.test_word_data is None:
with Timer("make_word_data", self.logger):
self.make_word_data()
if self.model is None:
self.logger.info('model not ready')
return
with Timer("make dataset", self.logger):
self.train_arrays = np.zeros((self.n_train, self.dim))
self.train_labels = np.zeros(self.n_train)
for i in range(self.n_train):
if "TRAIN_NEG" in self.train_word_data[i][1][0]:
self.train_arrays[i] = self.model.infer_vector(self.train_word_data[i][0])
self.train_labels[i] = 0
else: # "TRAIN_POS"
self.train_arrays[i] = self.model.infer_vector(self.train_word_data[i][0])
self.train_labels[i] = 1
if i%(self.n_train/10)==0:
self.logger.info("making train %s done" % str(i/(self.n_train/100)+10))
self.test_arrays = np.zeros((self.n_test, self.dim))
self.test_labels = np.zeros(self.n_test)
for i in range(self.n_test):
if "TEST_NEG" in self.test_word_data[i][1][0]:
self.test_arrays[i] = self.model.infer_vector(self.test_word_data[i][0])
self.test_labels[i] = 0
else: # "TEST_POS"
self.test_arrays[i] = self.model.infer_vector(self.test_word_data[i][0])
self.test_labels[i] = 1
if i%(self.n_test/10)==0:
self.logger.info("making test %s done" % str(i/(self.n_test/100)+10))
train_sources = {'train-neg.txt':'TRAIN_NEG', 'train-pos.txt':'TRAIN_POS'}
test_sources = {'test-neg.txt':'TEST_NEG', 'test-pos.txt':'TEST_POS'}
vocab_sources = {'train-neg.txt':'TRAIN_NEG', 'train-pos.txt':'TRAIN_POS', 'train-unsup.txt':'TRAIN_UNS'}
# bow = SentimentBoW(train_sources, test_sources, vocab_sources)
# bow.make_dataset()
# bow.evaluate()
# d2v = SentimentDoc2Vec(train_sources, test_sources, vocab_sources, 100)
# d2v.make_model('./imdb.d2v')
# d2v.make_dataset()
# d2v.evaluate()
# d2v.evaluate_nn()
|
|
'''
Support
=======
Activate other frameworks/toolkits inside the kivy event loop.
'''
__all__ = ('install_gobject_iteration', 'install_twisted_reactor',
'uninstall_twisted_reactor', 'install_android')
def install_gobject_iteration():
'''Import and install gobject context iteration inside our event loop.
This is used as soon as gobject is used (like gstreamer).
'''
from kivy.clock import Clock
try:
from gi.repository import GObject as gobject
except ImportError:
import gobject
if hasattr(gobject, '_gobject_already_installed'):
# already installed, don't do it twice.
return
gobject._gobject_already_installed = True
# get gobject mainloop / context
loop = gobject.MainLoop()
gobject.threads_init()
context = loop.get_context()
# schedule the iteration each frame
def _gobject_iteration(*largs):
# XXX we need to loop over context here, otherwise, we might have a lag
loop = 0
while context.pending() and loop < 10:
context.iteration(False)
loop += 1
Clock.schedule_interval(_gobject_iteration, 0)
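# Usage note: call install_gobject_iteration() once, before any gobject-based
# code (e.g. gstreamer playback) starts producing events, so that the pending
# mainloop callbacks above are drained from Kivy's Clock every frame.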
# -----------------------------------------------------------------------------
# Android support
# -----------------------------------------------------------------------------
g_android_redraw_count = 0
def _android_ask_redraw(*largs):
# after wakeup, we need to redraw more than once, otherwise we get a
# black screen
global g_android_redraw_count
from kivy.core.window import Window
Window.canvas.ask_update()
g_android_redraw_count -= 1
if g_android_redraw_count < 0:
return False
def install_android():
'''Install hooks for the android platform.
* Automatically sleep when the device is paused.
* Automatically kill the application when the return key is pressed.
'''
try:
import android
except ImportError:
print('Android lib is missing, cannot install android hooks')
return
from kivy.clock import Clock
from kivy.logger import Logger
import pygame
Logger.info('Support: Android install hooks')
# Init the library
android.init()
android.map_key(android.KEYCODE_MENU, pygame.K_MENU)
android.map_key(android.KEYCODE_BACK, pygame.K_ESCAPE)
# Check if android should be paused or not.
# If pause is requested, just leave the app.
def android_check_pause(*largs):
# do nothing until android asks for it.
if not android.check_pause():
return
from kivy.app import App
from kivy.base import stopTouchApp
from kivy.logger import Logger
from kivy.core.window import Window
global g_android_redraw_count
# try to get the current running application
Logger.info('Android: Must go into sleep mode, check the app')
app = App.get_running_app()
# no running application, stop our loop.
if app is None:
Logger.info('Android: No app running, stop everything.')
stopTouchApp()
return
# try to go to pause mode
if app.dispatch('on_pause'):
Logger.info('Android: App paused, now wait for resume.')
# app goes in pause mode, wait.
android.wait_for_resume()
# is it a stop or resume ?
if android.check_stop():
# app must stop
Logger.info('Android: Android wants to close our app.')
stopTouchApp()
else:
# app resuming now !
Logger.info('Android: Android has resumed, resume the app.')
app.dispatch('on_resume')
Window.canvas.ask_update()
g_android_redraw_count = 25 # 5 frames/seconds for 5 seconds
Clock.unschedule(_android_ask_redraw)
Clock.schedule_interval(_android_ask_redraw, 1 / 5)
Logger.info('Android: App resume completed.')
# app doesn't support pause mode, just stop it.
else:
Logger.info('Android: App doesn\'t support pause mode, stop.')
stopTouchApp()
Clock.schedule_interval(android_check_pause, 0)
_twisted_reactor_stopper = None
_twisted_reactor_work = None
def install_twisted_reactor(**kwargs):
'''Installs a threaded twisted reactor, which will schedule one
reactor iteration before the next frame only when twisted needs
to do some work.
Any arguments or keyword arguments passed to this function will be
    passed on to the threadedselect reactor's interleave function. These
are the arguments one would usually pass to twisted's reactor.startRunning.
Unlike the default twisted reactor, the installed reactor will not handle
any signals unless you set the 'installSignalHandlers' keyword argument
to 1 explicitly. This is done to allow kivy to handle the signals as
usual unless you specifically want the twisted reactor to handle the
signals (e.g. SIGINT).
.. note::
Twisted is not included in iOS build by default. To use it on iOS,
put the twisted distribution (and zope.interface dependency) in your
application directory.
'''
import twisted
# prevent installing more than once
if hasattr(twisted, '_kivy_twisted_reactor_installed'):
return
twisted._kivy_twisted_reactor_installed = True
# don't let twisted handle signals, unless specifically requested
kwargs.setdefault('installSignalHandlers', 0)
# install threaded-select reactor, to use with own event loop
from twisted.internet import _threadedselect
_threadedselect.install()
# now we can import twisted reactor as usual
from twisted.internet import reactor
from twisted.internet.error import ReactorNotRunning
from collections import deque
from kivy.base import EventLoop
from kivy.logger import Logger
from kivy.clock import Clock
# will hold callbacks to twisted callbacks
q = deque()
# twisted will call the wake function when it needs to do work
def reactor_wake(twisted_loop_next):
'''Wakeup the twisted reactor to start processing the task queue
'''
Logger.trace("Support: twisted wakeup call to schedule task")
q.append(twisted_loop_next)
# called every frame, to process the reactors work in main thread
def reactor_work(*args):
'''Process the twisted reactor task queue
'''
Logger.trace("Support: processing twisted task queue")
while len(q):
q.popleft()()
global _twisted_reactor_work
_twisted_reactor_work = reactor_work
# start the reactor, by telling twisted how to wake, and process
def reactor_start(*args):
'''Start the twisted reactor main loop
'''
Logger.info("Support: Starting twisted reactor")
reactor.interleave(reactor_wake, **kwargs)
Clock.schedule_interval(reactor_work, 0)
    # make sure the twisted reactor is shut down when the kivy event loop exits
def reactor_stop(*args):
'''Shutdown the twisted reactor main loop
'''
if reactor.threadpool:
Logger.info("Support: Stooping twisted threads")
reactor.threadpool.stop()
Logger.info("Support: Shutting down twisted reactor")
reactor._mainLoopShutdown()
try:
reactor.stop()
except ReactorNotRunning:
pass
import sys
sys.modules.pop('twisted.internet.reactor', None)
global _twisted_reactor_stopper
_twisted_reactor_stopper = reactor_stop
# start and stop the reactor along with kivy EventLoop
Clock.schedule_once(reactor_start, 0)
EventLoop.bind(on_stop=reactor_stop)
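# Usage sketch (illustrative; any protocol/endpoint code is up to the app):
# install the reactor *before* importing twisted.internet.reactor, otherwise
# twisted installs its default reactor and _threadedselect.install() above
# can no longer replace it:
#
#     from kivy.support import install_twisted_reactor
#     install_twisted_reactor()
#     from twisted.internet import reactor   # now the interleaved reactor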
def uninstall_twisted_reactor():
    '''Uninstalls Kivy's threaded Twisted reactor. No more Twisted tasks will
    run after this has been called. Use this to clean up the
    `twisted.internet.reactor`.
    .. versionadded:: 1.9.0
'''
import twisted
# prevent uninstalling more than once
if not hasattr(twisted, '_kivy_twisted_reactor_installed'):
return
from kivy.base import EventLoop
global _twisted_reactor_stopper
_twisted_reactor_stopper()
EventLoop.unbind(on_stop=_twisted_reactor_stopper)
del twisted._kivy_twisted_reactor_installed
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.exceptions import TaskError
from pants.goal.products import UnionProducts
class ClasspathEntry(object):
"""Represents a java classpath entry."""
def __init__(self, path):
self._path = path
@property
def path(self):
"""Returns the pants internal path of this classpath entry.
Suitable for use in constructing classpaths for pants executions and pants generated artifacts.
:rtype: string
"""
return self._path
def is_excluded_by(self, excludes):
"""Returns `True` if this classpath entry should be excluded given the `excludes` in play.
:param excludes: The excludes to check this classpath entry against.
:type excludes: list of :class:`pants.backend.jvm.targets.exclude.Exclude`
:rtype: bool
"""
return False
def __hash__(self):
return hash(self.path)
def __eq__(self, other):
return isinstance(other, ClasspathEntry) and self.path == other.path
def __ne__(self, other):
return not self == other
def __repr__(self):
return 'ClasspathEntry(path={!r})'.format(self.path)
class ArtifactClasspathEntry(ClasspathEntry):
"""Represents a resolved third party classpath entry."""
def __init__(self, path, coordinate, cache_path):
super(ArtifactClasspathEntry, self).__init__(path)
self._coordinate = coordinate
self._cache_path = cache_path
@property
def coordinate(self):
"""Returns the maven coordinate that used to resolve this classpath entry's artifact.
:rtype: :class:`pants.backend.jvm.jar_dependency_utils.M2Coordinate`
"""
return self._coordinate
@property
def cache_path(self):
"""Returns the external cache path of this classpath entry.
For example, the `~/.m2/repository` or `~/.ivy2/cache` location of the resolved artifact for
maven and ivy resolvers respectively.
Suitable for use in constructing classpaths for external tools that should not be subject to
    potential volatility in pants' own internal caches.
:rtype: string
"""
return self._cache_path
def is_excluded_by(self, excludes):
return any(_matches_exclude(self.coordinate, exclude) for exclude in excludes)
def __hash__(self):
return hash((self.path, self.coordinate, self.cache_path))
def __eq__(self, other):
return (isinstance(other, ArtifactClasspathEntry) and
self.path == other.path and
self.coordinate == other.coordinate and
self.cache_path == other.cache_path)
def __ne__(self, other):
return not self == other
def __repr__(self):
return ('ArtifactClasspathEntry(path={!r}, coordinate={!r}, cache_path={!r})'
.format(self.path, self.coordinate, self.cache_path))
def _matches_exclude(coordinate, exclude):
if not coordinate.org == exclude.org:
return False
if not exclude.name:
return True
if coordinate.name == exclude.name:
return True
return False
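# For example (illustrative coordinates): Exclude('org.example') excludes every
# artifact whose org is 'org.example', while Exclude('org.example', 'guava')
# excludes only org.example#guava. An exclude whose org does not match the
# coordinate never applies, regardless of its name.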
def _not_excluded_filter(excludes):
def not_excluded(path_tuple):
conf, classpath_entry = path_tuple
return not classpath_entry.is_excluded_by(excludes)
return not_excluded
class ClasspathProducts(object):
def __init__(self, pants_workdir, classpaths=None, excludes=None):
self._classpaths = classpaths or UnionProducts()
self._excludes = excludes or UnionProducts()
self._pants_workdir = pants_workdir
@staticmethod
def init_func(pants_workdir):
return lambda: ClasspathProducts(pants_workdir)
def copy(self):
"""Returns a copy of this ClasspathProducts.
Edits to the copy's classpaths or exclude associations will not affect the classpaths or
    excludes in the original. The copy is shallow though, so edits to the copy's product values
will mutate the original's product values. See `UnionProducts.copy`.
:rtype: :class:`ClasspathProducts`
"""
return ClasspathProducts(pants_workdir=self._pants_workdir,
classpaths=self._classpaths.copy(),
excludes=self._excludes.copy())
def add_for_targets(self, targets, classpath_elements):
"""Adds classpath path elements to the products of all the provided targets."""
for target in targets:
self.add_for_target(target, classpath_elements)
def add_for_target(self, target, classpath_elements):
"""Adds classpath path elements to the products of the provided target."""
self._add_elements_for_target(target, self._wrap_path_elements(classpath_elements))
def add_jars_for_targets(self, targets, conf, resolved_jars):
"""Adds jar classpath elements to the products of the provided targets.
The resolved jars are added in a way that works with excludes.
"""
classpath_entries = []
for jar in resolved_jars:
if not jar.pants_path:
raise TaskError('Jar: {!s} has no specified path.'.format(jar.coordinate))
cp_entry = ArtifactClasspathEntry(jar.pants_path, jar.coordinate, jar.cache_path)
classpath_entries.append((conf, cp_entry))
for target in targets:
self._add_elements_for_target(target, classpath_entries)
def add_excludes_for_targets(self, targets):
"""Add excludes from the provided targets.
Does not look up transitive excludes.
:param targets: The targets to add excludes for.
:type targets: list of :class:`pants.build_graph.target.Target`
"""
for target in targets:
self._add_excludes_for_target(target)
def remove_for_target(self, target, classpath_elements):
"""Removes the given entries for the target."""
self._classpaths.remove_for_target(target, self._wrap_path_elements(classpath_elements))
def get_for_target(self, target):
"""Gets the classpath products for the given target.
Products are returned in order, respecting target excludes.
:param target: The target to lookup classpath products for.
:returns: The ordered (conf, path) tuples, with paths being either classfile directories or
jars.
:rtype: list of (string, string)
"""
return self.get_for_targets([target])
def get_for_targets(self, targets):
"""Gets the classpath products for the given targets.
Products are returned in order, respecting target excludes.
:param targets: The targets to lookup classpath products for.
:returns: The ordered (conf, path) tuples, with paths being either classfile directories or
jars.
:rtype: list of (string, string)
"""
cp_entries = self.get_classpath_entries_for_targets(targets)
return [(conf, cp_entry.path) for conf, cp_entry in cp_entries]
def get_classpath_entries_for_targets(self, targets, respect_excludes=True):
"""Gets the classpath products for the given targets.
Products are returned in order, optionally respecting target excludes.
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ClasspathEntry`)
"""
classpath_tuples = self._classpaths.get_for_targets(targets)
if respect_excludes:
return self._filter_by_excludes(classpath_tuples, targets)
else:
return classpath_tuples
def get_artifact_classpath_entries_for_targets(self, targets, respect_excludes=True):
"""Gets the artifact classpath products for the given targets.
Products are returned in order, optionally respecting target excludes, and the products only
include external artifact classpath elements (ie: resolved jars).
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ArtifactClasspathEntry`)
"""
classpath_tuples = self.get_classpath_entries_for_targets(targets,
respect_excludes=respect_excludes)
return [(conf, cp_entry) for conf, cp_entry in classpath_tuples
if isinstance(cp_entry, ArtifactClasspathEntry)]
def get_internal_classpath_entries_for_targets(self, targets, respect_excludes=True):
"""Gets the internal classpath products for the given targets.
Products are returned in order, optionally respecting target excludes, and the products only
include internal artifact classpath elements (ie: no resolved jars).
:param targets: The targets to lookup classpath products for.
:param bool respect_excludes: `True` to respect excludes; `False` to ignore them.
:returns: The ordered (conf, classpath entry) tuples.
:rtype: list of (string, :class:`ClasspathEntry`)
"""
classpath_tuples = self.get_classpath_entries_for_targets(targets,
respect_excludes=respect_excludes)
return [(conf, cp_entry) for conf, cp_entry in classpath_tuples
if not isinstance(cp_entry, ArtifactClasspathEntry)]
def _filter_by_excludes(self, classpath_tuples, root_targets):
# Excludes are always applied transitively, so regardless of whether a transitive
# set of targets was included here, their closure must be included.
closure = set()
for root_target in root_targets:
closure.update(root_target.closure(bfs=True))
excludes = self._excludes.get_for_targets(closure)
return filter(_not_excluded_filter(excludes), classpath_tuples)
def _add_excludes_for_target(self, target):
if target.is_exported:
self._excludes.add_for_target(target, [Exclude(target.provides.org,
target.provides.name)])
if isinstance(target, JvmTarget) and target.excludes:
self._excludes.add_for_target(target, target.excludes)
def _wrap_path_elements(self, classpath_elements):
return [(element[0], ClasspathEntry(element[1])) for element in classpath_elements]
def _add_elements_for_target(self, target, elements):
self._validate_classpath_tuples(elements, target)
self._classpaths.add_for_target(target, elements)
def _validate_classpath_tuples(self, classpath, target):
"""Validates that all files are located within the working directory, to simplify relativization.
:param classpath: The list of classpath tuples. Each tuple is a 2-tuple of ivy_conf and
ClasspathEntry.
:param target: The target that the classpath tuple is being registered for.
:raises: `TaskError` when the path is outside the work directory
"""
for classpath_tuple in classpath:
conf, classpath_entry = classpath_tuple
path = classpath_entry.path
if os.path.relpath(path, self._pants_workdir).startswith(os.pardir):
raise TaskError(
'Classpath entry {} for target {} is located outside the working directory "{}".'
.format(path, target.address.spec, self._pants_workdir))
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution as distribution_lib
from tensorflow.contrib.distributions.python.ops import distribution_util
# Bijectors must be directly imported because `remove_undocumented` prevents
# individual file imports.
from tensorflow.contrib.distributions.python.ops.bijectors.identity import Identity
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
__all__ = [
"TransformedDistribution",
]
# The following helper functions attempt to statically perform a TF operation.
# These functions make debugging easier since we can do more validation during
# graph construction.
def _static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def _logical_and(*args):
"""Convenience function which attempts to statically `reduce_all`."""
args_ = [_static_value(x) for x in args]
if any(x is not None and not bool(x) for x in args_):
return constant_op.constant(False)
if all(x is not None and bool(x) for x in args_):
return constant_op.constant(True)
if len(args) == 2:
return math_ops.logical_and(*args)
return math_ops.reduce_all(args)
def _logical_equal(x, y):
"""Convenience function which attempts to statically compute `x == y`."""
x_ = _static_value(x)
y_ = _static_value(y)
if x_ is None or y_ is None:
return math_ops.equal(x, y)
return constant_op.constant(np.array_equal(x_, y_))
def _logical_not(x):
"""Convenience function which attempts to statically apply `logical_not`."""
x_ = _static_value(x)
if x_ is None:
return math_ops.logical_not(x)
return constant_op.constant(np.logical_not(x_))
def _concat_vectors(*args):
"""Convenience function which concatenates input vectors."""
args_ = [_static_value(x) for x in args]
if any(x_ is None for x_ in args_):
return array_ops.concat(args, 0)
return constant_op.constant([x_ for vec_ in args_ for x_ in vec_])
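# For example, _concat_vectors([2], [3]) with fully static inputs produces the
# constant [2, 3]; if any argument's value is unknown at graph-construction
# time it falls back to a dynamic array_ops.concat of the same vectors.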
def _pick_scalar_condition(pred, cond_true, cond_false):
"""Convenience function which chooses the condition based on the predicate."""
# Note: This function is only valid if all of pred, cond_true, and cond_false
  # are scalars. This means its semantics are arguably more like tf.cond than
  # tf.where even though we use tf.where (array_ops.where) to implement it.
pred_ = _static_value(pred)
if pred_ is None:
return array_ops.where(pred, cond_true, cond_false)
return cond_true if pred_ else cond_false
def _ones_like(x):
"""Convenience function attempts to statically construct `ones_like`."""
# Should only be used for small vectors.
if x.get_shape().is_fully_defined():
return array_ops.ones(x.get_shape().as_list(), dtype=x.dtype)
return array_ops.ones_like(x)
def _ndims_from_shape(shape):
"""Returns `Tensor`'s `rank` implied by a `Tensor` shape."""
if shape.get_shape().ndims not in (None, 1):
raise ValueError("input is not a valid shape: not 1D")
if not shape.dtype.is_integer:
raise TypeError("input is not a valid shape: wrong dtype")
if shape.get_shape().is_fully_defined():
return constant_op.constant(shape.get_shape().as_list()[0])
return array_ops.shape(shape)[0]
def _is_scalar_from_shape(shape):
"""Returns `True` `Tensor` if `Tensor` shape implies a scalar."""
return _logical_equal(_ndims_from_shape(shape), 0)
class TransformedDistribution(distribution_lib.Distribution):
"""A Transformed Distribution.
A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
and a deterministic, invertible, differentiable transform, `Y = g(X)`. The
transform is typically an instance of the `Bijector` class and the base
distribution is typically an instance of the `Distribution` class.
A `Bijector` is expected to implement the following functions:
- `forward`,
- `inverse`,
- `inverse_log_det_jacobian`.
The semantics of these functions are outlined in the `Bijector` documentation.
We now describe how a `TransformedDistribution` alters the input/outputs of a
`Distribution` associated with a random variable (rv) `X`.
Write `cdf(Y=y)` for an absolutely continuous cumulative distribution function
of random variable `Y`; write the probability density function `pdf(Y=y) :=
  d^k / (dy_1,...,dy_k) cdf(Y=y)` for its derivative with respect to `Y` evaluated at
`y`. Assume that `Y = g(X)` where `g` is a deterministic diffeomorphism,
i.e., a non-random, continuous, differentiable, and invertible function.
Write the inverse of `g` as `X = g^{-1}(Y)` and `(J o g)(x)` for the Jacobian
of `g` evaluated at `x`.
A `TransformedDistribution` implements the following operations:
* `sample`
Mathematically: `Y = g(X)`
Programmatically: `bijector.forward(distribution.sample(...))`
* `log_prob`
Mathematically: `(log o pdf)(Y=y) = (log o pdf o g^{-1})(y)
+ (log o abs o det o J o g^{-1})(y)`
Programmatically: `(distribution.log_prob(bijector.inverse(y))
+ bijector.inverse_log_det_jacobian(y))`
* `log_cdf`
Mathematically: `(log o cdf)(Y=y) = (log o cdf o g^{-1})(y)`
Programmatically: `distribution.log_cdf(bijector.inverse(x))`
* and similarly for: `cdf`, `prob`, `log_survival_function`,
`survival_function`.
A simple example constructing a Log-Normal distribution from a Normal
distribution:
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Exp(),
name="LogNormalTransformedDistribution")
```
A `LogNormal` made from callables:
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
      bijector=ds.bijectors.Inline(
          forward_fn=tf.exp,
          inverse_fn=tf.log,
          inverse_log_det_jacobian_fn=(
              lambda y: -tf.reduce_sum(tf.log(y), axis=-1))),
      name="LogNormalTransformedDistribution")
```
Another example constructing a Normal from a StandardNormal:
```python
ds = tf.contrib.distributions
normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(
shift=-1.,
scale_identity_multiplier=2.,
event_ndims=0),
name="NormalTransformedDistribution")
```
A `TransformedDistribution`'s batch- and event-shape are implied by the base
distribution unless explicitly overridden by `batch_shape` or `event_shape`
arguments. Specifying an overriding `batch_shape` (`event_shape`) is
permitted only if the base distribution has scalar batch-shape (event-shape).
The bijector is applied to the distribution as if the distribution possessed
the overridden shape(s). The following example demonstrates how to construct a
multivariate Normal as a `TransformedDistribution`.
```python
ds = tf.contrib.distributions
# We will create two MVNs with batch_shape = event_shape = 2.
mean = [[-1., 0], # batch:0
[0., 1]] # batch:1
chol_cov = [[[1., 0],
[0, 1]], # batch:0
[[1, 0],
[2, 2]]] # batch:1
mvn1 = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(shift=mean, scale_tril=chol_cov),
batch_shape=[2], # Valid because base_distribution.batch_shape == [].
event_shape=[2]) # Valid because base_distribution.event_shape == [].
mvn2 = ds.MultivariateNormalTriL(loc=mean, scale_tril=chol_cov)
# mvn1.log_prob(x) == mvn2.log_prob(x)
```
"""
def __init__(self,
distribution,
bijector=None,
batch_shape=None,
event_shape=None,
validate_args=False,
name=None):
"""Construct a Transformed Distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
bijector: The object responsible for calculating the transformation.
Typically an instance of `Bijector`. `None` means `Identity()`.
batch_shape: `integer` vector `Tensor` which overrides `distribution`
`batch_shape`; valid only if `distribution.is_scalar_batch()`.
event_shape: `integer` vector `Tensor` which overrides `distribution`
`event_shape`; valid only if `distribution.is_scalar_event()`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class. Default:
`bijector.name + distribution.name`.
"""
parameters = locals()
name = name or (("" if bijector is None else bijector.name) +
distribution.name)
with ops.name_scope(name, values=[event_shape, batch_shape]):
# For convenience we define some handy constants.
self._zero = constant_op.constant(0, dtype=dtypes.int32, name="zero")
self._empty = constant_op.constant([], dtype=dtypes.int32, name="empty")
if bijector is None:
bijector = Identity(validate_args=validate_args)
# We will keep track of a static and dynamic version of
# self._is_{batch,event}_override. This way we can do more prior to graph
# execution, including possibly raising Python exceptions.
self._override_batch_shape = self._maybe_validate_shape_override(
batch_shape, distribution.is_scalar_batch(), validate_args,
"batch_shape")
self._is_batch_override = _logical_not(_logical_equal(
_ndims_from_shape(self._override_batch_shape), self._zero))
self._is_maybe_batch_override = bool(
tensor_util.constant_value(self._override_batch_shape) is None or
tensor_util.constant_value(self._override_batch_shape).size != 0)
self._override_event_shape = self._maybe_validate_shape_override(
event_shape, distribution.is_scalar_event(), validate_args,
"event_shape")
self._is_event_override = _logical_not(_logical_equal(
_ndims_from_shape(self._override_event_shape), self._zero))
self._is_maybe_event_override = bool(
tensor_util.constant_value(self._override_event_shape) is None or
tensor_util.constant_value(self._override_event_shape).size != 0)
# To convert a scalar distribution into a multivariate distribution we
# will draw dims from the sample dims, which are otherwise iid. This is
# easy to do except in the case that the base distribution has batch dims
# and we're overriding event shape. When that case happens the event dims
# will incorrectly be to the left of the batch dims. In this case we'll
# cyclically permute left the new dims.
self._needs_rotation = _logical_and(
self._is_event_override,
_logical_not(self._is_batch_override),
_logical_not(distribution.is_scalar_batch()))
override_event_ndims = _ndims_from_shape(self._override_event_shape)
self._rotate_ndims = _pick_scalar_condition(
self._needs_rotation, override_event_ndims, 0)
# We'll be reducing the head dims (if at all), i.e., this will be []
# if we don't need to reduce.
self._reduce_event_indices = math_ops.range(
self._rotate_ndims - override_event_ndims, self._rotate_ndims)
self._distribution = distribution
self._bijector = bijector
super(TransformedDistribution, self).__init__(
dtype=self._distribution.dtype,
reparameterization_type=self._distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
# We let TransformedDistribution access _graph_parents since this class
# is more like a baseclass than derived.
graph_parents=(distribution._graph_parents + # pylint: disable=protected-access
bijector.graph_parents),
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._distribution
@property
def bijector(self):
"""Function transforming x => y."""
return self._bijector
def _event_shape_tensor(self):
return self.bijector.forward_event_shape_tensor(
distribution_util.pick_vector(
self._is_event_override,
self._override_event_shape,
self.distribution.event_shape_tensor()))
def _event_shape(self):
    # If there's a chance that the event_shape has been overridden, we return
# what we statically know about the `event_shape_override`. This works
# because: `_is_maybe_event_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `event_shape` or we do.
#
# Since the `bijector` may change the `event_shape`, we then forward what we
# know to the bijector. This allows the `bijector` to have final say in the
# `event_shape`.
static_override = tensor_util.constant_value(self._override_event_shape)
return self.bijector.forward_event_shape(
tensor_shape.TensorShape(static_override)
if self._is_maybe_event_override
else self.distribution.event_shape)
def _batch_shape_tensor(self):
return distribution_util.pick_vector(
self._is_batch_override,
self._override_batch_shape,
self.distribution.batch_shape_tensor())
def _batch_shape(self):
    # If there's a chance that the batch_shape has been overridden, we return
# what we statically know about the `batch_shape_override`. This works
# because: `_is_maybe_batch_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `batch_shape` or we do.
#
# Notice that this implementation parallels the `_event_shape` except that
# the `bijector` doesn't get to alter the `batch_shape`. Recall that
# `batch_shape` is a property of a distribution while `event_shape` is
# shared between both the `distribution` instance and the `bijector`.
static_override = tensor_util.constant_value(self._override_batch_shape)
return (tensor_shape.TensorShape(static_override)
if self._is_maybe_batch_override
else self.distribution.batch_shape)
def _sample_n(self, n, seed=None):
sample_shape = _concat_vectors(
distribution_util.pick_vector(self._needs_rotation, self._empty, [n]),
self._override_batch_shape,
self._override_event_shape,
distribution_util.pick_vector(self._needs_rotation, [n], self._empty))
x = self.distribution.sample(sample_shape=sample_shape, seed=seed)
x = self._maybe_rotate_dims(x)
return self.bijector.forward(x)
def _log_prob(self, y):
x = self.bijector.inverse(y)
ildj = self.bijector.inverse_log_det_jacobian(y)
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x)
if self._is_maybe_event_override:
log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)
log_prob = ildj + log_prob
if self._is_maybe_event_override:
log_prob.set_shape(array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-1], self.batch_shape))
return log_prob
def _prob(self, y):
x = self.bijector.inverse(y)
ildj = self.bijector.inverse_log_det_jacobian(y)
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x)
if self._is_maybe_event_override:
prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
prob *= math_ops.exp(ildj)
if self._is_maybe_event_override:
prob.set_shape(array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-1], self.batch_shape))
return prob
def _log_cdf(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("log_cdf is not implemented when overriding "
"event_shape")
x = self.bijector.inverse(y)
return self.distribution.log_cdf(x)
def _cdf(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("cdf is not implemented when overriding "
"event_shape")
x = self.bijector.inverse(y)
return self.distribution.cdf(x)
def _log_survival_function(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("log_survival_function is not implemented when "
"overriding event_shape")
x = self.bijector.inverse(y)
return self.distribution.log_survival_function(x)
def _survival_function(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("survival_function is not implemented when "
"overriding event_shape")
x = self.bijector.inverse(y)
return self.distribution.survival_function(x)
def _entropy(self):
if not self.bijector.is_constant_jacobian:
raise NotImplementedError("entropy is not implemented")
# Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
# can be shown that:
# H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
# If is_constant_jacobian then:
# E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
# where c can be anything.
entropy = self.distribution.entropy()
if self._is_maybe_event_override:
# H[X] = sum_i H[X_i] if X_i are mutually independent.
# This means that a reduce_sum is a simple rescaling.
entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
dtype=entropy.dtype.base_dtype)
if self._is_maybe_batch_override:
new_shape = array_ops.concat([
_ones_like(self._override_batch_shape),
self.distribution.batch_shape_tensor()
], 0)
entropy = array_ops.reshape(entropy, new_shape)
multiples = array_ops.concat([
self._override_batch_shape,
_ones_like(self.distribution.batch_shape_tensor())
], 0)
entropy = array_ops.tile(entropy, multiples)
dummy = array_ops.zeros([], self.dtype)
entropy -= self.bijector.inverse_log_det_jacobian(dummy)
entropy.set_shape(self.batch_shape)
return entropy
def _maybe_validate_shape_override(self, override_shape, base_is_scalar,
validate_args, name):
"""Helper to __init__ which ensures override batch/event_shape are valid."""
if override_shape is None:
override_shape = []
override_shape = ops.convert_to_tensor(override_shape, dtype=dtypes.int32,
name=name)
if not override_shape.dtype.is_integer:
raise TypeError("shape override must be an integer")
override_is_scalar = _is_scalar_from_shape(override_shape)
if tensor_util.constant_value(override_is_scalar):
return self._empty
dynamic_assertions = []
if override_shape.get_shape().ndims is not None:
if override_shape.get_shape().ndims != 1:
raise ValueError("shape override must be a vector")
elif validate_args:
dynamic_assertions += [check_ops.assert_rank(
override_shape, 1,
message="shape override must be a vector")]
if tensor_util.constant_value(override_shape) is not None:
if any(s <= 0 for s in tensor_util.constant_value(override_shape)):
raise ValueError("shape override must have positive elements")
elif validate_args:
dynamic_assertions += [check_ops.assert_positive(
override_shape,
message="shape override must have positive elements")]
is_both_nonscalar = _logical_and(_logical_not(base_is_scalar),
_logical_not(override_is_scalar))
if tensor_util.constant_value(is_both_nonscalar) is not None:
if tensor_util.constant_value(is_both_nonscalar):
raise ValueError("base distribution not scalar")
elif validate_args:
dynamic_assertions += [check_ops.assert_equal(
is_both_nonscalar, False,
message="base distribution not scalar")]
if not dynamic_assertions:
return override_shape
return control_flow_ops.with_dependencies(
dynamic_assertions, override_shape)
def _maybe_rotate_dims(self, x, rotate_right=False):
"""Helper which rolls left event_dims left or right event_dims right."""
needs_rotation_const = tensor_util.constant_value(self._needs_rotation)
if needs_rotation_const is not None and not needs_rotation_const:
return x
ndims = array_ops.rank(x)
n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims
return array_ops.transpose(
x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))
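# --- Illustrative sketch (not part of the original TensorFlow module) ---
# The entropy computation above relies on the change-of-variables identity
#   H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)],
# which for an affine map Y = a*X + b with constant Jacobian reduces to
# H[Y] = H[X] + log|a|. A minimal NumPy/SciPy check of that identity,
# assuming X ~ Normal(0, 1); the values of `a` and `b` are arbitrary:
def _example_affine_entropy():  # illustrative only
    import numpy as np
    from scipy import stats
    a, b = 3.0, -1.0
    h_x = stats.norm(loc=0.0, scale=1.0).entropy()   # H[X]
    h_y = stats.norm(loc=b, scale=abs(a)).entropy()  # H[a*X + b]
    assert np.isclose(h_y, h_x + np.log(abs(a)))
    return float(h_y)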
|
|
#!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Miscellaneous Data I/O
^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = [
"write_polygon_to_text",
"to_pickle",
"from_pickle",
"get_radiosonde",
"get_membership_functions",
]
__doc__ = __doc__.format("\n ".join(__all__))
import datetime as dt
import io
import pickle
import urllib
import warnings
import numpy as np
from wradlib import util
def _write_polygon_to_txt(f, idx, vertices):
f.write(f"{idx[0]} {idx[1]}\n")
for i, v in enumerate(vertices):
f.write(f"{i} ")
f.write(f"{v[0]:f} {v[1]:f} {v[2]:f} {v[3]:f}\n")
def write_polygon_to_text(fname, polygons):
"""Writes Polygons to a Text file which can be interpreted by ESRI \
ArcGIS's "Create Features from Text File (Samples)" tool.
This is currently only a convenience function with limited functionality.
E.g. interior rings are not yet supported.
Parameters
----------
fname : str
name of the file to save the vertex data to
polygons : list
list of lists of polygon vertices.
Each vertex itself is a list of 3 coordinate values and an
additional value. The third coordinate and the fourth value may be nan.
Returns
-------
None
Note
----
As Polygons are closed shapes, the first and the last vertex of each
polygon **must** be the same!
Examples
--------
Writes two triangle Polygons to a text file::
poly1 = [[0.,0.,0.,0.],[0.,1.,0.,1.],[1.,1.,0.,2.],[0.,0.,0.,0.]]
poly2 = [[0.,0.,0.,0.],[0.,1.,0.,1.],[1.,1.,0.,2.],[0.,0.,0.,0.]]
polygons = [poly1, poly2]
write_polygon_to_text('polygons.txt', polygons)
The resulting text file will look like this::
Polygon
0 0
0 0.000000 0.000000 0.000000 0.000000
1 0.000000 1.000000 0.000000 1.000000
2 1.000000 1.000000 0.000000 2.000000
3 0.000000 0.000000 0.000000 0.000000
1 0
0 0.000000 0.000000 0.000000 0.000000
1 0.000000 1.000000 0.000000 1.000000
2 1.000000 1.000000 0.000000 2.000000
3 0.000000 0.000000 0.000000 0.000000
END
"""
with open(fname, "w") as f:
f.write("Polygon\n")
count = 0
for vertices in polygons:
_write_polygon_to_txt(f, (count, 0), vertices)
count += 1
f.write("END\n")
def to_pickle(fpath, obj):
"""Pickle object <obj> to file <fpath>"""
output = open(fpath, "wb")
pickle.dump(obj, output)
output.close()
def from_pickle(fpath):
"""Return pickled object from file <fpath>"""
pkl_file = open(fpath, "rb")
obj = pickle.load(pkl_file)
pkl_file.close()
return obj
def get_radiosonde(wmoid, date, cols=None):
"""Download radiosonde data from internet.
Based on http://weather.uwyo.edu/upperair/sounding.html.
Parameters
----------
wmoid : int
WMO radiosonde ID
date : :py:class:`datetime.datetime`
Date and Time
Keyword Arguments
-----------------
cols : tuple
tuple of int or strings describing the columns to consider,
defaults to None (all columns)
Returns
-------
data : :py:class:`numpy:numpy.ndarray`
Structured array of radiosonde data
meta : dict
radiosonde metadata
"""
year = date.strftime("%Y")
month = date.strftime("%m")
day = date.strftime("%d")
hour = date.strftime("%H")
# Radiosondes are only at noon and midnight
hour = "12" if (6 < int(hour) < 18) else "00"
# url
url_str = (
"http://weather.uwyo.edu/cgi-bin/sounding?"
"TYPE=TEXT%3ALIST&"
f"YEAR={year}&MONTH={month}&"
f"FROM={day}{hour}&TO={day}{hour}&STNM={wmoid}&"
"ICE=1"
)
# html request
with urllib.request.urlopen(url_str) as url_request:
response = url_request.read()
# decode string
url_text = response.decode("utf-8")
# first line (e.g. error message)
if url_text.find("<H2>") == -1:
err = url_text.split("\n", 1)[1].split("\n", 1)[0]
raise ValueError(err)
# extract relevant information
url_data = url_text.split("<PRE>")[1].split("</PRE>")[0]
url_meta = url_text.split("<PRE>")[2].split("</PRE>")[0]
# extract empty lines, names, units and data
_, _, names, units, _, url_data = url_data.split("\n", 5)
names = names.split()
units = units.split()
unitdict = {name: unit for (name, unit) in zip(names, units)}
# read data
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
data = np.genfromtxt(
io.StringIO(url_data),
names=names,
dtype=float,
usecols=cols,
autostrip=True,
invalid_raise=False,
)
# read metadata
meta = {}
for i, row in enumerate(io.StringIO(url_meta)):
if i == 0:
continue
k, v = row.split(":")
k = k.strip()
v = v.strip()
if i == 2:
v = int(v)
elif i == 3:
v = dt.datetime.strptime(v, "%y%m%d/%H%M")
elif i > 3:
v = float(v)
meta[k] = v
meta["quantity"] = {item: unitdict[item] for item in data.dtype.names}
return data, meta
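# --- Illustrative sketch (not part of the original wradlib module) ---
# Hedged usage example for ``get_radiosonde``; the WMO station id and the
# date below are placeholders, and the call needs internet access to reach
# weather.uwyo.edu, so it is not exercised at import time.
def _example_get_radiosonde():  # illustrative only
    data, meta = get_radiosonde(10410, dt.datetime(2020, 6, 1, 12))
    print(data.dtype.names)  # column names parsed from the sounding table
    print(sorted(meta))      # metadata keys, including the units dict "quantity"
    return data, meta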
def get_membership_functions(filename):
"""Reads membership function parameters from wradlib-data file.
Parameters
----------
filename : str
Filename of wradlib-data file
Returns
-------
msf : :py:class:`numpy:numpy.ndarray`
Array of membership functions with shape (hm-classes, observables,
indep-ranges, 5)
"""
gzip = util.import_optional("gzip")
with gzip.open(filename, "rb") as f:
nclass = int(f.readline().decode().split(":")[1].strip())
nobs = int(f.readline().decode().split(":")[1].strip())
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
data = np.genfromtxt(f, skip_header=10, autostrip=True, invalid_raise=False)
data = np.reshape(data, (nobs, int(data.shape[0] / nobs), data.shape[1]))
msf = np.reshape(
data, (data.shape[0], nclass, int(data.shape[1] / nclass), data.shape[2])
)
msf = np.swapaxes(msf, 0, 1)
return msf
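# --- Illustrative sketch (not part of the original wradlib module) ---
# The reshape/swapaxes chain in ``get_membership_functions`` turns a flat
# (nobs * nclass * nranges, ncols) table into (nclass, nobs, nranges, ncols).
# A minimal NumPy shape check with made-up sizes (all values are placeholders):
def _example_membership_reshape():  # illustrative only
    nclass, nobs, nranges, ncols = 3, 2, 4, 5
    data = np.arange(nobs * nclass * nranges * ncols, dtype=float)
    data = data.reshape(nobs * nclass * nranges, ncols)
    data = np.reshape(data, (nobs, int(data.shape[0] / nobs), data.shape[1]))
    msf = np.reshape(
        data, (data.shape[0], nclass, int(data.shape[1] / nclass), data.shape[2])
    )
    msf = np.swapaxes(msf, 0, 1)
    assert msf.shape == (nclass, nobs, nranges, ncols)
    return msf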
|
|
"""
Multiclass and multilabel classification strategies
===================================================
This module implements multiclass learning algorithms:
- one-vs-the-rest / one-vs-all
- one-vs-one
- error correcting output codes
The estimators provided in this module are meta-estimators: they require a base
estimator to be provided in their constructor. For example, it is possible to
use these estimators to turn a binary classifier or a regressor into a
multiclass classifier. It is also possible to use these estimators with
multiclass estimators in the hope that their accuracy or runtime performance
improves.
All classifiers in scikit-learn implement multiclass classification; you
only need to use this module if you want to experiment with custom multiclass
strategies.
The one-vs-the-rest meta-classifier also implements a `predict_proba` method,
so long as such a method is implemented by the base classifier. This method
returns probabilities of class membership in both the single label and
multilabel case. Note that in the multilabel case, probabilities are the
marginal probability that a given sample falls in the given class. As such, in
the multilabel case the sum of these probabilities over all possible labels
for a given sample *will not* sum to unity, as they do in the single label
case.
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Author: Hamzeh Alsalhi <93hamsal@gmail.com>
#
# License: BSD 3 clause
import array
import numpy as np
import warnings
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, clone, is_classifier
from .base import MetaEstimatorMixin, is_regressor
from .preprocessing import LabelBinarizer
from .metrics.pairwise import euclidean_distances
from .utils import check_random_state
from .utils.validation import _num_samples
from .utils.validation import check_consistent_length
from .utils.validation import check_is_fitted
from .utils import deprecated
from .externals.joblib import Parallel
from .externals.joblib import delayed
__all__ = [
"OneVsRestClassifier",
"OneVsOneClassifier",
"OutputCodeClassifier",
]
def _fit_binary(estimator, X, y, classes=None):
"""Fit a single binary estimator."""
unique_y = np.unique(y)
if len(unique_y) == 1:
if classes is not None:
if y[0] == -1:
c = 0
else:
c = y[0]
warnings.warn("Label %s is present in all training examples." %
str(classes[c]))
estimator = _ConstantPredictor().fit(X, unique_y)
else:
estimator = clone(estimator)
estimator.fit(X, y)
return estimator
def _predict_binary(estimator, X):
"""Make predictions using a single binary estimator."""
if is_regressor(estimator):
return estimator.predict(X)
try:
score = np.ravel(estimator.decision_function(X))
except (AttributeError, NotImplementedError):
# probabilities of the positive class
score = estimator.predict_proba(X)[:, 1]
return score
def _check_estimator(estimator):
"""Make sure that an estimator implements the necessary methods."""
if (not hasattr(estimator, "decision_function") and
not hasattr(estimator, "predict_proba")):
raise ValueError("The base estimator should implement "
"decision_function or predict_proba!")
@deprecated("fit_ovr is deprecated and will be removed in 0.18."
"Use the OneVsRestClassifier instead.")
def fit_ovr(estimator, X, y, n_jobs=1):
"""Fit a one-vs-the-rest strategy.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
estimators : list of estimators object
The list of fitted estimator.
lb : fitted LabelBinarizer
"""
ovr = OneVsRestClassifier(estimator, n_jobs=n_jobs).fit(X, y)
return ovr.estimators_, ovr.label_binarizer_
@deprecated("predict_ovr is deprecated and will be removed in 0.18."
"Use the OneVsRestClassifier instead.")
def predict_ovr(estimators, label_binarizer, X):
"""Predict multi-class targets using the one vs rest strategy.
Parameters
----------
estimators : list of `n_classes` estimators, Estimators used for
predictions. The list must be homogeneous with respect to the type of
estimators. fit_ovr supplies this list as part of its output.
label_binarizer : LabelBinarizer object, Object used to transform
multiclass labels to binary labels and vice-versa. fit_ovr supplies
this object as part of its output.
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes].
Predicted multi-class targets.
"""
e_types = set([type(e) for e in estimators if not
isinstance(e, _ConstantPredictor)])
if len(e_types) > 1:
raise ValueError("List of estimators must contain estimators of the"
" same type but contains types {0}".format(e_types))
ovr = OneVsRestClassifier(clone(estimators[0]))
ovr.estimators_ = estimators
ovr.label_binarizer_ = label_binarizer
return ovr.predict(X)
@deprecated("predict_proba_ovr is deprecated and will be removed in 0.18."
"Use the OneVsRestClassifier instead.")
def predict_proba_ovr(estimators, X, is_multilabel):
e_types = set([type(e) for e in estimators if not
isinstance(e, _ConstantPredictor)])
if len(e_types) > 1:
raise ValueError("List of estimators must contain estimators of the"
" same type but contains types {0}".format(e_types))
Y = np.array([e.predict_proba(X)[:, 1] for e in estimators]).T
if not is_multilabel:
# Then, probabilities should be normalized to 1.
Y /= np.sum(Y, axis=1)[:, np.newaxis]
return Y
class _ConstantPredictor(BaseEstimator):
def fit(self, X, y):
self.y_ = y
return self
def predict(self, X):
check_is_fitted(self, 'y_')
return np.repeat(self.y_, X.shape[0])
def decision_function(self, X):
check_is_fitted(self, 'y_')
return np.repeat(self.y_, X.shape[0])
def predict_proba(self, X):
check_is_fitted(self, 'y_')
return np.repeat([np.hstack([1 - self.y_, self.y_])],
X.shape[0], axis=0)
class OneVsRestClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-the-rest (OvR) multiclass/multilabel strategy
Also known as one-vs-all, this strategy consists in fitting one classifier
per class. For each classifier, the class is fitted against all the other
classes. In addition to its computational efficiency (only `n_classes`
classifiers are needed), one advantage of this approach is its
interpretability. Since each class is represented by one and only one
classifier, it is possible to gain knowledge about the class by inspecting its
corresponding classifier. This is the most commonly used strategy for
multiclass classification and is a fair default choice.
This strategy can also be used for multilabel learning, where a classifier
is used to predict multiple labels per instance, by fitting on a 2-d matrix
in which cell [i, j] is 1 if sample i has label j and 0 otherwise.
In the multilabel learning literature, OvR is also known as the binary
relevance method.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_classes` estimators
Estimators used for predictions.
classes_ : array, shape = [`n_classes`]
Class labels.
label_binarizer_ : LabelBinarizer object
Object used to transform multiclass labels to binary labels and
vice-versa.
multilabel_ : boolean
Whether a OneVsRestClassifier is a multilabel classifier.
"""
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
self
"""
# A sparse LabelBinarizer, with sparse_output=True, has been shown to
# outperform or match a dense label binarizer in all cases and has also
# resulted in less or equal memory consumption in the fit_ovr function
# overall.
self.label_binarizer_ = LabelBinarizer(sparse_output=True)
Y = self.label_binarizer_.fit_transform(y)
Y = Y.tocsc()
columns = (col.toarray().ravel() for col in Y.T)
# In cases where individual estimators are very fast to train, setting
# n_jobs > 1 can result in slower performance due to the overhead
# of spawning threads. See joblib issue #112.
self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)(
self.estimator, X, column, classes=[
"not %s" % self.label_binarizer_.classes_[i],
self.label_binarizer_.classes_[i]])
for i, column in enumerate(columns))
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : (sparse) array-like, shape = [n_samples] or [n_samples, n_classes].
Predicted multi-class targets.
"""
check_is_fitted(self, 'estimators_')
if (hasattr(self.estimators_[0], "decision_function") and
is_classifier(self.estimators_[0])):
thresh = 0
else:
thresh = .5
n_samples = _num_samples(X)
if self.label_binarizer_.y_type_ == "multiclass":
maxima = np.empty(n_samples, dtype=float)
maxima.fill(-np.inf)
argmaxima = np.zeros(n_samples, dtype=int)
for i, e in enumerate(self.estimators_):
pred = _predict_binary(e, X)
np.maximum(maxima, pred, out=maxima)
argmaxima[maxima == pred] = i
return self.label_binarizer_.classes_[np.array(argmaxima.T)]
else:
indices = array.array('i')
indptr = array.array('i', [0])
for e in self.estimators_:
indices.extend(np.where(_predict_binary(e, X) > thresh)[0])
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
indicator = sp.csc_matrix((data, indices, indptr),
shape=(n_samples, len(self.estimators_)))
return self.label_binarizer_.inverse_transform(indicator)
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by label of classes.
Note that in the multilabel case, each sample can have any number of
labels. This returns the marginal probability that the given sample has
the label in question. For example, it is entirely consistent that two
labels both have a 90% probability of applying to a given sample.
In the single label multiclass case, the rows of the returned matrix
sum to 1.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : (sparse) array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self, 'estimators_')
# Y[i,j] gives the probability that sample i has the label j.
# In the multi-label case, these are not disjoint.
Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T
if len(self.estimators_) == 1:
# Only one estimator, but we still want to return probabilities
# for two classes.
Y = np.concatenate(((1 - Y), Y), axis=1)
if not self.multilabel_:
# Then, probabilities should be normalized to 1.
Y /= np.sum(Y, axis=1)[:, np.newaxis]
return Y
def decision_function(self, X):
"""Returns the distance of each sample from the decision boundary for
each class. This can only be used with estimators which implement the
decision_function method.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
"""
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "decision_function"):
raise AttributeError(
"Base estimator doesn't have a decision_function attribute.")
return np.array([est.decision_function(X).ravel()
for est in self.estimators_]).T
@property
def multilabel_(self):
"""Whether this is a multilabel classifier"""
return self.label_binarizer_.y_type_.startswith('multilabel')
@property
def classes_(self):
return self.label_binarizer_.classes_
@property
def coef_(self):
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "coef_"):
raise AttributeError(
"Base estimator doesn't have a coef_ attribute.")
return np.array([e.coef_.ravel() for e in self.estimators_])
@property
def intercept_(self):
check_is_fitted(self, 'estimators_')
if not hasattr(self.estimators_[0], "intercept_"):
raise AttributeError(
"Base estimator doesn't have an intercept_ attribute.")
return np.array([e.intercept_.ravel() for e in self.estimators_])
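# --- Illustrative sketch (not part of the original scikit-learn module) ---
# Hedged usage example for the one-vs-the-rest strategy described in the module
# docstring, using the OneVsRestClassifier defined above; LinearSVC and the
# iris data are merely convenient choices, not requirements.
def _example_one_vs_rest():  # illustrative only
    from sklearn.datasets import load_iris
    from sklearn.svm import LinearSVC
    iris = load_iris()
    clf = OneVsRestClassifier(LinearSVC()).fit(iris.data, iris.target)
    return clf.predict(iris.data[:5])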
def _fit_ovo_binary(estimator, X, y, i, j):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.empty(y.shape, np.int)
y_binary[y == i] = 0
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
return _fit_binary(estimator, X[ind[cond]], y_binary, classes=[i, j])
@deprecated("fit_ovo is deprecated and will be removed in 0.18."
"Use the OneVsOneClassifier instead.")
def fit_ovo(estimator, X, y, n_jobs=1):
ovo = OneVsOneClassifier(estimator, n_jobs=n_jobs).fit(X, y)
return ovo.estimators_, ovo.classes_
@deprecated("predict_ovo is deprecated and will be removed in 0.18."
"Use the OneVsOneClassifier instead.")
def predict_ovo(estimators, classes, X):
"""Make predictions using the one-vs-one strategy."""
e_types = set([type(e) for e in estimators if not
isinstance(e, _ConstantPredictor)])
if len(e_types) > 1:
raise ValueError("List of estimators must contain estimators of the"
" same type but contains types {0}".format(e_types))
ovo = OneVsOneClassifier(clone(estimators[0]))
ovo.estimators_ = estimators
ovo.classes_ = classes
return ovo.predict(X)
class OneVsOneClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""One-vs-one multiclass strategy
This strategy consists in fitting one classifier per class pair.
At prediction time, the class which received the most votes is selected.
Since it requires fitting `n_classes * (n_classes - 1) / 2` classifiers,
this method is usually slower than one-vs-the-rest, due to its
O(n_classes^2) complexity. However, this method may be advantageous for
algorithms such as kernel algorithms which don't scale well with
`n_samples`. This is because each individual learning problem only involves
a small subset of the data whereas, with one-vs-the-rest, the complete
dataset is used `n_classes` times.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `n_classes * (n_classes - 1) / 2` estimators
Estimators used for predictions.
classes_ : numpy array of shape [n_classes]
Array containing labels.
"""
def __init__(self, estimator, n_jobs=1):
self.estimator = estimator
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
-------
self
"""
y = np.asarray(y)
check_consistent_length(X, y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)(
self.estimator, X, y, self.classes_[i], self.classes_[j])
for i in range(n_classes) for j in range(i + 1, n_classes))
return self
def predict(self, X):
"""Estimate the best class label for each sample in X.
This is implemented as ``argmax(decision_function(X), axis=1)`` which
will return the label of the class with most votes by estimators
predicting the outcome of a decision for each possible class pair.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : numpy array of shape [n_samples]
Predicted multi-class targets.
"""
Y = self.decision_function(X)
return self.classes_[Y.argmax(axis=1)]
def decision_function(self, X):
"""Decision function for the OneVsOneClassifier.
The decision values for the samples are computed by adding the
normalized sum of pair-wise classification confidence levels to the
votes in order to disambiguate between the decision values when the
votes for all the classes are equal leading to a tie.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
Y : array-like, shape = [n_samples, n_classes]
"""
check_is_fitted(self, 'estimators_')
n_samples = X.shape[0]
n_classes = self.classes_.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = self.estimators_[k].predict(X)
confidence_levels_ij = _predict_binary(self.estimators_[k], X)
sum_of_confidences[:, i] -= confidence_levels_ij
sum_of_confidences[:, j] += confidence_levels_ij
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it with votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
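# --- Illustrative sketch (not part of the original scikit-learn module) ---
# Small numeric illustration of the tie-breaking scheme above: scaling the
# summed confidences into (-0.5, 0.5) lets them reorder tied vote counts
# without ever overturning a difference of one full vote. All numbers below
# are made up.
def _example_ovo_tie_break():  # illustrative only
    votes = np.array([[2., 2., 0.]])                 # classes 0 and 1 are tied
    sum_of_confidences = np.array([[1.5, 4.0, -5.5]])
    eps = np.finfo(sum_of_confidences.dtype).eps
    max_abs = max(abs(sum_of_confidences.max()), abs(sum_of_confidences.min()))
    scale = (0.5 - eps) / max_abs
    decision = votes + sum_of_confidences * scale    # [[2.14, 2.36, -0.5]]
    return decision.argmax(axis=1)                   # class 1 wins the tie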
@deprecated("fit_ecoc is deprecated and will be removed in 0.18."
"Use the OutputCodeClassifier instead.")
def fit_ecoc(estimator, X, y, code_size=1.5, random_state=None, n_jobs=1):
"""Fit an error-correcting output-code strategy.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
code_size : float, optional
Percentage of the number of classes to be used to create the code book.
random_state : numpy.RandomState, optional
The generator used to initialize the codebook. Defaults to
numpy.random.
Returns
-------
estimators : list of `int(n_classes * code_size)` estimators
Estimators used for predictions.
classes : numpy array of shape [n_classes]
Array containing labels.
code_book_ : numpy array of shape [n_classes, code_size]
Binary array containing the code of each class.
"""
ecoc = OutputCodeClassifier(estimator, random_state=random_state,
n_jobs=n_jobs).fit(X, y)
return ecoc.estimators_, ecoc.classes_, ecoc.code_book_
@deprecated("predict_ecoc is deprecated and will be removed in 0.18."
"Use the OutputCodeClassifier instead.")
def predict_ecoc(estimators, classes, code_book, X):
"""Make predictions using the error-correcting output-code strategy."""
ecoc = OutputCodeClassifier(clone(estimators[0]))
ecoc.classes_ = classes
ecoc.estimators_ = estimators
ecoc.code_book_ = code_book
return ecoc.predict(X)
class OutputCodeClassifier(BaseEstimator, ClassifierMixin, MetaEstimatorMixin):
"""(Error-Correcting) Output-Code multiclass strategy
Output-code based strategies consist in representing each class with a
binary code (an array of 0s and 1s). At fitting time, one binary
classifier per bit in the code book is fitted. At prediction time, the
classifiers are used to project new points in the class space and the class
closest to the points is chosen. The main advantage of these strategies is
that the number of classifiers used can be controlled by the user, either
for compressing the model (0 < code_size < 1) or for making the model more
robust to errors (code_size > 1). See the documentation for more details.
Parameters
----------
estimator : estimator object
An estimator object implementing `fit` and one of `decision_function`
or `predict_proba`.
code_size : float
Percentage of the number of classes to be used to create the code book.
A number between 0 and 1 will require fewer classifiers than
one-vs-the-rest. A number greater than 1 will require more classifiers
than one-vs-the-rest.
random_state : numpy.RandomState, optional
The generator used to initialize the codebook. Defaults to
numpy.random.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
estimators_ : list of `int(n_classes * code_size)` estimators
Estimators used for predictions.
classes_ : numpy array of shape [n_classes]
Array containing labels.
code_book_ : numpy array of shape [n_classes, code_size]
Binary array containing the code of each class.
References
----------
.. [1] "Solving multiclass learning problems via error-correcting output
codes",
Dietterich T., Bakiri G.,
Journal of Artificial Intelligence Research 2,
1995.
.. [2] "The error coding method and PICTs",
James G., Hastie T.,
Journal of Computational and Graphical statistics 7,
1998.
.. [3] "The Elements of Statistical Learning",
Hastie T., Tibshirani R., Friedman J., page 606 (second-edition)
2008.
"""
def __init__(self, estimator, code_size=1.5, random_state=None, n_jobs=1):
self.estimator = estimator
self.code_size = code_size
self.random_state = random_state
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : numpy array of shape [n_samples]
Multi-class targets.
Returns
-------
self
"""
if self.code_size <= 0:
raise ValueError("code_size should be greater than 0, got {1}"
"".format(self.code_size))
_check_estimator(self.estimator)
random_state = check_random_state(self.random_state)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
code_size_ = int(n_classes * self.code_size)
# FIXME: there are more elaborate methods than generating the codebook
# randomly.
self.code_book_ = random_state.random_sample((n_classes, code_size_))
self.code_book_[self.code_book_ > 0.5] = 1
if hasattr(self.estimator, "decision_function"):
self.code_book_[self.code_book_ != 1] = -1
else:
self.code_book_[self.code_book_ != 1] = 0
classes_index = dict((c, i) for i, c in enumerate(self.classes_))
Y = np.array([self.code_book_[classes_index[y[i]]]
for i in range(X.shape[0])], dtype=np.int)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_binary)(self.estimator, X, Y[:, i])
for i in range(Y.shape[1]))
return self
def predict(self, X):
"""Predict multi-class targets using underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
Returns
-------
y : numpy array of shape [n_samples]
Predicted multi-class targets.
"""
check_is_fitted(self, 'estimators_')
Y = np.array([_predict_binary(e, X) for e in self.estimators_]).T
pred = euclidean_distances(Y, self.code_book_).argmin(axis=1)
return self.classes_[pred]
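# --- Illustrative sketch (not part of the original scikit-learn module) ---
# Hedged usage example for the output-code strategy: ``code_size=2`` asks for
# roughly twice as many binary classifiers as classes. The base estimator and
# data set are placeholders, not requirements.
def _example_output_code():  # illustrative only
    from sklearn.datasets import load_iris
    from sklearn.svm import LinearSVC
    iris = load_iris()
    clf = OutputCodeClassifier(LinearSVC(), code_size=2, random_state=0)
    return clf.fit(iris.data, iris.target).predict(iris.data[:5])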
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LSTM layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.contrib.keras.python.keras import testing_utils
from tensorflow.python.platform import test
class LSTMLayerTest(test.TestCase):
def test_return_sequences_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
def test_dynamic_behavior_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
layer = keras.layers.LSTM(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_implementation_mode_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
for mode in [0, 1, 2]:
testing_utils.layer_test(
keras.layers.LSTM,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_statefulness_LSTM(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.LSTM
with self.test_session():
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
def test_regularization_LSTM(self):
embedding_dim = 4
layer_class = keras.layers.LSTM
with self.test_session():
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((2, 3, 2))))
self.assertEqual(len(layer.losses), 4)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=keras.constraints.max_norm(0.01),
recurrent_constraint=keras.constraints.max_norm(0.01),
bias_constraint='max_norm')
layer.build((None, None, embedding_dim))
self.assertEqual(len(layer.constraints), 3)
def test_with_masking_layer_LSTM(self):
layer_class = keras.layers.LSTM
with self.test_session():
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = keras.layers.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_specify_initial_state_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
with self.test_session():
# Test with Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.Input((units,)) for _ in range(num_states)]
layer = keras.layers.LSTM(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
assert initial_state[0] in layer.inbound_nodes[0].input_tensors
model = keras.models.Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
def test_specify_initial_state_non_keras_tensor(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
with self.test_session():
# Test with non-Keras tensor
inputs = keras.Input((timesteps, embedding_dim))
initial_state = [keras.backend.random_normal_variable(
(num_samples, units), 0, 1)
for _ in range(num_states)]
layer = keras.layers.LSTM(units)
output = layer(inputs, initial_state=initial_state)
model = keras.models.Model(inputs, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
model.train_on_batch(inputs, targets)
def test_reset_states_with_values(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
with self.test_session():
layer = keras.layers.LSTM(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
np.testing.assert_allclose(
keras.backend.eval(layer.states[0]),
np.zeros(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [keras.backend.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
np.testing.assert_allclose(
keras.backend.eval(layer.states[0]),
np.ones(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
# Test with invalid data
with self.assertRaises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
def test_specify_state_with_masking(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
with self.test_session():
inputs = keras.Input((timesteps, embedding_dim))
_ = keras.layers.Masking()(inputs)
initial_state = [keras.Input((units,)) for _ in range(num_states)]
output = keras.layers.LSTM(units)(inputs, initial_state=initial_state)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([inputs] + initial_state, targets)
if __name__ == '__main__':
test.main()
|
|
"""Functions for dealing with evoked BOLD response from brain regions.
The functions in this module are integrated with the lyman hierarchy
and use Python tools (nibabel and nitime) for processing.
"""
import os
import os.path as op
from glob import glob
import hashlib
import numpy as np
import scipy as sp
from scipy.interpolate import interp1d
import pandas as pd
import nibabel as nib
import nitime as nit
from lyman import gather_project_info
def extract_subject(subj, mask_name, summary_func=np.mean,
residual=False, exp_name=None):
"""Extract timeseries from within a mask, summarizing flexibly.
Parameters
----------
subj : string
subject name
mask_name : string
name of mask in data hierarchy
summary_func : callable or None
callable to reduce data over voxel dimensions. can take an
``axis`` argument to operate over each frame, if this
argument does not exist the function will be called on the
n_tr x n_voxel array. if None, simply returns all voxels.
residual : boolean
If True, extract from the registered residual timecourse.
exp_name : string
experiment name, if not using the default experiment
Returns
-------
data : dict with ndarray
data array is n_runs x n_timepoint x n_dimension,
data are not otherwise altered
"""
project = gather_project_info()
if exp_name is None:
exp_name = project["default_exp"]
# Get a path to the file where the extracted data will be cached
cache_dir = op.join(project["analysis_dir"],
exp_name, subj, "evoked")
try:
os.makedirs(cache_dir)
except OSError:
pass
if summary_func is None:
func_name = ""
else:
func_name = summary_func.__name__
cache_fname = mask_name + "_" + func_name
cache_fname = cache_fname.strip("_") + ".npz"
cache_file = op.join(cache_dir, cache_fname)
# Get paths to the relevant files
mask_file = op.join(project["data_dir"], subj, "masks",
"%s.nii.gz" % mask_name)
ts_dir = op.join(project["analysis_dir"], exp_name, subj,
"reg", "epi", "unsmoothed")
n_runs = len(glob(op.join(ts_dir, "run_*")))
ftemp = op.join(ts_dir, "run_{:d}/{}_xfm.nii.gz")
fstem = "res4d" if residual else "timeseries"
ts_files = [ftemp.format(r_i, fstem) for r_i in range(n_runs)]
# Get the hash value for this extraction
cache_hash = hashlib.sha1()
cache_hash.update(mask_name)
cache_hash.update(str(op.getmtime(mask_file)))
for ts_file in ts_files:
cache_hash.update(str(op.getmtime(ts_file)))
cache_hash = cache_hash.hexdigest()
# If the file exists and the hash matches, return the data
if op.exists(cache_file):
with np.load(cache_file) as cache_obj:
if cache_hash == str(cache_obj["hash"]):
return dict(cache_obj.items())
# Otherwise, do the extraction
data = []
mask = nib.load(mask_file).get_data().astype(bool)
for run, ts_file in enumerate(ts_files):
ts_data = nib.load(ts_file).get_data()
roi_data = ts_data[mask].T
if summary_func is None:
data.append(roi_data)
continue
# Try to use the axis argument to summarize over voxels
try:
roi_data = summary_func(roi_data, axis=1)
# Catch a TypeError and just call the function
# This lets us do e.g. a PCA
except TypeError:
roi_data = summary_func(roi_data)
data.append(roi_data)
data = np.array(list(map(np.squeeze, data)))
# Save the results and return them
data_dict = dict(data=data, subj=subj, hash=cache_hash)
np.savez(cache_file, **data_dict)
return data_dict
def extract_group(mask_name, summary_func=np.mean,
exp_name=None, subjects=None, dv=None):
"""Extract timeseries from within a mask, summarizing flexibly.
Parameters
----------
mask_name : string
name of mask in data hierarchy
summary_func : callable or None
callable to reduce data over voxel dimensions. can take an
``axis`` argument to operate over each frame, if this
argument does not exist the function will be called on the
n_tr x n_voxel array. if None, simply returns all voxels.
exp_name : string
experiment name, if not using the default experiment
subjects : sequence of strings
subjects to operate over if not using default subject list
dv : IPython cluster direct view
if provided with view on cluster, executes in parallel over
subjects
Returns
-------
data : list of dicts with ndarrays
each array is squeezed n_runs x n_timepoint x n_dimension
data is not otherwise altered
"""
if dv is None:
import __builtin__
_map = __builtin__.map
else:
_map = dv.map_sync
if subjects is None:
subj_file = op.join(os.environ["LYMAN_DIR"], "subjects.txt")
subjects = np.loadtxt(subj_file, str).tolist()
mask_name = [mask_name for s in subjects]
summary_func = [summary_func for s in subjects]
exp_name = [exp_name for s in subjects]
data = _map(extract_subject, subjects, mask_name,
summary_func, exp_name)
for d in data:
d["data"] = np.asarray(d["data"])
return data
def calculate_evoked(data, n_bins, problem=None, events=None, tr=2,
calc_method="FIR", offset=0, upsample=1,
percent_change=True, correct_baseline=True,
event_names=None):
"""Calcuate an evoked response for a list of datapoints.
Parameters
----------
data : sequence of n_run x n_tp arrays
timeseries data
n_bins : int
number of bins for the peristimulus trace
problem : string
problem name for event file in data hierarchy
overrides `events` if both are passed
events : dataframe or list of dataframes
one dataframe describing event information for each subj.
must contain `onset`, `run`, and `condition` columns
caution: `run` should be 1-based
tr : int
original time resolution of the data
upsample : int
factor to upsample the data with using cubic splines
calc_method : string
name of method on nitime EventRelatedAnalyzer object to
calculate the evoked response.
offset : float
value to adjust onset times by
percent_change : boolean
if True, convert signal to percent change by run
correct_baseline : boolean
if True, adjust evoked trace to be 0 in first bin
event_names : list of strings
names of conditions, otherwise uses sorted unique
values for the condition field in the event dataframe
Returns
-------
evoked : squeezed n_obs x n_class x n_bins array
evoked response, by observation and event type
"""
project = gather_project_info()
design_template = op.join(project["data_dir"], "%s",
"design/%s.csv" % problem)
evoked = []
for i, data_i in enumerate(data):
# Can get event information in one of two ways
if problem is not None:
subj = data_i["subj"]
events_i = pd.read_csv(design_template % subj)
else:
events_i = events[i]
# Map from event names to integer index values
if event_names is None:
event_names = sorted(events_i.condition.unique())
event_map = pd.Series(range(1, len(event_names) + 1),
index=event_names)
# Create the timeseries of event occurrences
calc_tr = float(tr) / upsample
event_list = []
data_list = []
for run, run_data in enumerate(data_i["data"], 1):
# Possibly upsample the data
if upsample != 1:
time_points = len(run_data)
x = np.linspace(0, time_points - 1, time_points)
xx = np.linspace(0, time_points,
time_points * upsample + 1)[:-upsample]
interpolator = interp1d(x, run_data, "cubic", axis=0)
run_data = interpolator(xx)
run_events = events_i[events_i.run == run]
run_events.onset += offset
event_id = np.zeros(len(run_data), int)
event_index = np.array(run_events.onset / calc_tr).astype(int)
event_id[event_index] = run_events.condition.map(event_map)
event_list.append(event_id)
if percent_change:
run_data = nit.utils.percent_change(run_data, ax=0)
data_list.append(run_data)
# Set up the Nitime objects
event_info = np.concatenate(event_list)
data = np.concatenate(data_list, axis=0)
# Do the calculations
calc_bins = n_bins * upsample
if data.ndim == 1:
evoked_data = _evoked_1d(data, event_info, calc_bins, calc_tr,
calc_method, correct_baseline)
elif data.ndim == 2:
evoked_data = _evoked_2d(data, event_info, n_bins, calc_tr,
calc_method, correct_baseline)
evoked.append(evoked_data)
return np.array(evoked).squeeze()
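# --- Illustrative sketch (not part of the original lyman module) ---
# The upsampling step inside ``calculate_evoked`` interpolates each run onto a
# grid ``upsample`` times finer while keeping the original samples on it. A
# minimal standalone sketch of that grid construction (``run_data`` needs at
# least four time points for cubic interpolation):
def _example_upsample(run_data, upsample=2):  # illustrative only
    import numpy as np
    from scipy.interpolate import interp1d
    time_points = len(run_data)
    x = np.linspace(0, time_points - 1, time_points)
    xx = np.linspace(0, time_points,
                     time_points * upsample + 1)[:-upsample]
    return interp1d(x, run_data, "cubic", axis=0)(xx)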
def _evoked_1d(data, events, n_bins, tr, calc_method, correct_baseline):
events_ts = nit.TimeSeries(events, sampling_interval=tr)
data_ts = nit.TimeSeries(data, sampling_interval=tr)
analyzer = nit.analysis.EventRelatedAnalyzer(data_ts, events_ts, n_bins)
evoked_data = getattr(analyzer, calc_method)
evoked_data = np.asarray(evoked_data).T.astype(float)
if evoked_data.ndim == 1:
evoked_data = np.array([evoked_data])
if correct_baseline:
evoked_data = evoked_data - evoked_data[:, 0, None]
return evoked_data
def _evoked_2d(data, events, n_bins, tr, calc_method, correct_baseline):
evoked_data = []
for data_i in data.T:
evoked_data_i = _evoked_1d(data_i, events, n_bins, tr,
calc_method, correct_baseline)
evoked_data.append(evoked_data_i)
evoked_data = np.transpose(evoked_data, (1, 2, 0))
return evoked_data
def integrate_evoked(evoked, axis=-1):
"""Integrate a peristumulus timecourse.
Parameters
----------
evoked : list of 2D arrays or 2D array
values of evoked datapoints
Returns
-------
int_evoked : squeezed array
evoked values integrated over the time dimension
"""
return sp.trapz(evoked, axis=axis)
|
|
# -*- coding: utf-8 -*-
import copy
import glob
import io
import os
import struct
import sys
import time
import unittest
from PIL import Image
import piexif
from piexif import _common, ImageIFD, ExifIFD, GPSIFD, TAGS, InvalidImageDataError
from piexif import _webp
from piexif import helper
print("piexif version: {}".format(piexif.VERSION))
INPUT_FILE1 = os.path.join("tests", "images", "01.jpg")
INPUT_FILE2 = os.path.join("tests", "images", "02.jpg")
INPUT_FILE_PEN = os.path.join("tests", "images", "r_pen.jpg")
NOEXIF_FILE = os.path.join("tests", "images", "noexif.jpg")
# JPEG without APP0 and APP1 segments
NOAPP01_FILE = os.path.join("tests", "images", "noapp01.jpg")
INPUT_FILE_TIF = os.path.join("tests", "images", "01.tif")
with open(INPUT_FILE1, "rb") as f:
I1 = f.read()
with open(INPUT_FILE2, "rb") as f:
I2 = f.read()
ZEROTH_IFD = {ImageIFD.Software: b"PIL", # ascii
ImageIFD.Make: b"Make", # ascii
ImageIFD.Model: b"XXX-XXX", # ascii
ImageIFD.ResolutionUnit: 65535, # short
ImageIFD.BitsPerSample: (24, 24, 24), # short * 3
ImageIFD.XResolution: (4294967295, 1), # rational
ImageIFD.BlackLevelDeltaH: ((1, 1), (1, 1), (1, 1)), # srational
ImageIFD.ZZZTestSlong1: -11,
ImageIFD.ZZZTestSlong2: (-11, -11, -11, -11),
}
EXIF_IFD = {ExifIFD.DateTimeOriginal: b"2099:09:29 10:10:10", # ascii
ExifIFD.LensMake: b"LensMake", # ascii
ExifIFD.OECF: b"\xaa\xaa\xaa\xaa\xaa\xaa", # undefined
ExifIFD.Sharpness: 65535, # short
ExifIFD.ISOSpeed: 4294967295, # long
ExifIFD.ExposureTime: (4294967295, 1), # rational
ExifIFD.LensSpecification: ((1, 1), (1, 1), (1, 1), (1, 1)),
ExifIFD.ExposureBiasValue: (2147483647, -2147483648), # srational
}
GPS_IFD = {GPSIFD.GPSVersionID: (0, 0, 0, 1), # byte
GPSIFD.GPSAltitudeRef: 1, # byte
GPSIFD.GPSDateStamp: b"1999:99:99 99:99:99", # ascii
GPSIFD.GPSDifferential: 65535, # short
GPSIFD.GPSLatitude: (4294967295, 1), # rational
}
FIRST_IFD = {ImageIFD.Software: b"PIL", # ascii
ImageIFD.Make: b"Make", # ascii
ImageIFD.Model: b"XXX-XXX", # ascii
ImageIFD.BitsPerSample: (24, 24, 24), # short * 3
ImageIFD.BlackLevelDeltaH: ((1, 1), (1, 1), (1, 1)), # srational
}
INTEROP_IFD = {piexif.InteropIFD.InteroperabilityIndex: b"R98"}
def load_exif_by_PIL(f):
i = Image.open(f)
e = i._getexif()
i.close()
return e
def pack_byte(*args):
return struct.pack("B" * len(args), *args)
class ExifTests(unittest.TestCase):
"""tests for main five functions."""
# load ------
def test_no_exif_load(self):
exif_dict = piexif.load(NOEXIF_FILE)
none_dict = {"0th":{},
"Exif":{},
"GPS":{},
"Interop":{},
"1st":{},
"thumbnail":None}
self.assertEqual(exif_dict, none_dict)
def test_load(self):
files = glob.glob(os.path.join("tests", "images", "r_*.jpg"))
for input_file in files:
exif = piexif.load(input_file)
e = load_exif_by_PIL(input_file)
print("********************\n" + input_file + "\n")
self._compare_piexifDict_PILDict(exif, e, p=False)
def test_load_m(self):
"""'load' on memory.
"""
exif = piexif.load(I1)
e = load_exif_by_PIL(INPUT_FILE1)
print("********************\n\n" + INPUT_FILE1 + "\n")
self._compare_piexifDict_PILDict(exif, e)
def test_load_tif(self):
exif = piexif.load(INPUT_FILE_TIF)
zeroth_ifd = exif["0th"]
exif_bytes = piexif.dump({"0th":zeroth_ifd})
im = Image.new("RGB", (8, 8))
o = io.BytesIO()
im.save(o, format="jpeg", exif=exif_bytes)
im.close()
exif2 = piexif.load(o.getvalue())
zeroth_ifd2 = exif2["0th"]
self.assertDictEqual(zeroth_ifd, zeroth_ifd2)
def test_load_tif_m(self):
with open(INPUT_FILE_TIF, "rb") as f:
tif = f.read()
exif = piexif.load(tif)
zeroth_ifd = exif["0th"]
exif_bytes = piexif.dump({"0th":zeroth_ifd})
im = Image.new("RGB", (8, 8))
o = io.BytesIO()
im.save(o, format="jpeg", exif=exif_bytes)
im.close()
exif2 = piexif.load(o.getvalue())
zeroth_ifd2 = exif2["0th"]
self.assertDictEqual(zeroth_ifd, zeroth_ifd2)
def test_load_from_pilImage_property(self):
o = io.BytesIO()
i = Image.open(INPUT_FILE1)
exif = i.info["exif"]
exif_dict = piexif.load(exif)
exif_bytes = piexif.dump(exif_dict)
i.save(o, "jpeg", exif=exif_bytes)
i.close()
o.seek(0)
Image.open(o).close()
def test_load_name_dict(self):
thumbnail_io = io.BytesIO()
thumb = Image.open(INPUT_FILE2)
thumb.thumbnail((40, 40))
thumb.save(thumbnail_io, "JPEG")
thumb.close()
thumb_data = thumbnail_io.getvalue()
exif_dict = {"0th":ZEROTH_IFD,
"Exif":EXIF_IFD,
"GPS":GPS_IFD,
"Interop":INTEROP_IFD,
"1st":FIRST_IFD,
"thumbnail":thumb_data}
exif_bytes = piexif.dump(exif_dict)
im = Image.new("RGB", (80, 80))
o = io.BytesIO()
im.save(o, format="jpeg", exif=exif_bytes)
im.close()
o.seek(0)
exif = piexif.load(o.getvalue(), True)
print(exif)
def test_load_unicode_filename(self):
input_file = os.path.join(u"tests", u"images", u"r_sony.jpg")
exif = piexif.load(input_file)
e = load_exif_by_PIL(input_file)
self._compare_piexifDict_PILDict(exif, e, p=False)
# dump ------
def test_no_exif_dump(self):
o = io.BytesIO()
exif_bytes = piexif.dump({})
i = Image.new("RGB", (8, 8))
i.save(o, format="jpeg", exif=exif_bytes)
o.seek(0)
exif_dict2 = load_exif_by_PIL(o)
self.assertDictEqual({}, exif_dict2)
def test_dump(self):
exif_dict = {"0th":ZEROTH_IFD, "Exif":EXIF_IFD, "GPS":GPS_IFD}
t = time.time()
exif_bytes = piexif.dump(exif_dict)
t_cost = time.time() - t
print("'dump': {}[sec]".format(t_cost))
im = Image.new("RGB", (8, 8))
o = io.BytesIO()
im.save(o, format="jpeg", exif=exif_bytes)
im.close()
o.seek(0)
exif = load_exif_by_PIL(o)
def test_dump_fail(self):
with open(os.path.join("tests", "images", "large.jpg"), "rb") as f:
thumb_data = f.read()
exif_dict = {"0th":ZEROTH_IFD,
"Exif":EXIF_IFD,
"GPS":GPS_IFD,
"Interop":INTEROP_IFD,
"1st":FIRST_IFD,
"thumbnail":thumb_data}
with self.assertRaises(ValueError):
piexif.dump(exif_dict)
def test_dump_fail2(self):
exif_ifd = {ExifIFD.DateTimeOriginal: 123}
exif_dict = {"Exif":exif_ifd}
with self.assertRaises(ValueError):
piexif.dump(exif_dict)
def test_dump_fail3(self):
exif_ifd = {ExifIFD.OECF: 1}
exif_dict = {"Exif":exif_ifd}
with self.assertRaises(ValueError):
piexif.dump(exif_dict)
def test_dump_fail4(self):
exif_ifd = {ExifIFD.OECF: (1, 2, 3, 4, 5)}
exif_dict = {"Exif":exif_ifd}
with self.assertRaises(ValueError):
piexif.dump(exif_dict)
# load and dump ------
def test_dump_and_load(self):
exif_dict = {"0th":ZEROTH_IFD, "Exif":EXIF_IFD, "GPS":GPS_IFD}
exif_bytes = piexif.dump(exif_dict)
im = Image.new("RGB", (8, 8))
o = io.BytesIO()
im.save(o, format="jpeg", exif=exif_bytes)
im.close()
o.seek(0)
exif = piexif.load(o.getvalue())
zeroth_ifd, exif_ifd, gps_ifd = exif["0th"], exif["Exif"], exif["GPS"]
zeroth_ifd.pop(ImageIFD.ExifTag) # pointer to exif IFD
zeroth_ifd.pop(ImageIFD.GPSTag) # pointer to GPS IFD
self.assertDictEqual(ZEROTH_IFD, zeroth_ifd)
self.assertDictEqual(EXIF_IFD, exif_ifd)
self.assertDictEqual(GPS_IFD, gps_ifd)
def test_dump_and_load2(self):
thumbnail_io = io.BytesIO()
thumb = Image.open(INPUT_FILE2)
thumb.thumbnail((40, 40))
thumb.save(thumbnail_io, "JPEG")
thumb.close()
thumb_data = thumbnail_io.getvalue()
exif_dict = {"0th":ZEROTH_IFD,
"Exif":EXIF_IFD,
"GPS":GPS_IFD,
"Interop":INTEROP_IFD,
"1st":FIRST_IFD,
"thumbnail":thumb_data}
exif_bytes = piexif.dump(exif_dict)
im = Image.new("RGB", (80, 80))
o = io.BytesIO()
im.save(o, format="jpeg", exif=exif_bytes)
im.close()
o.seek(0)
exif = piexif.load(o.getvalue())
exif["0th"].pop(ImageIFD.ExifTag) # pointer to exif IFD
exif["0th"].pop(ImageIFD.GPSTag) # pointer to GPS IFD
exif["Exif"].pop(ExifIFD.InteroperabilityTag)
self.assertDictEqual(ZEROTH_IFD, exif["0th"])
self.assertDictEqual(EXIF_IFD, exif["Exif"])
self.assertDictEqual(GPS_IFD, exif["GPS"])
self.assertDictEqual(INTEROP_IFD, exif["Interop"])
exif["1st"].pop(513) # pointer to exif IFD
exif["1st"].pop(514) # pointer to GPS IFD
self.assertDictEqual(FIRST_IFD, exif["1st"])
Image.open(io.BytesIO(exif["thumbnail"])).close()
def test_dump_and_load3(self):
ascii_v = ["a", "ab", "abc", "abcd", "abcde"]
undefined_v = [b"\x00",
b"\x00\x01",
b"\x00\x01\x02",
b"\x00\x01\x02\x03",
b"\x00\x01\x02\x03\x04"]
byte_v = [255,
(255, 254),
(255, 254, 253),
(255, 254, 253, 252),
(255, 254, 253, 252, 251)]
short_v = [65535,
(65535, 65534),
(65535, 65534, 65533),
(65535, 65534, 65533, 65532),
(65535, 65534, 65533, 65532, 65531)]
long_v = [4294967295,
(4294967295, 4294967294),
(4294967295, 4294967294, 4294967293),
(4294967295, 4294967294, 4294967293, 4294967292),
(5, 4, 3, 2, 1)]
rational_v = [(4294967295, 4294967294),
((4294967295, 4294967294), (4294967293, 4294967292)),
((1, 2), (3, 4), (5, 6)),
((1, 2), (3, 4), (5, 6), (7, 8)),
((1, 2), (3, 4), (5, 6), (7, 8), (9, 10))]
srational_v = [(2147483647, -2147483648),
((2147483647, -2147483648), (2147483645, 2147483644)),
((1, 2), (3, 4), (5, 6)),
((1, 2), (3, 4), (5, 6), (7, 8)),
((1, 2), (3, 4), (5, 6), (7, 8), (9, 10))]
for x in range(5):
exif_dict = {
"0th":{ImageIFD.ProcessingSoftware:ascii_v[x],
ImageIFD.InterColorProfile:undefined_v[x],
ImageIFD.SubfileType:short_v[x],
ImageIFD.WhitePoint:rational_v[x],
ImageIFD.BlackLevelDeltaH:srational_v[x]},
"Exif":{ExifIFD.ISOSpeed:long_v[x]},
"GPS":{GPSIFD.GPSVersionID:byte_v[x]},}
exif_bytes = piexif.dump(exif_dict)
e = piexif.load(exif_bytes)
self.assertEqual(
e["0th"][ImageIFD.ProcessingSoftware].decode("latin1"),
ascii_v[x])
self.assertEqual(
e["0th"][ImageIFD.InterColorProfile], undefined_v[x])
self.assertEqual(e["0th"][ImageIFD.SubfileType], short_v[x])
self.assertEqual(e["0th"][ImageIFD.WhitePoint], rational_v[x])
self.assertEqual(
e["0th"][ImageIFD.BlackLevelDeltaH], srational_v[x])
self.assertEqual(e["Exif"][ExifIFD.ISOSpeed], long_v[x])
self.assertEqual(e["GPS"][GPSIFD.GPSVersionID], byte_v[x])
def test_dump_and_load_specials(self):
"""test dump and load special types(SingedByte, SiginedShort, DoubleFloat)"""
zeroth_ifd_original = {
ImageIFD.ZZZTestSByte:-128,
ImageIFD.ZZZTestSShort:-32768,
ImageIFD.ZZZTestDFloat:1.0e-100,
}
exif_dict = {"0th":zeroth_ifd_original}
exif_bytes = piexif.dump(exif_dict)
exif = piexif.load(exif_bytes)
zeroth_ifd = exif["0th"]
self.assertEqual(
zeroth_ifd_original[ImageIFD.ZZZTestSByte],
zeroth_ifd[ImageIFD.ZZZTestSByte]
)
self.assertEqual(
zeroth_ifd_original[ImageIFD.ZZZTestSShort],
zeroth_ifd[ImageIFD.ZZZTestSShort]
)
self.assertEqual(
zeroth_ifd_original[ImageIFD.ZZZTestDFloat],
zeroth_ifd[ImageIFD.ZZZTestDFloat]
)
def test_dump_and_load_specials2(self):
"""test dump and load special types(SingedByte, SiginedShort, DoubleFloat)"""
zeroth_ifd_original = {
ImageIFD.ZZZTestSByte:(-128, -128),
ImageIFD.ZZZTestSShort:(-32768, -32768),
ImageIFD.ZZZTestDFloat:(1.0e-100, 1.0e-100),
}
exif_dict = {"0th":zeroth_ifd_original}
exif_bytes = piexif.dump(exif_dict)
exif = piexif.load(exif_bytes)
zeroth_ifd = exif["0th"]
self.assertEqual(
zeroth_ifd_original[ImageIFD.ZZZTestSByte],
zeroth_ifd[ImageIFD.ZZZTestSByte]
)
self.assertEqual(
zeroth_ifd_original[ImageIFD.ZZZTestSShort],
zeroth_ifd[ImageIFD.ZZZTestSShort]
)
self.assertEqual(
zeroth_ifd_original[ImageIFD.ZZZTestDFloat],
zeroth_ifd[ImageIFD.ZZZTestDFloat]
)
def test_roundtrip_files(self):
files = glob.glob(os.path.join("tests", "images", "r_*.jpg"))
for input_file in files:
print(input_file)
exif = piexif.load(input_file)
exif_bytes = piexif.dump(exif)
o = io.BytesIO()
piexif.insert(exif_bytes, input_file, o)
e = piexif.load(o.getvalue())
t = e.pop("thumbnail")
thumbnail = exif.pop("thumbnail")
if t is not None:
if not (b"\xe0" <= thumbnail[3:4] <= b"\xef"):
self.assertEqual(t, thumbnail)
else:
print("Given JPEG doesn't follow exif thumbnail standard. "
"APPn segments in thumbnail should be removed, "
"whereas thumbnail JPEG has it. \n: " +
input_file)
exif["1st"].pop(513)
e["1st"].pop(513)
exif["1st"].pop(514)
e["1st"].pop(514)
for ifd in e:
if ifd == "0th":
if ImageIFD.ExifTag in exif["0th"]:
exif["0th"].pop(ImageIFD.ExifTag)
e["0th"].pop(ImageIFD.ExifTag)
if ImageIFD.GPSTag in exif["0th"]:
exif["0th"].pop(ImageIFD.GPSTag)
e["0th"].pop(ImageIFD.GPSTag)
elif ifd == "Exif":
if ExifIFD.InteroperabilityTag in exif["Exif"]:
exif["Exif"].pop(ExifIFD.InteroperabilityTag)
e["Exif"].pop(ExifIFD.InteroperabilityTag)
for key in exif[ifd]:
self.assertEqual(exif[ifd][key], e[ifd][key])
print(" - pass")
# transplant ------
def test_transplant(self):
piexif.transplant(INPUT_FILE1, INPUT_FILE_PEN, "transplant.jpg")
i = Image.open("transplant.jpg")
i.close()
exif_src = piexif.load(INPUT_FILE1)
img_src = piexif.load(INPUT_FILE_PEN)
generated = piexif.load("transplant.jpg")
self.assertEqual(exif_src, generated)
self.assertNotEqual(img_src, generated)
piexif.transplant(INPUT_FILE1, "transplant.jpg")
self.assertEqual(piexif.load(INPUT_FILE1),
piexif.load("transplant.jpg"))
os.remove("transplant.jpg")
def test_transplant_m(self):
"""'transplant' on memory.
"""
o = io.BytesIO()
piexif.transplant(I1, I2, o)
self.assertEqual(piexif.load(I1), piexif.load(o.getvalue()))
Image.open(o).close()
def test_transplant_fail1(self):
with self.assertRaises(ValueError):
piexif.transplant(I1, I2, False)
def test_transplant_fail2(self):
with self.assertRaises(ValueError):
piexif.transplant(NOEXIF_FILE, I2, "foo.jpg")
# remove ------
def test_remove(self):
piexif.remove(INPUT_FILE1, "remove.jpg")
exif_dict = piexif.load("remove.jpg")
none_dict = {"0th":{},
"Exif":{},
"GPS":{},
"Interop":{},
"1st":{},
"thumbnail":None}
self.assertEqual(exif_dict, none_dict)
piexif.remove("remove.jpg")
exif_dict = piexif.load("remove.jpg")
self.assertEqual(exif_dict, none_dict)
os.remove("remove.jpg")
def test_remove2(self):
with open(INPUT_FILE1, "rb") as f:
data = f.read()
with open("remove2.jpg", "wb+") as f:
f.write(data)
piexif.remove("remove2.jpg")
exif_dict = piexif.load("remove2.jpg")
none_dict = {"0th":{},
"Exif":{},
"GPS":{},
"Interop":{},
"1st":{},
"thumbnail":None}
self.assertEqual(exif_dict, none_dict)
os.remove("remove2.jpg")
def test_remove_m(self):
"""'remove' on memory.
"""
o = io.BytesIO()
with self.assertRaises(ValueError):
piexif.remove(I1)
piexif.remove(I1, o)
exif_dict = piexif.load(o.getvalue())
none_dict = {"0th":{},
"Exif":{},
"GPS":{},
"Interop":{},
"1st":{},
"thumbnail":None}
self.assertEqual(exif_dict, none_dict)
Image.open(o).close()
# insert ------
def test_insert(self):
exif_dict = {"0th":ZEROTH_IFD, "Exif":EXIF_IFD, "GPS":GPS_IFD}
exif_bytes = piexif.dump(exif_dict)
piexif.insert(exif_bytes, INPUT_FILE1, "insert.jpg")
exif = load_exif_by_PIL("insert.jpg")
piexif.insert(exif_bytes, NOEXIF_FILE, "insert.jpg")
with self.assertRaises(ValueError):
piexif.insert(b"dummy", io.BytesIO())
piexif.insert(exif_bytes, "insert.jpg")
os.remove("insert.jpg")
def test_insert_m(self):
"""'insert' on memory.
"""
exif_dict = {"0th":ZEROTH_IFD, "Exif":EXIF_IFD, "GPS":GPS_IFD}
exif_bytes = piexif.dump(exif_dict)
o = io.BytesIO()
piexif.insert(exif_bytes, I1, o)
self.assertEqual(o.getvalue()[0:2], b"\xff\xd8")
exif = load_exif_by_PIL(o)
def test_insert_fail1(self):
with open(INPUT_FILE1, "rb") as f:
data = f.read()
with open("insert.jpg", "wb+") as f:
f.write(data)
exif_dict = {"0th":ZEROTH_IFD, "Exif":EXIF_IFD, "GPS":GPS_IFD}
exif_bytes = piexif.dump(exif_dict)
with self.assertRaises(ValueError):
piexif.insert(exif_bytes, INPUT_FILE_TIF)
os.remove("insert.jpg")
def test_insert_fail2(self):
exif_dict = {"0th":ZEROTH_IFD, "Exif":EXIF_IFD, "GPS":GPS_IFD}
exif_bytes = piexif.dump(exif_dict)
with self.assertRaises(ValueError):
piexif.insert(exif_bytes, I1, False)
# ------
def test_print_exif(self):
print("\n**********************************************")
t = time.time()
exif = piexif.load(INPUT_FILE_PEN)
t_cost = time.time() - t
print("'load': {}[sec]".format(t_cost))
for ifd in ("0th", "Exif", "GPS", "Interop", "1st"):
print("\n{} IFD:".format(ifd))
d = exif[ifd]
for key in sorted(d):
try:
print(" ", key, TAGS[ifd][key]["name"], d[key][:10])
except:
print(" ", key, TAGS[ifd][key]["name"], d[key])
print("**********************************************")
# test utility methods----------------------------------------------
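    # piexif and Pillow can represent the same tag value differently (tuple vs.
    # packed bytes, bytes vs. str), so these helpers normalize before comparing.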
def _compare_value(self, v1, v2):
if type(v1) != type(v2):
if isinstance(v1, tuple):
self.assertEqual(pack_byte(*v1), v2)
elif isinstance(v1, int):
self.assertEqual(struct.pack("B", v1), v2)
elif isinstance(v2, int):
self.assertEqual(struct.pack("B", v2), v1)
elif isinstance(v1, bytes) and isinstance(v2, str):
try:
self.assertEqual(v1, v2.encode("latin1"))
except:
self.assertEqual(v1, v2)
else:
try:
self.assertEqual(v1, v2.encode("latin1"))
except:
self.assertEqual(v1, v2)
else:
self.assertEqual(v1, v2)
def _compare_piexifDict_PILDict(self, piexifDict, pilDict, p=True):
zeroth_ifd = piexifDict["0th"]
exif_ifd = piexifDict["Exif"]
gps_ifd = piexifDict["GPS"]
if 41728 in exif_ifd:
exif_ifd.pop(41728) # value type is UNDEFINED but PIL returns int
if 34853 in pilDict:
gps = pilDict.pop(34853)
for key in sorted(zeroth_ifd):
if key in pilDict:
self._compare_value(zeroth_ifd[key], pilDict[key])
if p:
try:
print(TAGS["0th"][key]["name"],
zeroth_ifd[key][:10], pilDict[key][:10])
except:
print(TAGS["0th"][key]["name"],
zeroth_ifd[key], pilDict[key])
for key in sorted(exif_ifd):
if key in pilDict:
self._compare_value(exif_ifd[key], pilDict[key])
if p:
try:
print(TAGS["Exif"][key]["name"],
exif_ifd[key][:10], pilDict[key][:10])
except:
print(TAGS["Exif"][key]["name"],
exif_ifd[key], pilDict[key])
for key in sorted(gps_ifd):
if key in gps:
self._compare_value(gps_ifd[key], gps[key])
if p:
try:
print(TAGS["GPS"][key]["name"],
gps_ifd[key][:10], gps[key][:10])
except:
print(TAGS["GPS"][key]["name"],
gps_ifd[key], gps[key])
class UTests(unittest.TestCase):
def test_ExifReader_return_unknown(self):
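        # b1 is a hand-built big-endian ("MM") TIFF header with magic 42 and
        # the first IFD at offset 8; b2 is an IFD whose single entry uses the
        # unknown tag 0xFFFF with value type 0, so the reader should hand the
        # entry back unconverted.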
b1 = b"MM\x00\x2a\x00\x00\x00\x08"
b2 = b"\x00\x01" + b"\xff\xff\x00\x00\x00\x00" + b"\x00\x00\x00\x00"
er = piexif._load._ExifReader(b1 + b2)
if er.tiftag[0:2] == b"II":
er.endian_mark = "<"
else:
er.endian_mark = ">"
ifd = er.get_ifd_dict(8, "0th", True)
self.assertEqual(ifd[65535][0], 0)
self.assertEqual(ifd[65535][1], 0)
self.assertEqual(ifd[65535][2], b"\x00\x00")
def test_ExifReader_convert_value_fail(self):
er = piexif._load._ExifReader(I1)
with self.assertRaises(ValueError):
er.convert_value((None, None, None, None))
def test_split_into_segments_fail1(self):
with self.assertRaises(InvalidImageDataError):
_common.split_into_segments(b"I'm not JPEG")
def test_split_into_segments_fail2(self):
with self.assertRaises(ValueError):
_common.split_into_segments(b"\xff\xd8\xff\xe1\xff\xff")
def test_merge_segments(self):
# Remove APP0, when both APP0 and APP1 exists.
with open(INPUT_FILE1, "rb") as f:
original = f.read()
segments = _common.split_into_segments(original)
new_data = _common.merge_segments(segments)
segments = _common.split_into_segments(new_data)
self.assertFalse(segments[1][0:2] == b"\xff\xe0"
and segments[2][0:2] == b"\xff\xe1")
self.assertEqual(segments[1][0:2], b"\xff\xe1")
o = io.BytesIO(new_data)
without_app0 = o.getvalue()
Image.open(o).close()
exif = _common.get_exif_seg(segments)
# Remove Exif, when second 'merged_segments' arguments is None
# and no APP0.
segments = _common.split_into_segments(without_app0)
new_data = _common.merge_segments(segments, None)
segments = _common.split_into_segments(new_data)
self.assertNotEqual(segments[1][0:2], b"\xff\xe0")
self.assertNotEqual(segments[1][0:2], b"\xff\xe1")
self.assertNotEqual(segments[2][0:2], b"\xff\xe1")
o = io.BytesIO(new_data)
Image.open(o).close()
# Insert exif to jpeg that has APP0 and Exif.
o = io.BytesIO()
i = Image.new("RGB", (8, 8))
i.save(o, format="jpeg", exif=exif)
o.seek(0)
segments = _common.split_into_segments(o.getvalue())
new_data = _common.merge_segments(segments, exif)
segments = _common.split_into_segments(new_data)
self.assertFalse(segments[1][0:2] == b"\xff\xe0"
and segments[2][0:2] == b"\xff\xe1")
self.assertEqual(segments[1], exif)
o = io.BytesIO(new_data)
Image.open(o).close()
# Insert exif to jpeg that doesn't have APP0 and Exif.
with open(NOAPP01_FILE, "rb") as f:
original = f.read()
segments = _common.split_into_segments(original)
new_data = _common.merge_segments(segments, exif)
segments = _common.split_into_segments(new_data)
self.assertEqual(segments[1][0:2], b"\xff\xe1")
o = io.BytesIO(new_data)
Image.open(o).close()
# Remove Exif, when second 'merged_segments' arguments is None
# and Exif exists.
with open(INPUT_FILE1, "rb") as f:
original = f.read()
segments = _common.split_into_segments(original)
new_data = _common.merge_segments(segments, None)
segments = _common.split_into_segments(new_data)
self.assertNotEqual(segments[1][0:2], b"\xff\xe1")
self.assertNotEqual(segments[2][0:2], b"\xff\xe1")
o = io.BytesIO(new_data)
Image.open(o).close()
def test_dump_user_comment(self):
# ascii
header = b"\x41\x53\x43\x49\x49\x00\x00\x00"
string = u"abcd"
binary = header + string.encode("ascii")
result = helper.UserComment.dump(string, "ascii")
self.assertEqual(binary, result)
# jis
header = b"\x4a\x49\x53\x00\x00\x00\x00\x00"
string = u"abcd"
binary = header + string.encode("shift_jis")
result = helper.UserComment.dump(string, "jis")
self.assertEqual(binary, result)
# unicode
header = b"\x55\x4e\x49\x43\x4f\x44\x45\x00"
string = u"abcd"
binary = header + string.encode("utf-16-be")
result = helper.UserComment.dump(string, "unicode")
self.assertEqual(binary, result)
# undefined
header = b"\x00\x00\x00\x00\x00\x00\x00\x00"
string = u"abcd"
binary = header + string.encode("latin")
self.assertRaises(ValueError, helper.UserComment.dump, string, "undefined")
def test_load_user_comment(self):
# ascii
header = b"\x41\x53\x43\x49\x49\x00\x00\x00"
string = u"abcd"
binary = header + string.encode("ascii")
result = helper.UserComment.load(binary)
self.assertEqual(string, result)
# jis
header = b"\x4a\x49\x53\x00\x00\x00\x00\x00"
string = u"abcd"
binary = header + string.encode("shift_jis")
result = helper.UserComment.load(binary)
self.assertEqual(string, result)
# unicode
header = b"\x55\x4e\x49\x43\x4f\x44\x45\x00"
string = u"abcd"
binary = header + string.encode("utf-16-be")
result = helper.UserComment.load(binary)
self.assertEqual(string, result)
# undefined
header = b"\x00\x00\x00\x00\x00\x00\x00\x00"
string = u"abcd"
binary = header + string.encode("ascii")
self.assertRaises(ValueError, helper.UserComment.load, binary)
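# Exif UserComment values start with an 8-byte character-code prefix (ASCII,
# JIS, UNICODE, or eight NUL bytes for an undefined encoding); the tests above
# and below exercise piexif.helper.UserComment against those prefixes.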
class HelperTests(unittest.TestCase):
def test_headers(self):
"""Are our headers the correct length?"""
self.assertEqual(len(helper.UserComment._ASCII_PREFIX), helper.UserComment._PREFIX_SIZE)
self.assertEqual(len(helper.UserComment._JIS_PREFIX), helper.UserComment._PREFIX_SIZE)
self.assertEqual(len(helper.UserComment._UNICODE_PREFIX), helper.UserComment._PREFIX_SIZE)
self.assertEqual(len(helper.UserComment._UNDEFINED_PREFIX), helper.UserComment._PREFIX_SIZE)
def test_encode_ascii(self):
"""Do we encode ASCII correctly?"""
text = 'hello world'
expected = b'\x41\x53\x43\x49\x49\x00\x00\x00hello world'
actual = helper.UserComment.dump(text, encoding='ascii')
self.assertEqual(expected, actual)
def test_decode_ascii(self):
"""Do we decode ASCII correctly?"""
binary = b'\x41\x53\x43\x49\x49\x00\x00\x00hello world'
expected = 'hello world'
actual = helper.UserComment.load(binary)
self.assertEqual(expected, actual)
def test_encode_jis(self):
"""Do we encode JIS correctly?"""
text = '\u3053\u3093\u306b\u3061\u306f\u4e16\u754c'
expected = b'\x4a\x49\x53\x00\x00\x00\x00\x00' + text.encode('shift_jis')
actual = helper.UserComment.dump(text, encoding='jis')
self.assertEqual(expected, actual)
def test_decode_jis(self):
"""Do we decode JIS correctly?"""
expected = '\u3053\u3093\u306b\u3061\u306f\u4e16\u754c'
binary = b'\x4a\x49\x53\x00\x00\x00\x00\x00' + expected.encode('shift_jis')
actual = helper.UserComment.load(binary)
self.assertEqual(expected, actual)
def test_encode_unicode(self):
"""Do we encode Unicode correctly?"""
text = '\u3053\u3093\u306b\u3061\u306f\u4e16\u754c'
expected = b'\x55\x4e\x49\x43\x4f\x44\x45\x00' + text.encode('utf_16_be')
actual = helper.UserComment.dump(text, encoding='unicode')
self.assertEqual(expected, actual)
def test_decode_unicode(self):
"""Do we decode Unicode correctly?"""
expected = '\u3053\u3093\u306b\u3061\u306f\u4e16\u754c'
binary = b'\x55\x4e\x49\x43\x4f\x44\x45\x00' + expected.encode('utf_16_be')
actual = helper.UserComment.load(binary)
self.assertEqual(expected, actual)
def test_encode_bad_encoding(self):
"""De we gracefully handle bad input when encoding?"""
self.assertRaises(ValueError, helper.UserComment.dump, 'hello world', 'koi-8r')
def test_decode_bad_encoding(self):
"""De we gracefully handle bad input when decoding?"""
self.assertRaises(ValueError, helper.UserComment.load,
b'\x00\x00\x00\x00\x00\x00\x00\x00hello')
self.assertRaises(ValueError, helper.UserComment.load,
b'\x12\x34\x56\x78\x9a\xbc\xde\xffhello')
self.assertRaises(ValueError, helper.UserComment.load, b'hello world')
class WebpTests(unittest.TestCase):
def setUp(self):
try:
os.mkdir("tests/images/out")
except:
pass
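    # piexif._webp splits the RIFF container into chunks, then rebuilds the
    # file header and re-merges the chunks, optionally adding or dropping the
    # Exif chunk; the tests below round-trip that output through Pillow.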
def test_merge_chunks(self):
"""Can PIL open our output WebP?"""
IMAGE_DIR = "tests/images/"
OUT_DIR = "tests/images/out/"
files = [
"tool1.webp",
"pil1.webp",
"pil2.webp",
"pil3.webp",
"pil_rgb.webp",
"pil_rgba.webp",
]
for filename in files:
try:
Image.open(IMAGE_DIR + filename)
except:
print("Pillow can't read {}".format(filename))
continue
with open(IMAGE_DIR + filename, "rb") as f:
data = f.read()
chunks = _webp.split(data)
file_header = _webp.get_file_header(chunks)
merged = _webp.merge_chunks(chunks)
new_webp_bytes = file_header + merged
with open(OUT_DIR + "raw_" + filename, "wb") as f:
f.write(new_webp_bytes)
Image.open(OUT_DIR + "raw_" + filename)
def test_insert_exif(self):
"""Can PIL open WebP that is inserted exif?"""
IMAGE_DIR = "tests/images/"
OUT_DIR = "tests/images/out/"
files = [
"tool1.webp",
"pil1.webp",
"pil2.webp",
"pil3.webp",
"pil_rgb.webp",
"pil_rgba.webp",
]
exif_dict = {
"0th":{
piexif.ImageIFD.Software: b"PIL",
piexif.ImageIFD.Make: b"Make",
}
}
for filename in files:
try:
Image.open(IMAGE_DIR + filename)
except:
print("Pillow can't read {}".format(filename))
continue
with open(IMAGE_DIR + filename, "rb") as f:
data = f.read()
exif_bytes = piexif.dump(exif_dict)
exif_inserted = _webp.insert(data, exif_bytes)
with open(OUT_DIR + "i_" + filename, "wb") as f:
f.write(exif_inserted)
Image.open(OUT_DIR + "i_" + filename)
def test_remove_exif(self):
"""Can PIL open WebP that is removed exif?"""
IMAGE_DIR = "tests/images/"
OUT_DIR = "tests/images/out/"
files = [
"tool1.webp",
"pil1.webp",
"pil2.webp",
"pil3.webp",
"pil_rgb.webp",
"pil_rgba.webp",
]
for filename in files:
try:
Image.open(IMAGE_DIR + filename)
except:
print("Pillow can't read {}".format(filename))
continue
with open(IMAGE_DIR + filename, "rb") as f:
data = f.read()
exif_removed = _webp.remove(data)
with open(OUT_DIR + "r_" + filename, "wb") as f:
f.write(exif_removed)
Image.open(OUT_DIR + "r_" + filename)
def test_get_exif(self):
"""Can we get exif from WebP?"""
IMAGE_DIR = "tests/images/"
OUT_DIR = "tests/images/out/"
files = [
"tool1.webp",
]
for filename in files:
try:
Image.open(IMAGE_DIR + filename)
except:
print("Pillow can't read {}".format(filename))
continue
with open(IMAGE_DIR + filename, "rb") as f:
data = f.read()
exif_bytes = _webp.get_exif(data)
self.assertEqual(exif_bytes[0:2], b"MM")
def test_load(self):
"""Can we get exif from WebP?"""
IMAGE_DIR = "tests/images/"
OUT_DIR = "tests/images/out/"
files = [
"tool1.webp",
]
for filename in files:
try:
Image.open(IMAGE_DIR + filename)
except:
print("Pillow can't read {}".format(filename))
continue
print(piexif.load(IMAGE_DIR + filename))
def test_remove(self):
"""Can PIL open WebP that is removed exif?"""
IMAGE_DIR = "tests/images/"
OUT_DIR = "tests/images/out/"
files = [
"tool1.webp",
"pil1.webp",
"pil2.webp",
"pil3.webp",
"pil_rgb.webp",
"pil_rgba.webp",
]
for filename in files:
try:
Image.open(IMAGE_DIR + filename)
except:
print("Pillow can't read {}".format(filename))
continue
piexif.remove(IMAGE_DIR + filename, OUT_DIR + "rr_" + filename)
Image.open(OUT_DIR + "rr_" + filename)
def test_insert(self):
"""Can PIL open WebP that is inserted exif?"""
IMAGE_DIR = "tests/images/"
OUT_DIR = "tests/images/out/"
files = [
"tool1.webp",
"pil1.webp",
"pil2.webp",
"pil3.webp",
"pil_rgb.webp",
"pil_rgba.webp",
]
exif_dict = {
"0th":{
piexif.ImageIFD.Software: b"PIL",
piexif.ImageIFD.Make: b"Make",
}
}
exif_bytes = piexif.dump(exif_dict)
for filename in files:
try:
Image.open(IMAGE_DIR + filename)
except:
print("Pillow can't read {}".format(filename))
continue
piexif.insert(exif_bytes, IMAGE_DIR + filename, OUT_DIR + "ii_" + filename)
Image.open(OUT_DIR + "ii_" + filename)
def suite():
suite = unittest.TestSuite()
suite.addTests([
unittest.makeSuite(UTests),
unittest.makeSuite(ExifTests),
unittest.makeSuite(HelperTests),
unittest.makeSuite(WebpTests),
])
return suite
if __name__ == '__main__':
unittest.main()
|
|
# pylint: disable=function-redefined
from __future__ import unicode_literals
from prompt_toolkit.buffer import SelectionType, indent, unindent
from prompt_toolkit.keys import Keys
from prompt_toolkit.enums import IncrementalSearchDirection, SEARCH_BUFFER, SYSTEM_BUFFER
from prompt_toolkit.filters import CLIFilter, Always
from .utils import create_handle_decorator
import prompt_toolkit.filters as filters
__all__ = (
'load_emacs_bindings',
'load_emacs_search_bindings',
'load_emacs_system_bindings',
)
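# Typical usage (sketch; assumes prompt_toolkit's Registry from
# prompt_toolkit.key_binding.registry):
#
#     from prompt_toolkit.key_binding.registry import Registry
#     registry = Registry()
#     load_emacs_bindings(registry)
#     load_emacs_search_bindings(registry)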
def load_emacs_bindings(registry, filter=Always()):
"""
    Some Emacs extensions.
"""
# Overview of Readline emacs commands:
# http://www.catonmat.net/download/readline-emacs-editing-mode-cheat-sheet.pdf
assert isinstance(filter, CLIFilter)
handle = create_handle_decorator(registry, filter)
has_selection = filters.HasSelection()
@handle(Keys.Escape)
def _(event):
"""
By default, ignore escape key.
        (If we don't put this here, and Esc is followed by a key sequence that
        is not handled, we'll insert an Escape character into the input stream,
        something we don't want and which happens too easily in emacs mode.
        Further, people can always use ControlQ to do a quoted insert.)
"""
pass
@handle(Keys.ControlA)
def _(event):
"""
Start of line.
"""
buffer = event.current_buffer
buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=False)
@handle(Keys.ControlB)
def _(event):
"""
Character back.
"""
buffer = event.current_buffer
buffer.cursor_position += buffer.document.get_cursor_left_position(count=event.arg)
@handle(Keys.ControlE)
def _(event):
"""
End of line.
"""
buffer = event.current_buffer
buffer.cursor_position += buffer.document.get_end_of_line_position()
@handle(Keys.ControlF)
def _(event):
"""
Character forward.
"""
buffer = event.current_buffer
buffer.cursor_position += buffer.document.get_cursor_right_position(count=event.arg)
@handle(Keys.ControlN, filter= ~has_selection)
def _(event):
"""
Next line.
"""
event.current_buffer.auto_down()
@handle(Keys.ControlN, filter=has_selection)
def _(event):
"""
Next line.
"""
event.current_buffer.cursor_down()
@handle(Keys.ControlO, filter= ~has_selection)
def _(event):
"""
Insert newline, but don't move the cursor.
"""
event.current_buffer.insert_text('\n', move_cursor=False)
@handle(Keys.ControlP, filter= ~has_selection)
def _(event):
"""
Previous line.
"""
event.current_buffer.auto_up(count=event.arg)
@handle(Keys.ControlP, filter=has_selection)
def _(event):
"""
Previous line.
"""
event.current_buffer.cursor_up(count=event.arg)
@handle(Keys.ControlQ, Keys.Any, filter= ~has_selection)
def _(event):
"""
Quoted insert.
For vt100 terminals, you have to disable flow control by running
``stty -ixon``, otherwise Ctrl-Q and Ctrl-S are captured by the
terminal.
"""
event.current_buffer.insert_text(event.data, overwrite=False)
@handle(Keys.ControlY, filter= ~has_selection)
@handle(Keys.ControlX, 'r', 'y', filter= ~has_selection)
def _(event):
"""
Paste before cursor.
"""
event.current_buffer.paste_clipboard_data(
event.cli.clipboard.get_data(), count=event.arg, before=True)
@handle(Keys.ControlUnderscore, save_before=False, filter= ~has_selection)
def _(event):
"""
Undo.
"""
event.current_buffer.undo()
def handle_digit(c):
"""
        Handle Meta + digit key presses by appending the digit to the argument count.
"""
@handle(Keys.Escape, c)
def _(event):
event.append_to_arg_count(c)
for c in '0123456789':
handle_digit(c)
@handle(Keys.Escape, '-')
def _(event):
"""
"""
if event._arg is None:
event.append_to_arg_count('-')
@handle(Keys.Escape, Keys.ControlJ, filter= ~has_selection)
def _(event):
"""
Meta + Newline: always accept input.
"""
b = event.current_buffer
if b.accept_action.is_returnable:
b.accept_action.validate_and_handle(event.cli, b)
# Not returnable, but multiline.
elif b.is_multiline():
b.insert_text('\n')
@handle(Keys.ControlSquareClose, Keys.Any)
def _(event):
"""
        When Ctrl-] followed by a character is pressed, go to that character.
"""
match = event.current_buffer.document.find(event.data, in_current_line=True, count=(event.arg))
if match is not None:
event.current_buffer.cursor_position += match
@handle(Keys.Escape, Keys.Backspace, filter= ~has_selection)
def _(event):
"""
Delete word backwards.
"""
buffer = event.current_buffer
pos = buffer.document.find_start_of_previous_word(count=event.arg)
if pos:
deleted = buffer.delete_before_cursor(count=-pos)
event.cli.clipboard.set_text(deleted)
@handle(Keys.Escape, 'a', filter= ~has_selection)
def _(event):
"""
Previous sentence.
"""
# TODO:
pass
@handle(Keys.Escape, 'c', filter= ~has_selection)
def _(event):
"""
Capitalize the current (or following) word.
"""
buffer = event.current_buffer
for i in range(event.arg):
pos = buffer.document.find_next_word_ending()
words = buffer.document.text_after_cursor[:pos]
buffer.insert_text(words.title(), overwrite=True)
@handle(Keys.Escape, 'd', filter= ~has_selection)
def _(event):
"""
Delete word forwards.
"""
buffer = event.current_buffer
pos = buffer.document.find_next_word_ending(count=event.arg)
if pos:
deleted = buffer.delete(count=pos)
event.cli.clipboard.set_text(deleted)
@handle(Keys.Escape, 'e', filter= ~has_selection)
def _(event):
""" Move to end of sentence. """
# TODO:
pass
@handle(Keys.Escape, 'f')
@handle(Keys.ControlRight)
def _(event):
"""
Cursor to end of next word.
"""
        buffer = event.current_buffer
pos = buffer.document.find_next_word_ending(count=event.arg)
if pos:
buffer.cursor_position += pos
@handle(Keys.Escape, 'b')
@handle(Keys.ControlLeft)
def _(event):
"""
Cursor to start of previous word.
"""
buffer = event.current_buffer
pos = buffer.document.find_previous_word_beginning(count=event.arg)
if pos:
buffer.cursor_position += pos
@handle(Keys.Escape, 'l', filter= ~has_selection)
def _(event):
"""
Lowercase the current (or following) word.
"""
buffer = event.current_buffer
for i in range(event.arg): # XXX: not DRY: see meta_c and meta_u!!
pos = buffer.document.find_next_word_ending()
words = buffer.document.text_after_cursor[:pos]
buffer.insert_text(words.lower(), overwrite=True)
@handle(Keys.Escape, 't', filter= ~has_selection)
def _(event):
"""
Swap the last two words before the cursor.
"""
# TODO
@handle(Keys.Escape, 'u', filter= ~has_selection)
def _(event):
"""
Uppercase the current (or following) word.
"""
buffer = event.current_buffer
for i in range(event.arg):
pos = buffer.document.find_next_word_ending()
words = buffer.document.text_after_cursor[:pos]
buffer.insert_text(words.upper(), overwrite=True)
@handle(Keys.Escape, '.', filter= ~has_selection)
def _(event):
"""
Rotate through the last word (white-space delimited) of the previous lines in history.
"""
# TODO
@handle(Keys.Escape, '\\', filter= ~has_selection)
def _(event):
"""
Delete all spaces and tabs around point.
(delete-horizontal-space)
"""
@handle(Keys.Escape, '*', filter= ~has_selection)
def _(event):
"""
`meta-*`: Insert all possible completions of the preceding text.
"""
@handle(Keys.ControlX, Keys.ControlU, save_before=False, filter= ~has_selection)
def _(event):
event.current_buffer.undo()
@handle(Keys.ControlX, Keys.ControlX)
def _(event):
"""
Move cursor back and forth between the start and end of the current
line.
"""
buffer = event.current_buffer
if buffer.document.current_char == '\n':
buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=False)
else:
buffer.cursor_position += buffer.document.get_end_of_line_position()
@handle(Keys.ControlSpace)
def _(event):
"""
Start of the selection.
"""
# Take the current cursor position as the start of this selection.
event.current_buffer.start_selection(selection_type=SelectionType.CHARACTERS)
@handle(Keys.ControlG, filter= ~has_selection)
def _(event):
"""
Control + G: Cancel completion menu and validation state.
"""
event.current_buffer.complete_state = None
event.current_buffer.validation_error = None
@handle(Keys.ControlG, filter=has_selection)
def _(event):
"""
Cancel selection.
"""
event.current_buffer.exit_selection()
@handle(Keys.ControlW, filter=has_selection)
@handle(Keys.ControlX, 'r', 'k', filter=has_selection)
def _(event):
"""
Cut selected text.
"""
data = event.current_buffer.cut_selection()
event.cli.clipboard.set_data(data)
@handle(Keys.Escape, 'w', filter=has_selection)
def _(event):
"""
Copy selected text.
"""
data = event.current_buffer.copy_selection()
event.cli.clipboard.set_data(data)
@handle(Keys.Escape, '<', filter= ~has_selection)
def _(event):
"""
Move to the first line in the history.
"""
event.current_buffer.go_to_history(0)
@handle(Keys.Escape, '>', filter= ~has_selection)
def _(event):
"""
Move to the end of the input history.
This is the line we are editing.
"""
buffer = event.current_buffer
buffer.go_to_history(len(buffer._working_lines) - 1)
@handle(Keys.Escape, Keys.Left)
def _(event):
"""
Cursor to start of previous word.
"""
buffer = event.current_buffer
buffer.cursor_position += buffer.document.find_previous_word_beginning(count=event.arg) or 0
@handle(Keys.Escape, Keys.Right)
def _(event):
"""
Cursor to start of next word.
"""
buffer = event.current_buffer
buffer.cursor_position += buffer.document.find_next_word_beginning(count=event.arg) or \
buffer.document.get_end_of_document_position()
@handle(Keys.Escape, '/', filter= ~has_selection)
def _(event):
"""
M-/: Complete.
"""
b = event.current_buffer
if b.complete_state:
b.complete_next()
else:
event.cli.start_completion(select_first=True)
@handle(Keys.ControlC, '>', filter=has_selection)
def _(event):
"""
Indent selected text.
"""
buffer = event.current_buffer
buffer.cursor_position += buffer.document.get_start_of_line_position(after_whitespace=True)
from_, to = buffer.document.selection_range()
from_, _ = buffer.document.translate_index_to_position(from_)
to, _ = buffer.document.translate_index_to_position(to)
indent(buffer, from_ - 1, to, count=event.arg) # XXX: why does translate_index_to_position return 1-based indexing???
@handle(Keys.ControlC, '<', filter=has_selection)
def _(event):
"""
Unindent selected text.
"""
buffer = event.current_buffer
from_, to = buffer.document.selection_range()
from_, _ = buffer.document.translate_index_to_position(from_)
to, _ = buffer.document.translate_index_to_position(to)
unindent(buffer, from_ - 1, to, count=event.arg)
def load_emacs_open_in_editor_bindings(registry, filter=None):
"""
Pressing C-X C-E will open the buffer in an external editor.
"""
handle = create_handle_decorator(registry, filter)
has_selection = filters.HasSelection()
@handle(Keys.ControlX, Keys.ControlE, filter= ~has_selection)
def _(event):
"""
Open editor.
"""
event.current_buffer.open_in_editor()
def load_emacs_system_bindings(registry, filter=None):
handle = create_handle_decorator(registry, filter)
has_focus = filters.HasFocus(SYSTEM_BUFFER)
@handle(Keys.Escape, '!', filter= ~has_focus)
def _(event):
"""
M-'!' opens the system prompt.
"""
event.cli.focus_stack.push(SYSTEM_BUFFER)
@handle(Keys.Escape, filter=has_focus)
@handle(Keys.ControlG, filter=has_focus)
@handle(Keys.ControlC, filter=has_focus)
def _(event):
"""
Cancel system prompt.
"""
event.cli.buffers[SYSTEM_BUFFER].reset()
event.cli.focus_stack.pop()
@handle(Keys.ControlJ, filter=has_focus)
def _(event):
"""
Run system command.
"""
system_line = event.cli.buffers[SYSTEM_BUFFER]
event.cli.run_system_command(system_line.text)
system_line.reset(append_to_history=True)
# Focus previous buffer again.
event.cli.focus_stack.pop()
def load_emacs_search_bindings(registry, filter=None):
handle = create_handle_decorator(registry, filter)
has_focus = filters.HasFocus(SEARCH_BUFFER)
@handle(Keys.ControlG, filter=has_focus)
@handle(Keys.ControlC, filter=has_focus)
# NOTE: the reason for not also binding Escape to this one, is that we want
# Alt+Enter to accept input directly in incremental search mode.
def _(event):
"""
Abort an incremental search and restore the original line.
"""
search_buffer = event.cli.buffers[SEARCH_BUFFER]
search_buffer.reset()
event.cli.focus_stack.pop()
@handle(Keys.ControlJ, filter=has_focus)
def _(event):
"""
        When Enter is pressed in isearch, quit isearch mode. (Multiline
isearch would be too complicated.)
"""
input_buffer = event.cli.buffers[event.cli.focus_stack.previous]
search_buffer = event.cli.buffers[SEARCH_BUFFER]
# Update search state.
if search_buffer.text:
event.cli.search_state.text = search_buffer.text
# Apply search.
input_buffer.apply_search(event.cli.search_state)
# Add query to history of search line.
search_buffer.append_to_history()
search_buffer.reset()
# Focus previous document again.
event.cli.focus_stack.pop()
@handle(Keys.ControlR, filter= ~has_focus)
def _(event):
event.cli.search_state.direction = IncrementalSearchDirection.BACKWARD
event.cli.focus_stack.push(SEARCH_BUFFER)
@handle(Keys.ControlS, filter= ~has_focus)
def _(event):
event.cli.search_state.direction = IncrementalSearchDirection.FORWARD
event.cli.focus_stack.push(SEARCH_BUFFER)
@handle(Keys.ControlR, filter=has_focus)
@handle(Keys.Up, filter=has_focus)
def _(event):
# Update search_state.
search_state = event.cli.search_state
search_state.text = event.cli.buffers[SEARCH_BUFFER].text
search_state.direction = IncrementalSearchDirection.BACKWARD
# Apply search to current buffer.
input_buffer = event.cli.buffers[event.cli.focus_stack.previous]
input_buffer.apply_search(event.cli.search_state)
@handle(Keys.ControlS, filter=has_focus)
@handle(Keys.Down, filter=has_focus)
def _(event):
# Update search_state.
search_state = event.cli.search_state
search_state.text = event.cli.buffers[SEARCH_BUFFER].text
search_state.direction = IncrementalSearchDirection.FORWARD
# Apply search to current buffer.
input_buffer = event.cli.buffers[event.cli.focus_stack.previous]
input_buffer.apply_search(event.cli.search_state)
|
|
# This program rearranges raw Edgeryders data and builds two lists of dicts, userlist and commentslist, containing
# the data needed to build graphs. These objects are then saved to files.
import os, sys
import json
import csv
from datetime import datetime
import time
import networkx as nx
import logging
import edgesense.utils as eu
from edgesense.utils.logger_initializer import initialize_logger
from edgesense.network.utils import extract_edges, extract_multiauthor_post_edges, build_network
from edgesense.metrics import calculate_network_metrics
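# Fetch the users/nodes/comments resources (optionally dumping the raw JSON to
# disk) and run them through the configured extraction method.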
def load_files(users_resource, nodes_resource, comments_resource, username, password, extraction_method, dumpto, generated):
if dumpto:
base_dump_dir = os.path.join(dumpto, generated.strftime('%Y-%m-%d-%H-%M-%S'))
eu.resource.mkdir(base_dump_dir)
# load users
if dumpto:
dump_to = os.path.join(base_dump_dir, 'users.json')
else:
dump_to = None
jusers = eu.resource.load(users_resource, username=username, password=password, dump_to=dump_to)
allusers = eu.extract.extract(extraction_method, 'users', jusers)
# load nodes
if dumpto:
dump_to = os.path.join(base_dump_dir, 'nodes.json')
else:
dump_to = None
jnodes = eu.resource.load(nodes_resource, username=username, password=password, dump_to=dump_to)
allnodes = eu.extract.extract(extraction_method, 'nodes', jnodes)
# load comments
if dumpto:
dump_to = os.path.join(base_dump_dir, 'comments.json')
else:
dump_to = None
jcomments = eu.resource.load(comments_resource, username=username, password=password, dump_to=dump_to)
allcomments = eu.extract.extract(extraction_method, 'comments', jcomments)
logging.info("file loaded")
return (allusers,allnodes,allcomments)
def parse_options(argv):
import getopt
# defaults
try:
source_path = os.environ['EDGESENSE_SOURCE_DIR']
except KeyError:
source_path = ''
users_resource = source_path + 'users.json'
nodes_resource = source_path + 'nodes.json'
comments_resource = source_path + 'comments.json'
node_title_field = 'uid'
timestep_size = 60*60*24*7
timestep_window = 1
timestep_count = None
username = None
password = None
extraction_method = 'nested'
admin_roles = set()
exclude_isolated = False
dumpto = None
create_datapackage = False
license_type = None
license_url = None
datapackage_title = None
destination_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "static", "json"))
try:
opts, args = getopt.getopt(argv,"hu:n:c:t:s:w:f:o:",["users=","nodes=","comments=", "node-title=", "timestep-size=", "timestep-window=", "timestep-count=", "output-directory=", "username=", "password=", "extraction-method=", "admin-roles=", "exclude-isolated", "datapackage-license-type=", "datapackage-license-url=", "datapackage-title=", "dumpto="])
except getopt.GetoptError:
print 'build_network.py -u <users_resource> -n <nodes_resource> -c <comments_resource> -t <node title field> -s <timestep in seconds> -w <timestep window> -f <timestep count> -o <output directory>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'build_network.py -u <users_resource> -n <nodes_resource> -c <comments_resource> -t <node title field> -s <timestep in seconds> -w <timestep window> -f <timestep count> -o <output directory> --username="<http basic auth user>" --password="<http basic auth password>" --admin-roles="<comma separated list of roles marking a user as part of the community team>" --exclude-isolated --datapackage-license-type="<license name for the datapackage>" --datapackage-license-url="<license url for the datapackage>" --datapackage-title="<title for the datapackage>" --dumpto="<where to save the downloaded file>"'
sys.exit()
elif opt in ("-u", "--users"):
users_resource = arg
elif opt in ("-n", "--nodes"):
nodes_resource = arg
elif opt in ("-c", "--comments"):
comments_resource = arg
elif opt in ("-t", "--node-title"):
node_title_field = arg
elif opt in ("-s", "--timestep-size"):
timestep_size = int(arg)
elif opt in ("-w", "--timestep-window"):
timestep_window = int(arg)
elif opt in ("-f", "--timestep-count"):
timestep_count = int(arg)
elif opt in ("-o", "--output-directory"):
destination_path = arg
elif opt in ("--username"):
username = arg
elif opt in ("--password"):
password = arg
elif opt in ("--extraction-method"):
extraction_method = arg
elif opt in ("--admin-roles"):
admin_roles = set([e.strip() for e in arg.split(",") if e.strip()])
elif opt in ("--exclude-isolated"):
exclude_isolated = True
elif opt in ("--dumpto"):
dumpto = arg
elif opt in ("--datapackage-license-type"):
license_type = arg
elif opt in ("--datapackage-license-url"):
license_url = arg
elif opt in ("--datapackage-title"):
datapackage_title = arg
if license_type and license_url:
create_datapackage = True
logging.info("parsing files %(u)s %(n)s %(c)s" % {'u': users_resource, 'n': nodes_resource, 'c': comments_resource})
return (users_resource,
nodes_resource,
comments_resource,
node_title_field,
timestep_size,
timestep_window,
timestep_count,
username, password,
extraction_method,
admin_roles,
exclude_isolated,
dumpto,
create_datapackage,
datapackage_title,
license_type,
license_url,
destination_path)
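# Example invocation (sketch, using the options defined above):
#   python build_network.py -u users.json -n nodes.json -c comments.json \
#       -s 604800 -o static/json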
def main():
initialize_logger('./albertoEdgesenseLog')
generated = datetime.now()
users_resource, \
nodes_resource, \
comments_resource, \
node_title_field, \
timestep_size, \
timestep_window, \
timestep_count, \
username, \
password, \
extraction_method, \
admin_roles, \
exclude_isolated, \
dumpto, \
create_datapackage, \
datapackage_title, \
license_type, \
license_url, \
destination_path = parse_options(sys.argv[1:])
logging.info("Network processing - started")
# Load the files
allusers, allnodes, allcomments = load_files(users_resource, nodes_resource, comments_resource, username, password, extraction_method, dumpto, generated)
# extract a normalized set of data
nodes_map, posts_map, comments_map = eu.extract.normalized_data(allusers, allnodes, allcomments, node_title_field, admin_roles, exclude_isolated)
# this is the network object
# going forward it should be read from a serialized format to handle caching
network = {}
# Add some file metadata
network['meta'] = {}
# Timestamp of the file generation (to show in the dashboard)
network['meta']['generated'] = int(generated.strftime("%s"))
network['edges'] = extract_edges(nodes_map, comments_map)
network['edges'] += extract_multiauthor_post_edges(nodes_map, posts_map)
    # filter out nodes that have not actively participated in the conversations
inactive_nodes = [ v for v in nodes_map.values() if not v['active'] ]
logging.info("inactive nodes: %(n)i" % {'n':len(inactive_nodes)})
network['nodes'] = [ v for v in nodes_map.values() if v['active'] ]
directed_multiedge_network = calculate_network_metrics(nodes_map, posts_map, comments_map, network, timestep_size, timestep_window, timestep_count)
eu.resource.write_network(network, \
directed_multiedge_network, \
generated, \
create_datapackage, \
datapackage_title, \
license_type, \
license_url, \
destination_path)
logging.info("Completed")
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A command line parsing module that lets modules define their own options.
Each module defines its own options, e.g.,
from tornado.options import define, options
define("mysql_host", default="127.0.0.1:3306", help="Main user DB")
define("memcache_hosts", default="127.0.0.1:11011", multiple=True,
help="Main user memcache servers")
def connect():
db = database.Connection(options.mysql_host)
...
The main() method of your application does not need to be aware of all of
the options used throughout your program; they are all automatically loaded
when the modules are loaded. Your main() method can parse the command line
or parse a config file with:
import tornado.options
tornado.options.parse_config_file("/etc/server.conf")
tornado.options.parse_command_line()
Command line formats are what you would expect ("--myoption=myvalue").
Config files are just Python files. Global names become options, e.g.,
myoption = "myvalue"
myotheroption = "myothervalue"
We support datetimes, timedeltas, ints, and floats (just pass a 'type'
kwarg to define). We also accept multi-value options. See the documentation
for define() below.
"""
import datetime
import logging
import logging.handlers
import re
import sys
import time
# For pretty log messages, if available
try:
import curses
except:
curses = None
def define(name, default=None, type=str, help=None, metavar=None,
multiple=False):
"""Defines a new command line option.
If type is given (one of str, float, int, datetime, or timedelta),
we parse the command line arguments based on the given type. If
multiple is True, we accept comma-separated values, and the option
value is always a list.
    For multi-value integers, we also accept the syntax x:y, which expands
    to the integers from x through y inclusive - very useful for long
    integer ranges.
help and metavar are used to construct the automatically generated
command line help string. The help message is formatted like:
--name=METAVAR help string
Command line option names must be unique globally. They can be parsed
from the command line with parse_command_line() or parsed from a
config file with parse_config_file.
"""
if name in options:
raise Error("Option %r already defined in %s", name,
options[name].file_name)
frame = sys._getframe(0)
options_file = frame.f_code.co_filename
file_name = frame.f_back.f_code.co_filename
if file_name == options_file: file_name = ""
options[name] = _Option(name, file_name=file_name, default=default,
type=type, help=help, metavar=metavar,
multiple=multiple)
def parse_command_line(args=None):
"""Parses all options given on the command line.
We return all command line arguments that are not options as a list.
"""
if args is None: args = sys.argv
remaining = []
for i in xrange(1, len(args)):
# All things after the last option are command line arguments
if not args[i].startswith("-"):
remaining = args[i:]
break
if args[i] == "--":
remaining = args[i+1:]
break
arg = args[i].lstrip("-")
name, equals, value = arg.partition("=")
name = name.replace('-', '_')
if not name in options:
print_help()
raise Error('Unrecognized command line option: %r' % name)
option = options[name]
if not equals:
if option.type == bool:
value = "true"
else:
raise Error('Option %r requires a value' % name)
option.parse(value)
if options.help:
print_help()
sys.exit(0)
# Set up log level and pretty console logging by default
if options.logging != 'none':
logging.getLogger().setLevel(getattr(logging, options.logging.upper()))
enable_pretty_logging()
return remaining
def parse_config_file(path):
"""Parses and loads the Python config file at the given path."""
config = {}
execfile(path, config, config)
for name in config:
if name in options:
options[name].set(config[name])
def print_help(file=sys.stdout):
"""Prints all the command line options to stdout."""
print >> file, "Usage: %s [OPTIONS]" % sys.argv[0]
print >> file, ""
print >> file, "Options:"
by_file = {}
for option in options.itervalues():
by_file.setdefault(option.file_name, []).append(option)
for filename, o in sorted(by_file.items()):
if filename: print >> file, filename
o.sort(key=lambda option: option.name)
for option in o:
prefix = option.name
if option.metavar:
prefix += "=" + option.metavar
print >> file, " --%-30s %s" % (prefix, option.help or "")
print >> file
class _Options(dict):
"""Our global program options, an dictionary with object-like access."""
@classmethod
def instance(cls):
if not hasattr(cls, "_instance"):
cls._instance = cls()
return cls._instance
def __getattr__(self, name):
if isinstance(self.get(name), _Option):
return self[name].value()
raise AttributeError("Unrecognized option %r" % name)
class _Option(object):
def __init__(self, name, default=None, type=str, help=None, metavar=None,
multiple=False, file_name=None):
if default is None and multiple:
default = []
self.name = name
self.type = type
self.help = help
self.metavar = metavar
self.multiple = multiple
self.file_name = file_name
self.default = default
self._value = None
def value(self):
return self.default if self._value is None else self._value
def parse(self, value):
_parse = {
datetime.datetime: self._parse_datetime,
datetime.timedelta: self._parse_timedelta,
bool: self._parse_bool,
str: self._parse_string,
}.get(self.type, self.type)
if self.multiple:
if self._value is None:
self._value = []
for part in value.split(","):
if self.type in (int, long):
# allow ranges of the form X:Y (inclusive at both ends)
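                    # e.g. "8000:8003,9000" yields [8000, 8001, 8002, 8003, 9000]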
lo, _, hi = part.partition(":")
lo = _parse(lo)
hi = _parse(hi) if hi else lo
self._value.extend(range(lo, hi+1))
else:
self._value.append(_parse(part))
else:
self._value = _parse(value)
return self.value()
def set(self, value):
if self.multiple:
if not isinstance(value, list):
raise Error("Option %r is required to be a list of %s" %
(self.name, self.type.__name__))
for item in value:
if item != None and not isinstance(item, self.type):
raise Error("Option %r is required to be a list of %s" %
(self.name, self.type.__name__))
else:
if value != None and not isinstance(value, self.type):
raise Error("Option %r is required to be a %s" %
(self.name, self.type.__name__))
self._value = value
# Supported date/time formats in our options
_DATETIME_FORMATS = [
"%a %b %d %H:%M:%S %Y",
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M",
"%Y-%m-%dT%H:%M",
"%Y%m%d %H:%M:%S",
"%Y%m%d %H:%M",
"%Y-%m-%d",
"%Y%m%d",
"%H:%M:%S",
"%H:%M",
]
def _parse_datetime(self, value):
for format in self._DATETIME_FORMATS:
try:
return datetime.datetime.strptime(value, format)
except ValueError:
pass
raise Error('Unrecognized date/time format: %r' % value)
_TIMEDELTA_ABBREVS = [
('hours', ['h']),
('minutes', ['m', 'min']),
('seconds', ['s', 'sec']),
('milliseconds', ['ms']),
('microseconds', ['us']),
('days', ['d']),
('weeks', ['w']),
]
_TIMEDELTA_ABBREV_DICT = dict(
(abbrev, full) for full, abbrevs in _TIMEDELTA_ABBREVS
for abbrev in abbrevs)
_FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?'
_TIMEDELTA_PATTERN = re.compile(
r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE)
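    # Accepts values such as "1h30m", "45 s" or "250ms"; a bare number is
    # interpreted as seconds.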
def _parse_timedelta(self, value):
try:
sum = datetime.timedelta()
start = 0
while start < len(value):
m = self._TIMEDELTA_PATTERN.match(value, start)
if not m:
raise Exception()
num = float(m.group(1))
units = m.group(2) or 'seconds'
units = self._TIMEDELTA_ABBREV_DICT.get(units, units)
sum += datetime.timedelta(**{units: num})
start = m.end()
return sum
except:
raise
def _parse_bool(self, value):
return value.lower() not in ("false", "0", "f")
def _parse_string(self, value):
return value.decode("utf-8")
class Error(Exception):
pass
def enable_pretty_logging():
"""Turns on formatted logging output as configured."""
root_logger = logging.getLogger()
if options.log_file_prefix:
channel = logging.handlers.RotatingFileHandler(
filename=options.log_file_prefix,
maxBytes=options.log_file_max_size,
backupCount=options.log_file_num_backups)
channel.setFormatter(_LogFormatter(color=False))
root_logger.addHandler(channel)
if (options.log_to_stderr or
(options.log_to_stderr is None and not root_logger.handlers)):
# Set up color if we are in a tty and curses is installed
color = False
if curses and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except:
pass
channel = logging.StreamHandler()
channel.setFormatter(_LogFormatter(color=color))
root_logger.addHandler(channel)
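# Formats records as "[L yymmdd HH:MM:SS module:lineno] message" (where L is
# the first letter of the level name), colorized by level when possible.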
class _LogFormatter(logging.Formatter):
def __init__(self, color, *args, **kwargs):
logging.Formatter.__init__(self, *args, **kwargs)
self._color = color
if color:
# The curses module has some str/bytes confusion in python3.
# Most methods return bytes, but only accept strings.
            # The explicit calls to unicode() below are harmless in python2,
# but will do the right conversion in python3.
fg_color = unicode(curses.tigetstr("setaf") or
curses.tigetstr("setf") or "", "ascii")
self._colors = {
logging.DEBUG: unicode(curses.tparm(fg_color, 4), # Blue
"ascii"),
logging.INFO: unicode(curses.tparm(fg_color, 2), # Green
"ascii"),
logging.WARNING: unicode(curses.tparm(fg_color, 3), # Yellow
"ascii"),
logging.ERROR: unicode(curses.tparm(fg_color, 1), # Red
"ascii"),
}
self._normal = unicode(curses.tigetstr("sgr0"), "ascii")
def format(self, record):
try:
record.message = record.getMessage()
except Exception, e:
record.message = "Bad message (%r): %r" % (e, record.__dict__)
record.asctime = time.strftime(
"%y%m%d %H:%M:%S", self.converter(record.created))
prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \
record.__dict__
if self._color:
prefix = (self._colors.get(record.levelno, self._normal) +
prefix + self._normal)
formatted = prefix + " " + record.message
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
formatted = formatted.rstrip() + "\n" + record.exc_text
return formatted.replace("\n", "\n ")
options = _Options.instance()
# Default options
define("help", type=bool, help="show this help information")
define("logging", default="info",
help=("Set the Python log level. If 'none', tornado won't touch the "
"logging configuration."),
metavar="info|warning|error|none")
define("log_to_stderr", type=bool, default=None,
help=("Send log output to stderr (colorized if possible). "
"By default use stderr if --log_file_prefix is not set and "
"no other logging is configured."))
define("log_file_prefix", type=str, default=None, metavar="PATH",
help=("Path prefix for log files. "
"Note that if you are running multiple tornado processes, "
"log_file_prefix must be different for each of them (e.g. "
"include the port number)"))
define("log_file_max_size", type=int, default=100 * 1000 * 1000,
help="max size of log files before rollover")
define("log_file_num_backups", type=int, default=10,
help="number of log files to keep")
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool for converting Backends configuration to Modules configuration.
Uses existing backends.yaml and app.yaml files to create a separate
<module-name>.yaml file for each module defined in backends.yaml.
"""
import os
import sys
import warnings
from google.appengine.api import appinfo
from google.appengine.api import backendinfo
warnings.simplefilter('default')
__all__ = [
'ConvertBackendToModules',
]
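# Example (sketch): ConvertBackendToModules('backends.yaml', 'app.yaml') writes
# a <module-name>.yaml file next to app.yaml for each backend defined.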
START_URI = '/_ah/start'
LOGIN_ADMIN = 'admin'
DEPRECATION_TEXT = ('The failfast option is deprecated for Modules. No '
'equivalent option will be set.')
DYNAMIC_PROMPT_TEXT = """\
Backend %s is marked dynamic.
Dynamic backends should be converted to basic_scaling type.
Basic scaling modules require an integer max instances value.
Please provide the max_instances [default: 1]: """
MAIN_ERR_MESSAGE = """\
Backends and App Config filename arguments not passed
in correctly. Can't complete operation.
"""
PRINT_FILE_DELIMITER = ('=' * 80) + '\n' + ('=' * 80)
def _ToYAMLDefault(appinfo_config):
"""Converts an app config to default (alphabetical by key) YAML string.
Args:
appinfo_config: appinfo.AppInfoExternal object. Configuration object
for either a module or app.yaml.
Returns:
String containing YAML for the app config object.
"""
return appinfo_config.ToYAML()
def ConvertBackendToModules(backend_config_filename,
app_config_filename,
_to_yaml_method=_ToYAMLDefault):
"""Creates Modules configuration using filenames of app and backend config.
Tries to write config to a file for each module defined.
Args:
backend_config_filename: String; Relative path to backends.yaml passed in.
app_config_filename: String; Relative path to app.yaml passed in.
_to_yaml_method: A method which takes an appinfo.AppInfoExternal object and
converts it to a YAML string. Defaults to _ToYAMLDefault which just calls
ToYAML() on the object.
"""
with open(backend_config_filename, 'r') as fh:
backend_config = fh.read()
with open(app_config_filename, 'r') as fh:
app_config = fh.read()
application_root, app_config_filename = os.path.split(
os.path.abspath(app_config_filename))
converted_backends = _ConvertBackendToModules(backend_config, app_config)
for module_config in converted_backends:
_MaybeWriteConfigToFile(module_config, application_root,
_to_yaml_method=_to_yaml_method)
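# Illustrative sketch (hypothetical helper, not used by the converter itself):
# a custom _to_yaml_method that a caller could pass to ConvertBackendToModules,
# e.g. to prepend a comment banner to every generated <module-name>.yaml file.
def _ToYAMLWithBanner(appinfo_config):
  """Returns the default YAML output with a short comment banner prepended."""
  banner = '# Converted from backends.yaml; review before deploying.\n'
  return banner + appinfo_config.ToYAML()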
def _MaybeWriteConfigToFile(appinfo_config, application_root,
_to_yaml_method=_ToYAMLDefault):
"""Writes an app config to a file.
If the file already exists, prompts the user before saving. If the user
does not wish to overwrite the file, prints the would-be file contents.
Args:
appinfo_config: appinfo.AppInfoExternal object. Configuration object
for either a module or app.yaml.
application_root: String; an absolute path where the application to be
deployed is located on the local filesystem.
_to_yaml_method: A method which takes an appinfo.AppInfoExternal object and
converts it to a YAML string. Defaults to _ToYAMLDefault which just calls
ToYAML() on the object.
"""
filename = '%s.yaml' % (appinfo_config.module.encode('ascii'),)
filepath = os.path.join(application_root, filename)
contents = _to_yaml_method(appinfo_config)
if os.path.exists(filepath):
prompt = 'File %s exists. Overwrite? [y/N] ' % (filename,)
result = raw_input(prompt).strip()
if result != 'y':
print 'File %s not written.' % (filename,)
print 'Contents:'
print PRINT_FILE_DELIMITER
print contents
print PRINT_FILE_DELIMITER
return
with open(filepath, 'w') as fh:
fh.write(contents)
def _ConvertBackendToModules(backend_config, app_config):
"""Creates Modules configuration using app and backend config.
  Parses the app.yaml and backends.yaml contents into native AppInfoExternal
and BackendInfoExternal objects and then creates an AppInfoExternal
for each backend defined in backend_config.
Args:
    backend_config: String, the contents of backends.yaml.
app_config: String, the contents of app.yaml.
Returns:
A list of AppInfoExternal objects for each module.
"""
backend_info = backendinfo.LoadBackendInfo(backend_config)
app_yaml_config = appinfo.LoadSingleAppInfo(app_config)
return [_ConvertBackendToModule(backend, app_yaml_config)
for backend in backend_info.backends]
def _ConvertBackendToModule(backend_entry, app_yaml_config):
"""Converts an individual backend to a module config.
Args:
backend_entry: A backendinfo.BackendEntry object. Contains a parsed backend
definition from backends.yaml.
app_yaml_config: A appinfo.AppInfoExternal object. Contains parsed app.yaml.
Returns:
An appinfo.AppInfoExternal object which is a copy of app.yaml patched with
the backend definition.
"""
result = _CopyAppInfo(app_yaml_config)
_MaybeSetNotPublic(result, backend_entry)
_WarnFailFast(backend_entry)
_SetStart(result, backend_entry)
_SetModule(result, backend_entry)
_SetClass(result, backend_entry)
_SetInstances(result, backend_entry)
_SetDynamic(result, backend_entry)
return result
def _CopyAppInfo(app_yaml_config):
"""Deep copy of parsed YAML config.
Casts native YAML object to string and then back again.
Args:
app_yaml_config: A appinfo.AppInfoExternal object. Contains parsed app.yaml.
Returns:
Deep copy of app_yaml_config.
"""
as_yaml = app_yaml_config.ToYAML()
return appinfo.LoadSingleAppInfo(as_yaml)
def _MaybeSetNotPublic(target, backend_entry):
"""Attempts to set all handlers as login: admin if the backend is private.
Prompts user if this operation is desired before doing so. If the user
declines, does nothing.
Args:
target: A appinfo.AppInfoExternal object. Contains parsed app.yaml augmented
by current backend info.
backend_entry: A backendinfo.BackendEntry object. Contains a parsed backend
definition from backends.yaml.
"""
if backend_entry.public:
return
prompt = ('Backend %s is marked private.\nWould you like to make all '
'handlers \'login: admin\'? [y/N] ' % (backend_entry.name,))
result = raw_input(prompt).strip()
if result == 'y':
for handler in target.handlers:
handler.login = LOGIN_ADMIN
def _WarnFailFast(backend_entry):
"""Warns if the deprecated failfast option is used in the backend.
Args:
backend_entry: A backendinfo.BackendEntry object. Contains a parsed backend
definition from backends.yaml.
"""
if backend_entry.failfast:
warnings.warn(DEPRECATION_TEXT, DeprecationWarning)
def _RemoveStartHandler(app_yaml_config):
"""Removes a start handler from an application config if one is defined.
If multiple start handlers are defined, only the first would be used (since
routing goes in order of first to last).
Args:
app_yaml_config: A appinfo.AppInfoExternal object. Contains parsed app.yaml.
Returns:
Either None, if there is no start handler or the removed appinfo.URLMap
object containing the start handler info.
"""
handlers = app_yaml_config.handlers
start_handlers = []
for handler in handlers:
if handler.url == START_URI:
start_handlers.append(handler)
if start_handlers:
for start_handler in start_handlers:
handlers.remove(start_handler)
return start_handlers[0]
def _SetStart(target, backend_entry):
"""Attempts to set a start handler for the target module.
This only gets set if there is a start script defined for the backend. If
there was also a start handler in app.yaml, will copy this and use the
existing handler, replacing the script with the one from the backend.
Args:
target: A appinfo.AppInfoExternal object. Contains parsed app.yaml augmented
by current backend info.
backend_entry: A backendinfo.BackendEntry object. Contains a parsed backend
definition from backends.yaml.
"""
if backend_entry.start is None:
return
start_handler = _RemoveStartHandler(target)
if start_handler is None:
start_handler = appinfo.URLMap(url=START_URI, login=LOGIN_ADMIN)
start_handler.script = backend_entry.start
target.handlers.insert(0, start_handler)
def _SetModule(target, backend_entry):
"""Sets module name to backend name.
Args:
target: A appinfo.AppInfoExternal object. Contains parsed app.yaml augmented
by current backend info.
backend_entry: A backendinfo.BackendEntry object. Contains a parsed backend
definition from backends.yaml.
"""
target.module = backend_entry.name
def _SetClass(target, backend_entry):
"""Sets module instance class to backend instance class.
If there was no instance class defined on the backend, does nothing.
Args:
target: A appinfo.AppInfoExternal object. Contains parsed app.yaml augmented
by current backend info.
backend_entry: A backendinfo.BackendEntry object. Contains a parsed backend
definition from backends.yaml.
"""
curr_class = backend_entry.get_class()
if curr_class is not None:
target.instance_class = curr_class
def _SetInstances(target, backend_entry):
"""Sets number of instances for module if defined in backend.
  If not defined in the backend, does nothing. Otherwise, sets the manual scaling
field to use the number of instances specified.
Args:
target: A appinfo.AppInfoExternal object. Contains parsed app.yaml augmented
by current backend info.
backend_entry: A backendinfo.BackendEntry object. Contains a parsed backend
definition from backends.yaml.
"""
instances = backend_entry.instances
if instances is not None:
target.manual_scaling = appinfo.ManualScaling(instances=instances)
def _SetDynamic(target, backend_entry):
"""Sets basic scaling if backend is dynamic.
  If dynamic is not set on the backend, does nothing. Otherwise, sets the
basic scaling field to use the number of instances provided via raw_input.
Args:
target: A appinfo.AppInfoExternal object. Contains parsed app.yaml augmented
by current backend info.
backend_entry: A backendinfo.BackendEntry object. Contains a parsed backend
definition from backends.yaml.
"""
if not backend_entry.dynamic:
return
prompt = DYNAMIC_PROMPT_TEXT % (backend_entry.name,)
result = raw_input(prompt).strip()
if result == '':
target.basic_scaling = appinfo.BasicScaling(max_instances=1)
return
max_instances = -1
try:
max_instances = int(result)
except (TypeError, ValueError):
pass
if max_instances <= 0:
print 'Invalid max_instances value: %r' % (result,)
return
target.basic_scaling = appinfo.BasicScaling(max_instances=max_instances)
def MakeParser(prog):
"""Create an argument parser.
Args:
prog: The name of the program to use when outputting help text.
Returns:
An argparse.ArgumentParser built to specification.
"""
import argparse
parser = argparse.ArgumentParser(prog=prog)
parser.add_argument('backend_config_filename', nargs=1,
help='Path to backends.yaml for application.')
parser.add_argument('app_config_filename', nargs=1,
help='Path to app.yaml for application.')
return parser
def main(argv):
parser = MakeParser(argv[0])
args = parser.parse_args(argv[1:])
backend_config_filename_args = getattr(args, 'backend_config_filename', [])
app_config_filename_args = getattr(args, 'app_config_filename', [])
if (len(backend_config_filename_args) != 1 or
len(app_config_filename_args) != 1):
print >>sys.stderr, MAIN_ERR_MESSAGE
return 1
ConvertBackendToModules(backend_config_filename_args[0],
app_config_filename_args[0])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port
'''
This is a tool for comparing two or more bitcoinds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
class RejectResult(object):
'''
Outcome that expects rejection of a transaction or block.
'''
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
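# Illustrative sketch (hypothetical, not part of the framework): the shape of a
# test generator that TestManager consumes. It assumes the caller supplies a
# make_block() helper that returns a CBlock built on the current tip; only the
# structure of the yielded TestInstance objects matters here.
class ExampleTestGenerator(object):
    def __init__(self, make_block):
        self.make_block = make_block
    def get_tests(self):
        # A block we expect every node to accept and adopt as its new tip.
        yield TestInstance([[self.make_block(), True]])
        # A block with no expected outcome: the nodes are only compared
        # against each other.
        yield TestInstance([[self.make_block(), None]])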
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks):
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
    # Analogous to sync_blocks (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
print 'Block not in reject map: %064x' % (blockhash)
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
print 'Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash)
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
print 'Tx not in reject map: %064x' % (txhash)
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
print 'Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash)
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
|
|
import logging
import json
import uuid
import tornado.web
import pymongo
import motor
from iceprod.server.rest import RESTHandler, RESTHandlerSetup, authorization
from iceprod.server.util import nowstr
logger = logging.getLogger('rest.pilots')
def setup(config, *args, **kwargs):
"""
Setup method for Pilots REST API.
Sets up any database connections or other prerequisites.
Args:
config (dict): an instance of :py:class:`iceprod.server.config`.
Returns:
        list: Routes for pilots, which can be passed to :py:class:`tornado.web.Application`.
"""
cfg_rest = config.get('rest',{}).get('pilots',{})
db_cfg = cfg_rest.get('database',{})
# add indexes
db = pymongo.MongoClient(**db_cfg).pilots
if 'pilot_id_index' not in db.pilots.index_information():
db.pilots.create_index('pilot_id', name='pilot_id_index', unique=True)
handler_cfg = RESTHandlerSetup(config, *args, **kwargs)
handler_cfg.update({
'database': motor.motor_tornado.MotorClient(**db_cfg).pilots,
})
return [
(r'/pilots', MultiPilotsHandler, handler_cfg),
(r'/pilots/(?P<pilot_id>\w+)', PilotsHandler, handler_cfg),
]
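# Illustrative sketch (hypothetical, not part of the iceprod server wiring):
# the route list returned by setup() is ordinary tornado routing, so a minimal
# standalone application serving only the pilot endpoints could look like this.
def example_pilots_app(config):
    """Build a bare tornado Application from the pilot routes."""
    return tornado.web.Application(setup(config))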
class BaseHandler(RESTHandler):
def initialize(self, database=None, **kwargs):
super(BaseHandler, self).initialize(**kwargs)
self.db = database
class MultiPilotsHandler(BaseHandler):
"""
Handle multi pilots requests.
"""
@authorization(roles=['admin','client','system'])
async def get(self):
"""
Get pilot entries.
Params (optional):
queue_host: queue_host to filter by
queue_version: queue_version to filter by
host: host to filter by
version: version to filter by
keys: | separated list of keys to return for each pilot
Returns:
dict: {'uuid': {pilot_data}}
"""
filters = {}
for k in ('queue_host','queue_version','host','version'):
tmp = self.get_argument(k, None)
if tmp:
filters[k] = tmp
projection = {'_id': False}
keys = self.get_argument('keys','')
if keys:
projection.update({x:True for x in keys.split('|') if x})
projection['pilot_id'] = True
ret = {}
async for row in self.db.pilots.find(filters,projection=projection):
ret[row['pilot_id']] = row
self.write(ret)
@authorization(roles=['admin','client'])
async def post(self):
"""
Create a pilot entry.
Body should contain the pilot data.
Returns:
dict: {'result': <pilot_id>}
"""
data = json.loads(self.request.body)
# validate first
req_fields = {
'queue_host': str,
'queue_version': str, # iceprod version
'resources': dict, # min resources requested
}
for k in req_fields:
if k not in data:
raise tornado.web.HTTPError(400, reason='missing key: '+k)
if not isinstance(data[k], req_fields[k]):
r = 'key {} should be of type {}'.format(k, req_fields[k])
raise tornado.web.HTTPError(400, reason=r)
# set some fields
data['pilot_id'] = uuid.uuid1().hex
data['submit_date'] = nowstr()
data['start_date'] = ''
data['last_update'] = data['submit_date']
if 'tasks' not in data:
data['tasks'] = []
if 'host' not in data:
data['host'] = ''
if 'site' not in data:
data['site'] = ''
if 'version' not in data:
data['version'] = ''
if 'grid_queue_id' not in data:
data['grid_queue_id'] = ''
if 'resources_available' not in data:
data['resources_available'] = {}
if 'resources_claimed' not in data:
data['resources_claimed'] = {}
ret = await self.db.pilots.insert_one(data)
self.set_status(201)
self.write({'result': data['pilot_id']})
self.finish()
class PilotsHandler(BaseHandler):
"""
Handle single pilot requests.
"""
@authorization(roles=['admin','client','pilot'])
async def get(self, pilot_id):
"""
Get a pilot entry.
Args:
pilot_id (str): the pilot id
Returns:
dict: pilot entry
"""
ret = await self.db.pilots.find_one({'pilot_id':pilot_id},
projection={'_id':False})
if not ret:
self.send_error(404, reason="Pilot not found")
else:
self.write(ret)
self.finish()
@authorization(roles=['admin','client','pilot'])
async def patch(self, pilot_id):
"""
Update a pilot entry.
Body should contain the pilot data to update. Note that this will
perform a merge (not replace).
Args:
pilot_id (str): the pilot id
Returns:
dict: updated pilot entry
"""
data = json.loads(self.request.body)
if not data:
raise tornado.web.HTTPError(400, reason='Missing update data')
data['last_update'] = nowstr()
ret = await self.db.pilots.find_one_and_update({'pilot_id':pilot_id},
{'$set':data},
projection={'_id':False},
return_document=pymongo.ReturnDocument.AFTER)
if not ret:
self.send_error(404, reason="Pilot not found")
else:
if 'site' in ret and ret['site']:
self.module.statsd.incr('site.{}.pilot'.format(ret['site']))
self.write(ret)
self.finish()
@authorization(roles=['admin','client','pilot'])
async def delete(self, pilot_id):
"""
Delete a pilot entry.
Args:
pilot_id (str): the pilot id
Returns:
dict: empty dict
"""
ret = await self.db.pilots.find_one_and_delete({'pilot_id':pilot_id})
if not ret:
self.send_error(404, reason="Pilot not found")
else:
if 'site' in ret and ret['site']:
self.module.statsd.incr('site.{}.pilot_delete'.format(ret['site']))
self.write({})
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Ioan Sucan, William Baker
from geometry_msgs.msg import Pose, PoseStamped
from moveit_msgs.msg import (
RobotTrajectory,
Grasp,
PlaceLocation,
Constraints,
RobotState,
)
from moveit_msgs.msg import (
MoveItErrorCodes,
TrajectoryConstraints,
PlannerInterfaceDescription,
MotionPlanRequest,
)
from sensor_msgs.msg import JointState
import rospy
import tf
from moveit_ros_planning_interface import _moveit_move_group_interface
from .exception import MoveItCommanderException
import moveit_commander.conversions as conversions
class MoveGroupCommander(object):
"""
Execution of simple commands for a particular group
"""
def __init__(
self, name, robot_description="robot_description", ns="", wait_for_servers=5.0
):
""" Specify the group name for which to construct this commander instance. Throws an exception if there is an initialization error. """
self._g = _moveit_move_group_interface.MoveGroupInterface(
name, robot_description, ns, wait_for_servers
)
def get_name(self):
""" Get the name of the group this instance was initialized for """
return self._g.get_name()
def stop(self):
""" Stop the current execution, if any """
self._g.stop()
def get_active_joints(self):
""" Get the active joints of this group """
return self._g.get_active_joints()
def get_joints(self):
""" Get the joints of this group """
return self._g.get_joints()
def get_variable_count(self):
""" Return the number of variables used to parameterize a state in this group (larger or equal to number of DOF)"""
return self._g.get_variable_count()
def has_end_effector_link(self):
""" Check if this group has a link that is considered to be an end effector """
return len(self._g.get_end_effector_link()) > 0
def get_end_effector_link(self):
""" Get the name of the link that is considered to be an end-effector. Return an empty string if there is no end-effector. """
return self._g.get_end_effector_link()
def set_end_effector_link(self, link_name):
""" Set the name of the link to be considered as an end effector """
if not self._g.set_end_effector_link(link_name):
raise MoveItCommanderException("Unable to set end effector link")
def get_interface_description(self):
""" Get the description of the planner interface (list of planner ids) """
desc = PlannerInterfaceDescription()
conversions.msg_from_string(desc, self._g.get_interface_description())
return desc
def get_pose_reference_frame(self):
""" Get the reference frame assumed for poses of end-effectors """
return self._g.get_pose_reference_frame()
def set_pose_reference_frame(self, reference_frame):
""" Set the reference frame to assume for poses of end-effectors """
self._g.set_pose_reference_frame(reference_frame)
def get_planning_frame(self):
""" Get the name of the frame where all planning is performed """
return self._g.get_planning_frame()
def get_current_joint_values(self):
""" Get the current configuration of the group as a list (these are values published on /joint_states) """
return self._g.get_current_joint_values()
def get_current_pose(self, end_effector_link=""):
""" Get the current pose of the end-effector of the group. Throws an exception if there is not end-effector. """
if len(end_effector_link) > 0 or self.has_end_effector_link():
return conversions.list_to_pose_stamped(
self._g.get_current_pose(end_effector_link), self.get_planning_frame()
)
else:
raise MoveItCommanderException(
"There is no end effector to get the pose of"
)
def get_current_rpy(self, end_effector_link=""):
""" Get a list of 3 elements defining the [roll, pitch, yaw] of the end-effector. Throws an exception if there is not end-effector. """
if len(end_effector_link) > 0 or self.has_end_effector_link():
return self._g.get_current_rpy(end_effector_link)
else:
raise MoveItCommanderException("There is no end effector to get the rpy of")
def get_random_joint_values(self):
return self._g.get_random_joint_values()
def get_random_pose(self, end_effector_link=""):
if len(end_effector_link) > 0 or self.has_end_effector_link():
return conversions.list_to_pose_stamped(
self._g.get_random_pose(end_effector_link), self.get_planning_frame()
)
else:
raise MoveItCommanderException(
"There is no end effector to get the pose of"
)
def set_start_state_to_current_state(self):
self._g.set_start_state_to_current_state()
def set_start_state(self, msg):
"""
Specify a start state for the group.
Parameters
----------
msg : moveit_msgs/RobotState
Examples
--------
>>> from moveit_msgs.msg import RobotState
>>> from sensor_msgs.msg import JointState
>>> joint_state = JointState()
>>> joint_state.header = Header()
>>> joint_state.header.stamp = rospy.Time.now()
>>> joint_state.name = ['joint_a', 'joint_b']
>>> joint_state.position = [0.17, 0.34]
>>> moveit_robot_state = RobotState()
>>> moveit_robot_state.joint_state = joint_state
>>> group.set_start_state(moveit_robot_state)
"""
self._g.set_start_state(conversions.msg_to_string(msg))
def get_current_state_bounded(self):
""" Get the current state of the robot bounded."""
s = RobotState()
c_str = self._g.get_current_state_bounded()
conversions.msg_from_string(s, c_str)
return s
def get_current_state(self):
""" Get the current state of the robot."""
s = RobotState()
c_str = self._g.get_current_state()
conversions.msg_from_string(s, c_str)
return s
def get_joint_value_target(self):
return self._g.get_joint_value_target()
def set_joint_value_target(self, arg1, arg2=None, arg3=None):
"""
Specify a target joint configuration for the group.
- if the type of arg1 is one of the following: dict, list, JointState message, then no other arguments should be provided.
The dict should specify pairs of joint variable names and their target values, the list should specify all the variable values
for the group. The JointState message specifies the positions of some single-dof joints.
- if the type of arg1 is string, then arg2 is expected to be defined and be either a real value or a list of real values. This is
interpreted as setting a particular joint to a particular value.
- if the type of arg1 is Pose or PoseStamped, both arg2 and arg3 could be defined. If arg2 or arg3 are defined, their types must
be either string or bool. The string type argument is interpreted as the end-effector the pose is specified for (default is to use
the default end-effector), and the bool is used to decide whether the pose specified is approximate (default is false). This situation
allows setting the joint target of the group by calling IK. This does not send a pose to the planner and the planner will do no IK.
Instead, one IK solution will be computed first, and that will be sent to the planner.
"""
if isinstance(arg1, RobotState):
if not self._g.set_state_value_target(conversions.msg_to_string(arg1)):
raise MoveItCommanderException(
"Error setting state target. Is the target state within bounds?"
)
elif isinstance(arg1, JointState):
if arg2 is not None or arg3 is not None:
raise MoveItCommanderException("Too many arguments specified")
if not self._g.set_joint_value_target_from_joint_state_message(
conversions.msg_to_string(arg1)
):
raise MoveItCommanderException(
"Error setting joint target. Is the target within bounds?"
)
elif isinstance(arg1, str):
if arg2 is None:
raise MoveItCommanderException(
"Joint value expected when joint name specified"
)
if arg3 is not None:
raise MoveItCommanderException("Too many arguments specified")
if not self._g.set_joint_value_target(arg1, arg2):
raise MoveItCommanderException(
"Error setting joint target. Is the target within bounds?"
)
elif isinstance(arg1, (Pose, PoseStamped)):
approx = False
eef = ""
if arg2 is not None:
if type(arg2) is str:
eef = arg2
else:
if type(arg2) is bool:
approx = arg2
else:
raise MoveItCommanderException("Unexpected type")
if arg3 is not None:
if type(arg3) is str:
eef = arg3
else:
if type(arg3) is bool:
approx = arg3
else:
raise MoveItCommanderException("Unexpected type")
r = False
if type(arg1) is PoseStamped:
r = self._g.set_joint_value_target_from_pose_stamped(
conversions.msg_to_string(arg1), eef, approx
)
else:
r = self._g.set_joint_value_target_from_pose(
conversions.msg_to_string(arg1), eef, approx
)
if not r:
if approx:
raise MoveItCommanderException(
"Error setting joint target. Does your IK solver support approximate IK?"
)
else:
raise MoveItCommanderException(
"Error setting joint target. Is the IK solver functional?"
)
elif hasattr(arg1, "__iter__"):
if arg2 is not None or arg3 is not None:
raise MoveItCommanderException("Too many arguments specified")
if not self._g.set_joint_value_target(arg1):
raise MoveItCommanderException(
"Error setting joint target. Is the target within bounds?"
)
else:
raise MoveItCommanderException(
"Unsupported argument of type %s" % type(arg1)
)
def set_rpy_target(self, rpy, end_effector_link=""):
""" Specify a target orientation for the end-effector. Any position of the end-effector is acceptable."""
if len(end_effector_link) > 0 or self.has_end_effector_link():
if len(rpy) == 3:
if not self._g.set_rpy_target(
rpy[0], rpy[1], rpy[2], end_effector_link
):
raise MoveItCommanderException("Unable to set orientation target")
else:
raise MoveItCommanderException("Expected [roll, pitch, yaw]")
else:
raise MoveItCommanderException(
"There is no end effector to set the pose for"
)
def set_orientation_target(self, q, end_effector_link=""):
""" Specify a target orientation for the end-effector. Any position of the end-effector is acceptable."""
if len(end_effector_link) > 0 or self.has_end_effector_link():
if len(q) == 4:
if not self._g.set_orientation_target(
q[0], q[1], q[2], q[3], end_effector_link
):
raise MoveItCommanderException("Unable to set orientation target")
else:
raise MoveItCommanderException("Expected [qx, qy, qz, qw]")
else:
raise MoveItCommanderException(
"There is no end effector to set the pose for"
)
def set_position_target(self, xyz, end_effector_link=""):
""" Specify a target position for the end-effector. Any orientation of the end-effector is acceptable."""
if len(end_effector_link) > 0 or self.has_end_effector_link():
if not self._g.set_position_target(
xyz[0], xyz[1], xyz[2], end_effector_link
):
raise MoveItCommanderException("Unable to set position target")
else:
raise MoveItCommanderException(
"There is no end effector to set the pose for"
)
def set_pose_target(self, pose, end_effector_link=""):
""" Set the pose of the end-effector, if one is available. The expected input is a Pose message, a PoseStamped message or a list of 6 floats:"""
""" [x, y, z, rot_x, rot_y, rot_z] or a list of 7 floats [x, y, z, qx, qy, qz, qw] """
if len(end_effector_link) > 0 or self.has_end_effector_link():
ok = False
if type(pose) is PoseStamped:
old = self.get_pose_reference_frame()
self.set_pose_reference_frame(pose.header.frame_id)
ok = self._g.set_pose_target(
conversions.pose_to_list(pose.pose), end_effector_link
)
self.set_pose_reference_frame(old)
elif type(pose) is Pose:
ok = self._g.set_pose_target(
conversions.pose_to_list(pose), end_effector_link
)
else:
ok = self._g.set_pose_target(pose, end_effector_link)
if not ok:
raise MoveItCommanderException("Unable to set target pose")
else:
raise MoveItCommanderException(
"There is no end effector to set the pose for"
)
def set_pose_targets(self, poses, end_effector_link=""):
""" Set the pose of the end-effector, if one is available. The expected input is a list of poses. Each pose can be a Pose message, a list of 6 floats: [x, y, z, rot_x, rot_y, rot_z] or a list of 7 floats [x, y, z, qx, qy, qz, qw] """
if len(end_effector_link) > 0 or self.has_end_effector_link():
if not self._g.set_pose_targets(
[conversions.pose_to_list(p) if type(p) is Pose else p for p in poses],
end_effector_link,
):
raise MoveItCommanderException("Unable to set target poses")
else:
raise MoveItCommanderException("There is no end effector to set poses for")
def shift_pose_target(self, axis, value, end_effector_link=""):
""" Get the current pose of the end effector, add value to the corresponding axis (0..5: X, Y, Z, R, P, Y) and set the new pose as the pose target """
if len(end_effector_link) > 0 or self.has_end_effector_link():
pose = self._g.get_current_pose(end_effector_link)
# by default we get orientation as a quaternion list
# if we are updating a rotation axis however, we convert the orientation to RPY
if axis > 2:
(r, p, y) = tf.transformations.euler_from_quaternion(pose[3:])
pose = [pose[0], pose[1], pose[2], r, p, y]
if axis >= 0 and axis < 6:
pose[axis] = pose[axis] + value
self.set_pose_target(pose, end_effector_link)
else:
raise MoveItCommanderException("An axis value between 0 and 5 expected")
else:
raise MoveItCommanderException("There is no end effector to set poses for")
def clear_pose_target(self, end_effector_link):
""" Clear the pose target for a particular end-effector """
self._g.clear_pose_target(end_effector_link)
def clear_pose_targets(self):
""" Clear all known pose targets """
self._g.clear_pose_targets()
def set_random_target(self):
""" Set a random joint configuration target """
self._g.set_random_target()
def get_named_targets(self):
""" Get a list of all the names of joint configurations."""
return self._g.get_named_targets()
def set_named_target(self, name):
""" Set a joint configuration by name. The name can be a name previlusy remembered with remember_joint_values() or a configuration specified in the SRDF. """
if not self._g.set_named_target(name):
raise MoveItCommanderException(
"Unable to set target %s. Is the target within bounds?" % name
)
def get_named_target_values(self, target):
"""Get a dictionary of joint values of a named target"""
return self._g.get_named_target_values(target)
def remember_joint_values(self, name, values=None):
""" Record the specified joint configuration of the group under the specified name. If no values are specified, the current state of the group is recorded. """
if values is None:
values = self.get_current_joint_values()
self._g.remember_joint_values(name, values)
def get_remembered_joint_values(self):
""" Get a dictionary that maps names to joint configurations for the group """
return self._g.get_remembered_joint_values()
def forget_joint_values(self, name):
""" Forget a stored joint configuration """
self._g.forget_joint_values(name)
def get_goal_tolerance(self):
""" Return a tuple of goal tolerances: joint, position and orientation. """
return (
self.get_goal_joint_tolerance(),
self.get_goal_position_tolerance(),
self.get_goal_orientation_tolerance(),
)
def get_goal_joint_tolerance(self):
""" Get the tolerance for achieving a joint goal (distance for each joint variable) """
return self._g.get_goal_joint_tolerance()
def get_goal_position_tolerance(self):
""" When moving to a position goal or to a pose goal, the tolerance for the goal position is specified as the radius a sphere around the target origin of the end-effector """
return self._g.get_goal_position_tolerance()
def get_goal_orientation_tolerance(self):
""" When moving to an orientation goal or to a pose goal, the tolerance for the goal orientation is specified as the distance (roll, pitch, yaw) to the target origin of the end-effector """
return self._g.get_goal_orientation_tolerance()
def set_goal_tolerance(self, value):
""" Set the joint, position and orientation goal tolerances simultaneously """
self._g.set_goal_tolerance(value)
def set_goal_joint_tolerance(self, value):
""" Set the tolerance for a target joint configuration """
self._g.set_goal_joint_tolerance(value)
def set_goal_position_tolerance(self, value):
""" Set the tolerance for a target end-effector position """
self._g.set_goal_position_tolerance(value)
def set_goal_orientation_tolerance(self, value):
""" Set the tolerance for a target end-effector orientation """
self._g.set_goal_orientation_tolerance(value)
def allow_looking(self, value):
""" Enable/disable looking around for motion planning """
self._g.allow_looking(value)
def allow_replanning(self, value):
""" Enable/disable replanning """
self._g.allow_replanning(value)
def get_known_constraints(self):
""" Get a list of names for the constraints specific for this group, as read from the warehouse """
return self._g.get_known_constraints()
def get_path_constraints(self):
""" Get the acutal path constraints in form of a moveit_msgs.msgs.Constraints """
c = Constraints()
c_str = self._g.get_path_constraints()
conversions.msg_from_string(c, c_str)
return c
def set_path_constraints(self, value):
""" Specify the path constraints to be used (as read from the database) """
if value is None:
self.clear_path_constraints()
else:
if type(value) is Constraints:
self._g.set_path_constraints_from_msg(conversions.msg_to_string(value))
elif not self._g.set_path_constraints(value):
raise MoveItCommanderException(
"Unable to set path constraints " + value
)
def clear_path_constraints(self):
""" Specify that no path constraints are to be used during motion planning """
self._g.clear_path_constraints()
def get_trajectory_constraints(self):
""" Get the actual trajectory constraints in form of a moveit_msgs.msgs.TrajectoryConstraints """
c = TrajectoryConstraints()
c_str = self._g.get_trajectory_constraints()
conversions.msg_from_string(c, c_str)
return c
def set_trajectory_constraints(self, value):
""" Specify the trajectory constraints to be used (setting from database is not implemented yet)"""
if value is None:
self.clear_trajectory_constraints()
else:
if type(value) is TrajectoryConstraints:
self._g.set_trajectory_constraints_from_msg(
conversions.msg_to_string(value)
)
else:
raise MoveItCommanderException(
"Unable to set trajectory constraints " + value
)
def clear_trajectory_constraints(self):
""" Specify that no trajectory constraints are to be used during motion planning """
self._g.clear_trajectory_constraints()
def set_constraints_database(self, host, port):
""" Specify which database to connect to for loading possible path constraints """
self._g.set_constraints_database(host, port)
def set_planning_time(self, seconds):
""" Specify the amount of time to be used for motion planning. """
self._g.set_planning_time(seconds)
def get_planning_time(self):
""" Specify the amount of time to be used for motion planning. """
return self._g.get_planning_time()
def set_planning_pipeline_id(self, planning_pipeline):
""" Specify which planning pipeline to use when motion planning (e.g. ompl, pilz_industrial_motion_planner) """
self._g.set_planning_pipeline_id(planning_pipeline)
def get_planning_pipeline_id(self):
""" Get the current planning_pipeline_id (e.g. ompl, pilz_industrial_motion_planner) """
return self._g.get_planning_pipeline_id()
def set_planner_id(self, planner_id):
""" Specify which planner of the currently selected pipeline to use when motion planning (e.g. RRTConnect, LIN) """
self._g.set_planner_id(planner_id)
def get_planner_id(self):
""" Get the current planner_id (e.g. RRTConnect, LIN) of the currently selected pipeline """
return self._g.get_planner_id()
def set_num_planning_attempts(self, num_planning_attempts):
""" Set the number of times the motion plan is to be computed from scratch before the shortest solution is returned. The default value is 1. """
self._g.set_num_planning_attempts(num_planning_attempts)
def set_workspace(self, ws):
""" Set the workspace for the robot as either [], [minX, minY, maxX, maxY] or [minX, minY, minZ, maxX, maxY, maxZ] """
if len(ws) == 0:
self._g.set_workspace(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
else:
if len(ws) == 4:
self._g.set_workspace(ws[0], ws[1], 0.0, ws[2], ws[3], 0.0)
else:
if len(ws) == 6:
self._g.set_workspace(ws[0], ws[1], ws[2], ws[3], ws[4], ws[5])
else:
raise MoveItCommanderException(
"Expected 0, 4 or 6 values in list specifying workspace"
)
def set_max_velocity_scaling_factor(self, value):
"""Set a scaling factor to reduce the maximum joint velocities. Allowed values are in (0,1].
The default value is set in the joint_limits.yaml of the moveit_config package."""
if value > 0 and value <= 1:
self._g.set_max_velocity_scaling_factor(value)
else:
raise MoveItCommanderException(
"Expected value in the range from 0 to 1 for scaling factor"
)
def set_max_acceleration_scaling_factor(self, value):
"""Set a scaling factor to reduce the maximum joint accelerations. Allowed values are in (0,1].
The default value is set in the joint_limits.yaml of the moveit_config package."""
if value > 0 and value <= 1:
self._g.set_max_acceleration_scaling_factor(value)
else:
raise MoveItCommanderException(
"Expected value in the range from 0 to 1 for scaling factor"
)
def go(self, joints=None, wait=True):
""" Set the target of the group and then move the group to the specified target """
if type(joints) is bool:
wait = joints
joints = None
elif type(joints) is JointState:
self.set_joint_value_target(joints)
elif type(joints) is Pose:
self.set_pose_target(joints)
elif joints is not None:
try:
self.set_joint_value_target(self.get_remembered_joint_values()[joints])
except (KeyError, TypeError):
self.set_joint_value_target(joints)
if wait:
return self._g.move()
else:
return self._g.async_move()
def plan(self, joints=None):
"""Return a tuple of the motion planning results such as
(success flag : boolean, trajectory message : RobotTrajectory,
        planning time : float, error code : MoveItErrorCodes)"""
if type(joints) is JointState:
self.set_joint_value_target(joints)
elif type(joints) is Pose:
self.set_pose_target(joints)
elif joints is not None:
try:
self.set_joint_value_target(self.get_remembered_joint_values()[joints])
except MoveItCommanderException:
self.set_joint_value_target(joints)
(error_code_msg, trajectory_msg, planning_time) = self._g.plan()
error_code = MoveItErrorCodes()
error_code.deserialize(error_code_msg)
plan = RobotTrajectory()
return (
error_code.val == MoveItErrorCodes.SUCCESS,
plan.deserialize(trajectory_msg),
planning_time,
error_code,
)
def construct_motion_plan_request(self):
""" Returns a MotionPlanRequest filled with the current goals of the move_group_interface"""
mpr = MotionPlanRequest()
return mpr.deserialize(self._g.construct_motion_plan_request())
def compute_cartesian_path(
self,
waypoints,
eef_step,
jump_threshold,
avoid_collisions=True,
path_constraints=None,
):
""" Compute a sequence of waypoints that make the end-effector move in straight line segments that follow the poses specified as waypoints. Configurations are computed for every eef_step meters; The jump_threshold specifies the maximum distance in configuration space between consecutive points in the resultingpath; Kinematic constraints for the path given by path_constraints will be met for every point along the trajectory, if they are not met, a partial solution will be returned. The return value is a tuple: a fraction of how much of the path was followed, the actual RobotTrajectory. """
if path_constraints:
if type(path_constraints) is Constraints:
constraints_str = conversions.msg_to_string(path_constraints)
else:
raise MoveItCommanderException(
"Unable to set path constraints, unknown constraint type "
+ type(path_constraints)
)
(ser_path, fraction) = self._g.compute_cartesian_path(
[conversions.pose_to_list(p) for p in waypoints],
eef_step,
jump_threshold,
avoid_collisions,
constraints_str,
)
else:
(ser_path, fraction) = self._g.compute_cartesian_path(
[conversions.pose_to_list(p) for p in waypoints],
eef_step,
jump_threshold,
avoid_collisions,
)
path = RobotTrajectory()
path.deserialize(ser_path)
return (path, fraction)
def execute(self, plan_msg, wait=True):
"""Execute a previously planned path"""
if wait:
return self._g.execute(conversions.msg_to_string(plan_msg))
else:
return self._g.async_execute(conversions.msg_to_string(plan_msg))
def attach_object(self, object_name, link_name="", touch_links=[]):
""" Given the name of an object existing in the planning scene, attach it to a link. The link used is specified by the second argument. If left unspecified, the end-effector link is used, if one is known. If there is no end-effector link, the first link in the group is used. If no link is identified, failure is reported. True is returned if an attach request was succesfully sent to the move_group node. This does not verify that the attach request also was successfuly applied by move_group."""
return self._g.attach_object(object_name, link_name, touch_links)
def detach_object(self, name=""):
""" Given the name of a link, detach the object(s) from that link. If no such link exists, the name is interpreted as an object name. If there is no name specified, an attempt is made to detach all objects attached to any link in the group."""
return self._g.detach_object(name)
def pick(self, object_name, grasp=[], plan_only=False):
"""Pick the named object. A grasp message, or a list of Grasp messages can also be specified as argument."""
if type(grasp) is Grasp:
return self._g.pick(
object_name, conversions.msg_to_string(grasp), plan_only
)
else:
return self._g.pick(
object_name, [conversions.msg_to_string(x) for x in grasp], plan_only
)
def place(self, object_name, location=None, plan_only=False):
"""Place the named object at a particular location in the environment or somewhere safe in the world if location is not provided"""
result = False
if not location:
result = self._g.place(object_name, plan_only)
elif type(location) is PoseStamped:
old = self.get_pose_reference_frame()
self.set_pose_reference_frame(location.header.frame_id)
result = self._g.place(
object_name, conversions.pose_to_list(location.pose), plan_only
)
self.set_pose_reference_frame(old)
elif type(location) is Pose:
result = self._g.place(
object_name, conversions.pose_to_list(location), plan_only
)
elif type(location) is PlaceLocation:
result = self._g.place(
object_name, conversions.msg_to_string(location), plan_only
)
elif type(location) is list:
if location:
if type(location[0]) is PlaceLocation:
result = self._g.place_locations_list(
object_name,
[conversions.msg_to_string(x) for x in location],
plan_only,
)
elif type(location[0]) is PoseStamped:
result = self._g.place_poses_list(
object_name,
[conversions.msg_to_string(x) for x in location],
plan_only,
)
else:
raise MoveItCommanderException(
"Parameter location must be a Pose, PoseStamped, PlaceLocation, list of PoseStamped or list of PlaceLocation object"
)
else:
raise MoveItCommanderException(
"Parameter location must be a Pose, PoseStamped, PlaceLocation, list of PoseStamped or list of PlaceLocation object"
)
return result
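    # Example (illustrative; `group` and `target` are assumed names, with
    # `target` a geometry_msgs PoseStamped):
    #     group.set_support_surface_name("table")
    #     group.place("part", target)     # place at a specific stamped pose
    #     group.place("part")             # let move_group pick a safe pose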
def set_support_surface_name(self, value):
""" Set the support surface name for a place operation """
self._g.set_support_surface_name(value)
def retime_trajectory(
self,
ref_state_in,
traj_in,
velocity_scaling_factor=1.0,
acceleration_scaling_factor=1.0,
algorithm="iterative_time_parameterization",
):
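        """Re-time the given RobotTrajectory, using ref_state_in as the start
        state and applying the requested velocity/acceleration scaling factors
        with the selected time-parameterization algorithm; returns the re-timed
        RobotTrajectory."""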
ser_ref_state_in = conversions.msg_to_string(ref_state_in)
ser_traj_in = conversions.msg_to_string(traj_in)
ser_traj_out = self._g.retime_trajectory(
ser_ref_state_in,
ser_traj_in,
velocity_scaling_factor,
acceleration_scaling_factor,
algorithm,
)
traj_out = RobotTrajectory()
traj_out.deserialize(ser_traj_out)
return traj_out
def get_jacobian_matrix(self, joint_values, reference_point=None):
""" Get the jacobian matrix of the group as a list"""
return self._g.get_jacobian_matrix(
joint_values,
[0.0, 0.0, 0.0] if reference_point is None else reference_point,
)
def enforce_bounds(self, robot_state_msg):
""" Takes a moveit_msgs RobotState and enforces the state bounds, based on the C++ RobotState enforceBounds() """
s = RobotState()
c_str = self._g.enforce_bounds(conversions.msg_to_string(robot_state_msg))
conversions.msg_from_string(s, c_str)
return s
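# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): ties the
# Cartesian-path, re-timing, and execution calls above together. It assumes a
# running ROS master with a MoveIt configuration that defines a group named
# "manipulator"; the group name and the 10 cm z-offset are arbitrary example
# values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import copy
    import sys
    import rospy
    import moveit_commander

    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node("cartesian_path_demo", anonymous=True)
    robot = moveit_commander.RobotCommander()
    group = moveit_commander.MoveGroupCommander("manipulator")

    # Two waypoints: the current pose and one 10 cm lower.
    wpose = group.get_current_pose().pose
    waypoints = [copy.deepcopy(wpose)]
    wpose.position.z -= 0.10
    waypoints.append(copy.deepcopy(wpose))

    # Plan the Cartesian path, slow it down, and execute if (nearly) complete.
    plan, fraction = group.compute_cartesian_path(waypoints, 0.01, 0.0)
    plan = group.retime_trajectory(robot.get_current_state(), plan,
                                   velocity_scaling_factor=0.5)
    if fraction > 0.99:
        group.execute(plan, wait=True)
    moveit_commander.roscpp_shutdown()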
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import six
import socket
import acos_client
from acos_client import errors as acos_errors
from acos_client.v21 import axapi_http as v21_http
from acos_client.v21.dns import DNS as v21_DNS
from acos_client.v21.ha import HA as v21_HA
from acos_client.v21.interface import Interface as v21_Interface
from acos_client.v21.license_manager import LicenseManager as v21_LicenseManager
from acos_client.v21.nat import Nat as v21_Nat
from acos_client.v21.network import Network as v21_Network
from acos_client.v21.session import Session as v21_Session
from acos_client.v21.sflow import SFlow as v21_SFlow
from acos_client.v21.slb import SLB as v21_SLB
from acos_client.v21.system import System as v21_System
from acos_client.v21.vrrp_a import VRRPA as v21_VRRPA
from acos_client.v30 import axapi_http as v30_http
from acos_client.v30.delete.delete import Delete
from acos_client.v30.device_context import DeviceContext as v30_DeviceContext
from acos_client.v30.dns import DNS as v30_DNS
from acos_client.v30.file import File as v30_File
from acos_client.v30.glm.flexpool import Flexpool as Flexpool
from acos_client.v30.ha import HA as v30_HA
from acos_client.v30.interface import Interface as v30_Interface
from acos_client.v30.license_manager import LicenseManager as v30_LicenseManager
from acos_client.v30.nat import Nat as v30_Nat
from acos_client.v30.network import Network as v30_Network
from acos_client.v30.overlay import Overlay as v30_Overlay
from acos_client.v30.route import RIB as v30_RIB
from acos_client.v30.session import Session as v30_Session
from acos_client.v30.sflow import SFlow as v30_SFlow
from acos_client.v30.slb import SLB as v30_SLB
from acos_client.v30.system import System as v30_System
from acos_client.v30.vlan import Vlan as v30_Vlan
from acos_client.v30.vrrpa.vrid import VRID as v30_VRRPA
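# Dispatch table mapping the normalized AXAPI version string ('21' or '30') to
# the module or class backing each client attribute; a value of None (e.g.
# 'Vlan' under '21') means that feature has no equivalent in that API version.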
VERSION_IMPORTS = {
'21': {
'DNS': v21_DNS,
'http': v21_http,
'HA': v21_HA,
'Interface': v21_Interface,
'LicenseManager': v21_LicenseManager,
'Nat': v21_Nat,
'Network': v21_Network,
'Session': v21_Session,
'SFlow': v21_SFlow,
'SLB': v21_SLB,
'System': v21_System,
'Vlan': None,
'VRRPA': v21_VRRPA
},
'30': {
'DNS': v30_DNS,
'http': v30_http,
'Interface': v30_Interface,
'HA': v30_HA,
'LicenseManager': v30_LicenseManager,
'Nat': v30_Nat,
'Network': v30_Network,
'Overlay': v30_Overlay,
'RIB': v30_RIB,
'Session': v30_Session,
'SFlow': v30_SFlow,
'SLB': v30_SLB,
'System': v30_System,
'File': v30_File,
'Vlan': v30_Vlan,
'VRRPA': v30_VRRPA,
'DeviceContext': v30_DeviceContext,
'Flexpool': Flexpool,
'Delete': Delete,
},
}
LOG = logging.getLogger(__name__)
class Client(object):
def __init__(
self,
host, # ip address or name of the A10 device
version, # either 21 or 30
username, # username to use for authenticating to the A10 device
password, # password to use for authenticating to the A10 device
max_retries=3, # number of times to retry a connection before giving up
port=None, # TCP port to use for connecting to the A10 device
protocol="https", # transport protocol - http or https, encryption recommended
timeout=5 # seconds to wait for return data before giving up
):
self._version = self._just_digits(version)
if self._version not in acos_client.AXAPI_VERSIONS:
raise acos_errors.ACOSUnsupportedVersion()
self.max_retries = max_retries
self.timeout = timeout
self.host = host
self.port = port
self.http = VERSION_IMPORTS[self._version]['http'].HttpClient(
host, port, protocol, max_retries=self.max_retries, timeout=timeout
)
self.session = VERSION_IMPORTS[self._version]['Session'](self, username, password)
self.current_partition = 'shared'
def _just_digits(self, s):
return ''.join(i for i in str(s) if i.isdigit())
@property
def dns(self):
return VERSION_IMPORTS[self._version]['DNS'](self)
@property
def ha(self):
return VERSION_IMPORTS[self._version]['HA'](self)
@property
def interface(self):
return VERSION_IMPORTS[self._version]['Interface'](self)
@property
def system(self):
return VERSION_IMPORTS[self._version]['System'](self)
@property
def slb(self):
return VERSION_IMPORTS[self._version]['SLB'](self)
@property
def network(self):
return VERSION_IMPORTS[self._version]['Network'](self)
@property
def nat(self):
return VERSION_IMPORTS[self._version]['Nat'](self)
@property
def file(self):
return VERSION_IMPORTS[self._version]['File'](self)
@property
def sflow(self):
return VERSION_IMPORTS[self._version]['SFlow'](self)
@property
def license_manager(self):
LOG.warning("The paygo method been deprecated and will be removed "
"in a future release.")
return VERSION_IMPORTS[self._version]["LicenseManager"](self)
@property
def glm(self):
if self._version != '30':
LOG.error("AXAPIv21 is not supported for the glm attribute")
return
return VERSION_IMPORTS['30']["Flexpool"](self)
@property
def overlay(self):
return VERSION_IMPORTS[self._version]["Overlay"](self)
@property
def vlan(self):
return VERSION_IMPORTS[self._version]["Vlan"](self)
@property
def route(self):
return VERSION_IMPORTS[self._version]["RIB"](self)
@property
def vrrpa(self):
return VERSION_IMPORTS[self._version]["VRRPA"](self)
@property
def device_context(self):
return VERSION_IMPORTS[self._version]["DeviceContext"](self)
@property
def delete(self):
if self._version != '30':
LOG.error("AXAPIv21 is not supported for the delete attribute")
return
return VERSION_IMPORTS['30']["Delete"](self)
def wait_for_connect(self, max_timeout=60):
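        """Poll the device's TCP port, with a 1.0-second connect timeout per
        attempt, for up to max_timeout attempts; returns as soon as a
        connection succeeds and gives up silently otherwise."""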
for i in six.moves.range(0, max_timeout):
try:
LOG.debug("wait_for_connect: attempting %s", self.host)
s = socket.create_connection((self.host, self.port), 1.0)
s.close()
LOG.debug("wait_for_connect: connected %s", self.host)
break
except socket.error:
pass
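# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The host,
# credentials, and port below are placeholders; a reachable A10 device is
# required for any real calls.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    c = Client("192.0.2.10", "30", "admin", "a10", port=443, timeout=10)
    c.wait_for_connect(max_timeout=30)   # block until the management port answers
    slb = c.slb                          # AXAPI-version-specific SLB API object
    print(type(slb).__name__)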