import pwd
import os
from ooni.managers import ReportEntryManager, MeasurementManager
from ooni.reporter import Report
from ooni.utils import log, generate_filename
from ooni.utils.net import randomFreePort
from ooni.nettest import NetTest, getNetTestInformation
from ooni.settings import config
from ooni import errors
from ooni.nettest import test_class_name_to_name
from txtorcon import TorConfig, TorState, launch_tor, build_tor_connection
from twisted.internet import defer, reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
class Director(object):
"""
Singleton object responsible for coordinating the Measurements Manager
and the Reporting Manager.
    How this all fits together is shown below:
+------------------------------------------------+
| Director |<--+
+------------------------------------------------+ |
^ ^ |
| Measurement | |
+---------+ [---------] +--------------------+ |
| | | MeasurementManager | |
| NetTest | [---------] +--------------------+ |
| | | [----------------] | |
+---------+ [---------] | [----------------] | |
| | [----------------] | |
| +--------------------+ |
v |
+---------+ ReportEntry |
| | [---------] +--------------------+ |
| Report | | ReportEntryManager | |
| | [---------] +--------------------+ |
+---------+ | [----------------] | |
[---------] | [----------------] |--
| [----------------] |
+--------------------+
[------------] are Tasks
+------+
| | are TaskManagers
+------+
| |
+------+
+------+
| | are general purpose objects
+------+
"""
_scheduledTests = 0
# Only list NetTests belonging to these categories
categories = ['blocking', 'manipulation']
def __init__(self):
self.activeNetTests = []
self.measurementManager = MeasurementManager()
self.measurementManager.director = self
self.reportEntryManager = ReportEntryManager()
self.reportEntryManager.director = self
        # Link the TaskManagers by least available slots.
self.measurementManager.child = self.reportEntryManager
# Notify the parent when tasks complete # XXX deadlock!?
self.reportEntryManager.parent = self.measurementManager
self.successfulMeasurements = 0
self.failedMeasurements = 0
self.totalMeasurements = 0
# The cumulative runtime of all the measurements
self.totalMeasurementRuntime = 0
self.failures = []
self.torControlProtocol = None
# This deferred is fired once all the measurements and their reporting
# tasks are completed.
self.allTestsDone = defer.Deferred()
self.sniffers = {}
def getNetTests(self):
nettests = {}
def is_nettest(filename):
return not filename == '__init__.py' and filename.endswith('.py')
for category in self.categories:
dirname = os.path.join(config.nettest_directory, category)
            # Iterate over every file in this category's directory.
for filename in os.listdir(dirname):
if is_nettest(filename):
net_test_file = os.path.join(dirname, filename)
try:
nettest = getNetTestInformation(net_test_file)
                    except Exception:
log.err("Error processing %s" % filename)
continue
nettest['category'] = category.replace('/', '')
if nettest['id'] in nettests:
log.err("Found a two tests with the same name %s, %s" %
(net_test_file,
nettests[nettest['id']]['path']))
else:
category = dirname.replace(config.nettest_directory,
'')
nettests[nettest['id']] = nettest
return nettests
@defer.inlineCallbacks
def start(self, start_tor=False, check_incoherences=True):
self.netTests = self.getNetTests()
if start_tor:
if check_incoherences:
yield config.check_tor()
if config.advanced.start_tor:
yield self.startTor()
elif config.tor.control_port:
log.msg("Connecting to Tor Control Port...")
yield self.getTorState()
if config.global_options['no-geoip']:
aux = [False]
if config.global_options.get('annotations') is not None:
annotations = [k.lower() for k in config.global_options['annotations'].keys()]
aux = map(lambda x: x in annotations, ["city", "country", "asn"])
if not all(aux):
log.msg("You should add annotations for the country, city and ASN")
else:
yield config.probe_ip.lookup()
@property
def measurementSuccessRatio(self):
if self.totalMeasurements == 0:
return 0
        return float(self.successfulMeasurements) / self.totalMeasurements
@property
def measurementFailureRatio(self):
if self.totalMeasurements == 0:
return 0
        return float(self.failedMeasurements) / self.totalMeasurements
@property
def measurementSuccessRate(self):
"""
The speed at which tests are succeeding globally.
This means that fast tests that perform a lot of measurements will
impact this value quite heavily.
"""
if self.totalMeasurementRuntime == 0:
return 0
return self.successfulMeasurements / self.totalMeasurementRuntime
@property
def measurementFailureRate(self):
"""
The speed at which tests are failing globally.
"""
if self.totalMeasurementRuntime == 0:
return 0
return self.failedMeasurements / self.totalMeasurementRuntime
def measurementTimedOut(self, measurement):
"""
        This gets called every time a measurement times out, independently of
        whether or not it gets re-scheduled.
"""
pass
def measurementStarted(self, measurement):
self.totalMeasurements += 1
def measurementSucceeded(self, result, measurement):
log.debug("Successfully completed measurement: %s" % measurement)
self.totalMeasurementRuntime += measurement.runtime
self.successfulMeasurements += 1
measurement.result = result
test_name = test_class_name_to_name(measurement.testInstance.name)
if test_name in self.sniffers:
sniffer = self.sniffers[test_name]
config.scapyFactory.unRegisterProtocol(sniffer)
sniffer.close()
del self.sniffers[test_name]
return measurement
def measurementFailed(self, failure, measurement):
log.debug("Failed doing measurement: %s" % measurement)
self.totalMeasurementRuntime += measurement.runtime
self.failedMeasurements += 1
measurement.result = failure
return measurement
def reporterFailed(self, failure, net_test):
"""
        This gets called every time a reporter fails and has been removed
        from the reporters of a NetTest.
        Once a report has failed to be created, that net_test will never use
        the reporter again.
XXX hook some logic here.
note: failure contains an extra attribute called failure.reporter
"""
pass
def netTestDone(self, net_test):
self.activeNetTests.remove(net_test)
if len(self.activeNetTests) == 0:
self.allTestsDone.callback(None)
@defer.inlineCallbacks
def startNetTest(self, net_test_loader, report_filename,
collector_address=None):
"""
Create the Report for the NetTest and start the report NetTest.
Args:
net_test_loader:
an instance of :class:ooni.nettest.NetTestLoader
"""
if self.allTestsDone.called:
self.allTestsDone = defer.Deferred()
if config.privacy.includepcap:
self.startSniffing(net_test_loader.testDetails)
report = Report(net_test_loader.testDetails, report_filename,
self.reportEntryManager, collector_address)
net_test = NetTest(net_test_loader, report)
net_test.director = self
yield net_test.report.open()
yield net_test.initializeInputProcessor()
try:
self.activeNetTests.append(net_test)
self.measurementManager.schedule(net_test.generateMeasurements())
yield net_test.done
yield report.close()
finally:
self.netTestDone(net_test)
def startSniffing(self, testDetails):
""" Start sniffing with Scapy. Exits if required privileges (root) are not
available.
"""
from ooni.utils.txscapy import ScapySniffer, ScapyFactory
if config.scapyFactory is None:
config.scapyFactory = ScapyFactory(config.advanced.interface)
if not config.reports.pcap:
prefix = 'report'
else:
prefix = config.reports.pcap
filename = config.global_options['reportfile'] if 'reportfile' in config.global_options.keys() else None
filename_pcap = generate_filename(testDetails, filename=filename, prefix=prefix, extension='pcap')
if len(self.sniffers) > 0:
pcap_filenames = set(sniffer.pcapwriter.filename for sniffer in self.sniffers.values())
pcap_filenames.add(filename_pcap)
log.msg("pcap files %s can be messed up because several netTests are being executed in parallel." %
','.join(pcap_filenames))
sniffer = ScapySniffer(filename_pcap)
self.sniffers[testDetails['test_name']] = sniffer
config.scapyFactory.registerProtocol(sniffer)
log.msg("Starting packet capture to: %s" % filename_pcap)
@defer.inlineCallbacks
def getTorState(self):
connection = TCP4ClientEndpoint(reactor, '127.0.0.1',
config.tor.control_port)
config.tor_state = yield build_tor_connection(connection)
def startTor(self):
""" Starts Tor
Launches a Tor with :param: socks_port :param: control_port
:param: tor_binary set in ooniprobe.conf
"""
log.msg("Starting Tor...")
@defer.inlineCallbacks
def state_complete(state):
config.tor_state = state
log.msg("Successfully bootstrapped Tor")
log.debug("We now have the following circuits: ")
for circuit in state.circuits.values():
log.debug(" * %s" % circuit)
socks_port = yield state.protocol.get_conf("SocksPort")
control_port = yield state.protocol.get_conf("ControlPort")
config.tor.socks_port = int(socks_port.values()[0])
config.tor.control_port = int(control_port.values()[0])
def setup_failed(failure):
log.exception(failure)
raise errors.UnableToStartTor
def setup_complete(proto):
"""
Called when we read from stdout that Tor has reached 100%.
"""
log.debug("Building a TorState")
config.tor.protocol = proto
state = TorState(proto.tor_protocol)
state.post_bootstrap.addCallback(state_complete)
state.post_bootstrap.addErrback(setup_failed)
return state.post_bootstrap
def updates(prog, tag, summary):
log.msg("%d%%: %s" % (prog, summary))
tor_config = TorConfig()
if config.tor.control_port:
tor_config.ControlPort = config.tor.control_port
if config.tor.socks_port:
tor_config.SocksPort = config.tor.socks_port
if config.tor.data_dir:
data_dir = os.path.expanduser(config.tor.data_dir)
if not os.path.exists(data_dir):
log.msg("%s does not exist. Creating it." % data_dir)
os.makedirs(data_dir)
tor_config.DataDirectory = data_dir
if config.tor.bridges:
tor_config.UseBridges = 1
if config.advanced.obfsproxy_binary:
tor_config.ClientTransportPlugin = (
'obfs2,obfs3 exec %s managed' %
config.advanced.obfsproxy_binary
)
bridges = []
with open(config.tor.bridges) as f:
for bridge in f:
if 'obfs' in bridge:
if config.advanced.obfsproxy_binary:
bridges.append(bridge.strip())
else:
bridges.append(bridge.strip())
tor_config.Bridge = bridges
if config.tor.torrc:
for i in config.tor.torrc.keys():
setattr(tor_config, i, config.tor.torrc[i])
if os.geteuid() == 0:
tor_config.User = pwd.getpwuid(os.geteuid()).pw_name
tor_config.save()
if not hasattr(tor_config, 'ControlPort'):
control_port = int(randomFreePort())
tor_config.ControlPort = control_port
config.tor.control_port = control_port
if not hasattr(tor_config, 'SocksPort'):
socks_port = int(randomFreePort())
tor_config.SocksPort = socks_port
config.tor.socks_port = socks_port
tor_config.save()
log.debug("Setting control port as %s" % tor_config.ControlPort)
log.debug("Setting SOCKS port as %s" % tor_config.SocksPort)
if config.advanced.tor_binary:
d = launch_tor(tor_config, reactor,
tor_binary=config.advanced.tor_binary,
progress_updates=updates)
else:
d = launch_tor(tor_config, reactor,
progress_updates=updates)
d.addCallback(setup_complete)
d.addErrback(setup_failed)
return d
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 Rosen Diankov <rosen.diankov@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_test_openrave import *
import imp
class TestConfigurationCache(EnvironmentSetup):
def setup(self):
EnvironmentSetup.setup(self)
# find out where configurationcache is installed
cachepath = None
for path, info in RaveGetPluginInfo():
pathdir, pathname = os.path.split(path)
if pathname.find('openravepy_configurationcache') >= 0:
cachepath = path
break
assert(cachepath is not None)
self.openravepy_configurationcache = imp.load_dynamic('openravepy_configurationcache',cachepath)
def test_insertandquery(self):
self.LoadEnv('data/lab1.env.xml')
env=self.env
robot=env.GetRobots()[0]
robot.SetActiveDOFs(range(7))
cache=self.openravepy_configurationcache.ConfigurationCache(robot)
values = robot.GetActiveDOFValues()
inserted = cache.InsertConfiguration(values, None)
assert(inserted and cache.GetNumNodes()==1)
values[1] = pi/2
inserted=cache.InsertConfiguration(values, None)
assert(inserted and cache.GetNumNodes()>=2)
assert(cache.Validate())
values[1] = pi/2-0.0001
robot.SetActiveDOFValues(values)
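        # Based on how the return value is used below: ret == 1 means the cache
        # reports a collision, ret == 0 means it reports free space, and
        # ret == -1 means the cache has no answer for this configuration.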
ret, closestdist, collisioninfo = cache.CheckCollision(values)
assert(ret==0)
originalvalues = array([0,pi/2,0,pi/6,0,0,0])
sampler = RaveCreateSpaceSampler(env, u'MT19937')
sampler.SetSpaceDOF(robot.GetActiveDOF())
report=CollisionReport()
with env:
for iter in range(0, 10000):
robot.SetActiveDOFValues(originalvalues + 0.05*(sampler.SampleSequence(SampleDataType.Real,1)-0.5))
samplevalues = robot.GetActiveDOFValues()
incollision = env.CheckCollision(robot, report=report)
inserted = cache.InsertConfiguration(samplevalues, report if incollision else None)
self.log.info('cache has %d nodes', cache.GetNumNodes())
assert(cache.Validate())
with env:
numspurious = 0
nummisses = 0
numtests = 1000
collisiontimes = []
cachetimes = []
for iter in range(0, numtests):
robot.SetActiveDOFValues(originalvalues + 0.05*(sampler.SampleSequence(SampleDataType.Real,1)-0.5))
samplevalues = robot.GetActiveDOFValues()
starttime=time.time()
ret, closestdist, collisioninfo = cache.CheckCollision(samplevalues)
midtime=time.time()
incollision = env.CheckCollision(robot, report=report)
endtime=time.time()
cachetimes.append(midtime-starttime)
collisiontimes.append(endtime-midtime)
if ret != -1:
assert(closestdist <= 1)
# might give spurious collision since cache is being conservative
if incollision != ret:
if ret == 1:
numspurious += 1
else:
# unexpected freespace
assert(0)
else:
nummisses += 1
            self.log.info('num spurious collisions=%d/%d, num misses = %d/%d, meancache=%fs, meancollision=%fs', numspurious, numtests, nummisses, numtests, mean(cachetimes), mean(collisiontimes))
assert(float(numspurious)/float(numtests)<=0.06)
assert(float(nummisses)/float(numtests)>0.1) # space is pretty big
assert(mean(cachetimes) < mean(collisiontimes)) # caching should be faster
def test_io(self):
env = self.env
with env:
self.LoadEnv('data/hironxtable.env.xml')
robot = env.GetRobots()[0]
manip = robot.SetActiveManipulator('leftarm_torso')
lmodel=databases.linkstatistics.LinkStatisticsModel(robot)
if not lmodel.load():
lmodel.autogenerate()
lmodel.setRobotWeights()
lmodel.setRobotResolutions(xyzdelta=0.04)
basemanip = interfaces.BaseManipulation(robot)
robot.SetActiveDOFs(manip.GetArmIndices())
goal = robot.GetActiveDOFValues()
goal[0] = -0.556
goal[3] = -1.86
oldchecker = env.GetCollisionChecker()
cachechecker = RaveCreateCollisionChecker(self.env,'CacheChecker')
success=cachechecker.SendCommand('TrackRobotState %s'%robot.GetName())
assert(success is not None)
env.SetCollisionChecker(cachechecker)
robot.SetSelfCollisionChecker(cachechecker)
sampler = RaveCreateSpaceSampler(env, u'RobotConfiguration %s'%robot.GetName())
sampler.SampleSequence(SampleDataType.Real,1)
report=CollisionReport()
cachechecker.SendCommand('ResetSelfCache')
stime = time.time()
confs = []
for iter in range(0, 500):
robot.SetActiveDOFValues(sampler.SampleSequence(SampleDataType.Real,1))
samplevalues = robot.GetActiveDOFValues()
confs.append(samplevalues)
if (iter%10==0):
self.log.info('checking self collisions %s...',iter)
self.log.info('writing cache to file...')
cachechecker.SendCommand('SaveCache')
env.GetCollisionChecker().CheckSelfCollision(robot, report=report)
rawtime = time.time()-stime
selfcachedcollisions, selfcachedcollisionhits, selfcachedfreehits, selfcachesize = cachechecker.SendCommand('GetSelfCacheStatistics').split()
self.log.info('selfcollisionhits=%s selffreehits=%s selfcachesize=%s in %ss', selfcachedcollisionhits, selfcachedfreehits, selfcachesize, rawtime)
self.log.info('writing cache to file...')
cachechecker.SendCommand('SaveCache')
def test_find_insert(self):
self.LoadEnv('data/lab1.env.xml')
env=self.env
robot=env.GetRobots()[0]
robot.SetActiveDOFs(range(7))
cache=self.openravepy_configurationcache.ConfigurationCache(robot)
cache.SetFreeSpaceThresh(1)
values = robot.GetActiveDOFValues()
inserted = cache.InsertConfiguration(values, None)
sampler = RaveCreateSpaceSampler(env, u'MT19937')
sampler.SetSpaceDOF(robot.GetActiveDOF())
with env:
self.log.info('testing exhaustive insertion...')
for iter in range(0, 10000):
if iter%1000==0:
self.log.info('%d valid insertions %d nodes...',iter,cache.GetNumNodes())
samplevalues = 0.3*(sampler.SampleSequence(SampleDataType.Real,1)-0.5)
nn = cache.FindNearestNode(samplevalues, 4)
if nn is None:
cache.SetFreeSpaceThresh(8)
inserted = cache.InsertConfigurationDist(samplevalues, None, 1)
assert(inserted == 1)
cache.SetFreeSpaceThresh(1)
self.log.info('exhaustive insertion test passed')
def test_updates(self):
env = self.env
with env:
self.LoadEnv('data/hironxtable.env.xml')
robot = env.GetRobots()[0]
manip = robot.SetActiveManipulator('leftarm_torso')
lmodel=databases.linkstatistics.LinkStatisticsModel(robot)
if not lmodel.load():
lmodel.autogenerate()
lmodel.setRobotWeights()
lmodel.setRobotResolutions(xyzdelta=0.01)
basemanip = interfaces.BaseManipulation(robot)
robot.SetActiveDOFs(manip.GetArmIndices())
goal = robot.GetActiveDOFValues()
goal[0] = -0.556
goal[3] = -1.86
self.log.info('testing cache updates...')
oldchecker = env.GetCollisionChecker()
cachechecker = RaveCreateCollisionChecker(self.env,'CacheChecker')
success=cachechecker.SendCommand('TrackRobotState %s'%robot.GetName())
assert(success is not None)
env.SetCollisionChecker(cachechecker)
robot.SetSelfCollisionChecker(cachechecker)
traj = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=1,execute=False,outputtrajobj=True)
cachedcollisions, cachedcollisionhits, cachedfreehits, oldcachesize = cachechecker.SendCommand('GetCacheStatistics').split()
self.env.Remove(self.env.GetBodies()[1])
cachedcollisions, cachedcollisionhits, cachedfreehits, cachesize = cachechecker.SendCommand('GetCacheStatistics').split()
            assert(int(oldcachesize) > int(cachesize))
self.log.info('environment removebody test passed (%s/%s)',cachesize,oldcachesize)
assert(int(cachechecker.SendCommand('ValidateCache')) == 1)
assert(int(cachechecker.SendCommand('ValidateSelfCache')) == 1)
self.log.info('valid tests passed')
traj = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=1,execute=False,outputtrajobj=True)
cachedcollisions, cachedcollisionhits, cachedfreehits, cachesize = cachechecker.SendCommand('GetCacheStatistics').split()
self.env.Reset()
cachedcollisions, cachedcollisionhits, cachedfreehits, addcachesize = cachechecker.SendCommand('GetCacheStatistics').split()
            assert(int(cachesize) > int(addcachesize))
self.log.info('environment addbody test passed (%s/%s)',addcachesize,cachesize)
assert(int(cachechecker.SendCommand('ValidateCache')) == 1)
assert(int(cachechecker.SendCommand('ValidateSelfCache')) == 1)
self.log.info('valid tests passed')
def test_planning(self):
env = self.env
with env:
self.LoadEnv('data/hironxtable.env.xml')
robot = env.GetRobots()[0]
manip = robot.SetActiveManipulator('leftarm_torso')
lmodel=databases.linkstatistics.LinkStatisticsModel(robot)
if not lmodel.load():
lmodel.autogenerate()
lmodel.setRobotWeights()
lmodel.setRobotResolutions(xyzdelta=0.004)
basemanip = interfaces.BaseManipulation(robot)
robot.SetActiveDOFs(manip.GetArmIndices())
goal = robot.GetActiveDOFValues()
goal[0] = -0.556
goal[3] = -1.86
self.log.info('testing planning...')
oldchecker = env.GetCollisionChecker()
cachechecker = RaveCreateCollisionChecker(self.env,'CacheChecker')
success=cachechecker.SendCommand('TrackRobotState %s'%robot.GetName())
assert(success is not None)
starttime = time.time()
traj = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=1,execute=False,outputtrajobj=True)
regtime = time.time()-starttime
self.log.info('time without cache %s',regtime)
cachechecker.SendCommand('ResetSelfCache')
env.SetCollisionChecker(cachechecker)
robot.SetSelfCollisionChecker(cachechecker)
cachedtimes = []
prevtime = float('Inf')
for runs in range(5):
starttime = time.time()
traj = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=1,execute=True,outputtrajobj=True)
cachetime = time.time()-starttime
cachedtimes.append(cachetime)
cachedcollisions, cachedcollisionhits, cachedfreehits, cachesize = cachechecker.SendCommand('GetCacheStatistics').split()
cacherate = float((float(cachedfreehits)+float(cachedcollisionhits))/float(cachedcollisions))
selfcachedcollisions, selfcachedcollisionhits, selfcachedfreehits, selfcachesize = cachechecker.SendCommand('GetSelfCacheStatistics').split()
selfcacherate = float((float(selfcachedfreehits)+float(selfcachedcollisionhits))/float(selfcachedcollisions))
self.log.info('planning time=%fs collisionhits=%s/%s freehits=%s/%s cachesize=%s selfcollisionhits=%s/%s selffreehits=%s/%s selfcachesize=%s', cachetime, cachedcollisionhits, cachedcollisions, cachedfreehits, cachedcollisions, cachesize, selfcachedcollisionhits, selfcachedcollisions, selfcachedfreehits, selfcachedcollisions, selfcachesize)
self.log.info('cacherate=%f selfcacherate=%f',cacherate,selfcacherate)
self.log.info('%s',cachechecker.SendCommand('GetCacheTimes'))
self.log.info('run %s', runs)
with robot:
parameters = Planner.PlannerParameters()
parameters.SetRobotActiveJoints(robot)
planningutils.VerifyTrajectory(parameters,traj,samplingstep=0.001)
self.log.info('trajectory test passed')
assert(cachetime < prevtime*1.5)
self.log.info('monotonic decrease test passed (%fs/%fs)',cachetime, prevtime)
prevtime = cachetime
assert(cacherate > 0.9 and selfcacherate > 0.9)
self.log.info('hitrate test passed (%f)(%f)',cacherate,selfcacherate)
env.SetCollisionChecker(oldchecker)
starttime = time.time()
traj2 = basemanip.MoveActiveJoints(goal=goal,maxiter=5000,steplength=0.01,maxtries=1,execute=False,outputtrajobj=True)
originaltime = time.time()-starttime
assert(originaltime*1.5 > cachedtimes[0])
self.log.info('speedup test passed (%fs/%fs)',originaltime, cachedtimes[0])
with robot:
parameters = Planner.PlannerParameters()
parameters.SetRobotActiveJoints(robot)
planningutils.VerifyTrajectory(parameters,traj2,samplingstep=0.002)
spec = manip.GetArmConfigurationSpecification()
usedbodies = spec.ExtractUsedBodies(env)
assert(len(usedbodies) == 1 and usedbodies[0] == robot)
useddofindices, usedconfigindices = spec.ExtractUsedIndices(robot)
assert(sorted(useddofindices) == sorted(manip.GetArmIndices()))
cachechecker.SendCommand('ResetCache')
cachedcollisions, cachedcollisionhits, cachedfreehits, cachesize = cachechecker.SendCommand('GetCacheStatistics').split()
assert(int(cachesize)==0)
self.log.info('cache reset test passed')
cachechecker.SendCommand('ResetSelfCache')
cachedcollisions, cachedcollisionhits, cachedfreehits, cachesize = cachechecker.SendCommand('GetSelfCacheStatistics').split()
assert(int(cachesize)==0)
self.log.info('self cache reset test passed')
"""
numsed library
numsed opcodes include unsigned operators (+, -, *) and unsigned
comparisons. This library provides functions implementing all signed
arithmetic and comparison operators using only numsed operators.
"""
from __future__ import print_function
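# A quick illustration of the intent (hypothetical interactive session; the
# helpers below reproduce Python's own signed semantics):
#
#     >>> signed_add(-7, 3)
#     -4
#     >>> signed_divmod(-7, 2)
#     (-4, 1)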
# signed comparison operators
def signed_eq(x, y):
if is_positive(x):
if is_positive(y):
return x == y
else:
return 0
else:
if is_positive(y):
return 0
else:
return -x == -y
def signed_noteq(x, y):
return not signed_eq(x, y)
def signed_lt(x, y):
if is_positive(x):
if is_positive(y):
return x < y
else:
return 0
else:
if is_positive(y):
return 1
else:
return -x > -y
def signed_lte(x, y):
if is_positive(x):
if is_positive(y):
return x <= y
else:
return 0
else:
if is_positive(y):
return 1
else:
return -x >= -y
def signed_gt(x, y):
return not signed_lte(x, y)
def signed_gte(x, y):
return not signed_lt(x, y)
# unsigned arithmetic operators
def udivmod(a, b):
if b == 10:
return divmod10(a)
# http://compoasso.free.fr/primelistweb/page/prime/euclide.php
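    # Binary long division: double aux (a copy of b) until it exceeds a,
    # counting the doublings in n; then halve aux back down, shifting a new
    # quotient bit in at each step (q *= 2) and subtracting aux from the
    # remainder whenever it still fits.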
r = a
q = 0
n = 0
aux = b
while aux <= a:
aux *= 2
n += 1
while n > 0:
aux = divide_by_two(aux)
n -= 1
q *= 2
if r >= aux:
r -= aux
q += 1
return q, r
def udiv(a, b):
if b == 10:
return divide_by_ten(a)
r = a
q = 0
n = 0
aux = b
while aux <= a:
aux *= 2
n += 1
while n > 0:
aux = divide_by_two(aux)
n -= 1
q *= 2
if r >= aux:
r -= aux
q += 1
return q
def umod(a, b):
if b == 10:
return modulo_ten(a)
r = a
q = 0
n = 0
aux = b
while aux <= a:
aux *= 2
n += 1
while n > 0:
aux = divide_by_two(aux)
n -= 1
q *= 2
if r >= aux:
r -= aux
q += 1
return r
def upow(base, exp):
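    # Exponentiation by squaring: consume the exponent one bit at a time,
    # squaring the base at each step and multiplying it into the result
    # whenever the current bit is set.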
result = 1
while exp:
if is_odd(exp):
result *= base
exp = divide_by_two(exp)
base *= base
return result
# signed arithmetic operators
def signed_add(x, y):
if is_positive(x):
if is_positive(y):
r = x + y
else:
y = -y
if x > y:
r = x - y
else:
r = -(y - x)
else:
x = -x
if is_positive(y):
if x > y:
r = -(x - y)
else:
r = y - x
else:
y = -y
r = -(x + y)
return r
def signed_sub(x, y):
if is_positive(x):
if is_positive(y):
if x > y:
return x - y
else:
return -(y - x)
else:
return x + -y
else:
abs_x = -x
if is_positive(y):
return -(abs_x + y)
else:
abs_y = -y
if abs_x > abs_y:
return -(abs_x - abs_y)
else:
return abs_y - abs_x
def signed_mult(x, y):
if is_positive(x):
if is_positive(y):
return x * y
else:
return -(x * -y)
else:
if is_positive(y):
return -(-x * y)
else:
return -x * -y
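# The three division helpers below follow Python's floor-division convention:
# the quotient rounds towards negative infinity and the remainder takes the
# sign of the divisor, e.g. signed_divmod(-7, 2) == (-4, 1) and
# signed_mod(7, -2) == -1.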
def signed_div(x, y):
abs_x = abs(x)
abs_y = abs(y)
q, r = udivmod(abs_x, abs_y)
if is_positive(x):
if is_positive(y):
return q
else:
if r == 0:
return -q
else:
return -(q + 1)
else:
if is_positive(y):
if r == 0:
return -q
else:
return -(q + 1)
else:
return q
def signed_mod(x, y):
abs_x = abs(x)
abs_y = abs(y)
r = umod(abs_x, abs_y)
if is_positive(x):
if is_positive(y):
return r
else:
return 0 if r == 0 else -(abs_y - r)
else:
if is_positive(y):
return 0 if r == 0 else y - r
else:
return -r
def signed_divmod(x, y):
abs_x = abs(x)
abs_y = abs(y)
q, r = udivmod(abs_x, abs_y)
if is_positive(x):
if is_positive(y):
return q, r
else:
if r == 0:
return -q, 0
else:
return -(q + 1), -(abs_y - r)
else:
if is_positive(y):
if r == 0:
return -q, 0
else:
return -(q + 1), y - r
else:
return q, -r
def signed_pow(base, exp):
if not is_positive(exp):
print('numsed error: Exponent should be positive: ', exp)
exit()
if is_positive(base):
return upow(base, exp)
else:
r = upow(-base, exp)
return -r if is_odd(exp) else r
# -- Primitives --------------------------------------------------------------
"""
Primitive functions are used in the definition of the library functions.
However, they are handled separately:
- they are not transformed,
- they are added to positive forms, which makes it possible to test the transformation,
- they are removed when generating opcodes and replaced with dedicated
opcodes.
Note that the current implementation requires that no function call appears
as an argument of a primitive function, and there is no check enforcing this.
"""
PRIMITIVES = ('is_positive', 'abs', 'is_odd', 'divide_by_two',
'divide_by_ten', 'modulo_ten', 'divmod10')
def is_positive(x):
return x >= 0
def abs(x):
return x if x >= 0 else -x
def is_odd(x):
return x % 2
def divide_by_two(x):
return x // 2
def divide_by_ten(x):
return x // 10
def modulo_ten(x):
return x % 10
def divmod10(x):
return x // 10, x % 10
# stdlib
from collections import namedtuple
import re
# project
from checks import AgentCheck
from utils.tailfile import TailFile
# fields order for each event type, as named tuples
EVENT_FIELDS = {
'CURRENT HOST STATE': namedtuple('E_CurrentHostState', 'host, event_state, event_soft_hard, return_code, payload'),
'CURRENT SERVICE STATE': namedtuple('E_CurrentServiceState', 'host, check_name, event_state, event_soft_hard, return_code, payload'),
'SERVICE ALERT': namedtuple('E_ServiceAlert', 'host, check_name, event_state, event_soft_hard, return_code, payload'),
'PASSIVE SERVICE CHECK': namedtuple('E_PassiveServiceCheck', 'host, check_name, return_code, payload'),
'HOST ALERT': namedtuple('E_HostAlert', 'host, event_state, event_soft_hard, return_code, payload'),
# [1305744274] SERVICE NOTIFICATION: ops;ip-10-114-237-165;Metric ETL;ACKNOWLEDGEMENT (CRITICAL);notify-service-by-email;HTTP CRITICAL: HTTP/1.1 503 Service Unavailable - 394 bytes in 0.010 second response time;datadog;alq
'SERVICE NOTIFICATION': namedtuple('E_ServiceNotification', 'contact, host, check_name, event_state, notification_type, payload'),
# [1296509331] SERVICE FLAPPING ALERT: ip-10-114-97-27;cassandra JVM Heap;STARTED; Service appears to have started flapping (23.4% change >= 20.0% threshold)
# [1296662511] SERVICE FLAPPING ALERT: ip-10-114-97-27;cassandra JVM Heap;STOPPED; Service appears to have stopped flapping (3.8% change < 5.0% threshold)
'SERVICE FLAPPING ALERT': namedtuple('E_FlappingAlert', 'host, check_name, flap_start_stop, payload'),
# Reference for external commands: http://old.nagios.org/developerinfo/externalcommands/commandlist.php
# Command Format:
# ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;<sticky>;<notify>;<persistent>;<author>;<comment>
# [1305832665] EXTERNAL COMMAND: ACKNOWLEDGE_SVC_PROBLEM;ip-10-202-161-236;Resources ETL;2;1;0;datadog;alq checking
'ACKNOWLEDGE_SVC_PROBLEM': namedtuple('E_ServiceAck', 'host, check_name, sticky_ack, notify_ack, persistent_ack, ack_author, payload'),
# Command Format:
# ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;<persistent>;<author>;<comment>
'ACKNOWLEDGE_HOST_PROBLEM': namedtuple('E_HostAck', 'host, sticky_ack, notify_ack, persistent_ack, ack_author, payload'),
# Comment Format:
# PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<result_code>;<comment>
# We ignore it because Nagios will log a "PASSIVE SERVICE CHECK" after
# receiving this, and we don't want duplicate events to be counted.
'PROCESS_SERVICE_CHECK_RESULT': False,
# Host Downtime
# [1297894825] HOST DOWNTIME ALERT: ip-10-114-89-59;STARTED; Host has entered a period of scheduled downtime
# [1297894825] SERVICE DOWNTIME ALERT: ip-10-114-237-165;intake;STARTED; Service has entered a period of scheduled downtime
'HOST DOWNTIME ALERT': namedtuple('E_HostDowntime', 'host, downtime_start_stop, payload'),
'SERVICE DOWNTIME ALERT': namedtuple('E_ServiceDowntime', 'host, check_name, downtime_start_stop, payload'),
}
# Regex for the Nagios event log
RE_LINE_REG = re.compile('^\[(\d+)\] EXTERNAL COMMAND: (\w+);(.*)$')
RE_LINE_EXT = re.compile('^\[(\d+)\] ([^:]+): (.*)$')
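# For example, the SERVICE FLAPPING ALERT line quoted above does not match
# RE_LINE_REG (it is not an EXTERNAL COMMAND) and falls through to
# RE_LINE_EXT, which splits it into (timestamp, event type,
# semicolon-separated remainder).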
class Nagios(AgentCheck):
NAGIOS_CONF_KEYS = [
re.compile('^(?P<key>log_file)\s*=\s*(?P<value>.+)$'),
re.compile('^(?P<key>host_perfdata_file_template)\s*=\s*(?P<value>.+)$'),
re.compile('^(?P<key>service_perfdata_file_template)\s*=\s*(?P<value>.+)$'),
re.compile('^(?P<key>host_perfdata_file)\s*=\s*(?P<value>.+)$'),
re.compile('^(?P<key>service_perfdata_file)\s*=\s*(?P<value>.+)$'),
]
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.nagios_tails = {}
check_freq = init_config.get("check_freq", 15)
if instances is not None:
for instance in instances:
tailers = []
nagios_conf = {}
instance_key = None
if 'nagios_conf' in instance: # conf.d check
conf_path = instance['nagios_conf']
nagios_conf = self.parse_nagios_config(conf_path)
instance_key = conf_path
# Retrocompatibility Code
elif 'nagios_perf_cfg' in instance:
conf_path = instance['nagios_perf_cfg']
nagios_conf = self.parse_nagios_config(conf_path)
instance["collect_host_performance_data"] = True
instance["collect_service_performance_data"] = True
instance_key = conf_path
if 'nagios_log' in instance:
nagios_conf["log_file"] = instance['nagios_log']
if instance_key is None:
instance_key = instance['nagios_log']
# End of retrocompatibility code
if not nagios_conf:
self.log.warning("Missing path to nagios_conf")
continue
if 'log_file' in nagios_conf and \
instance.get('collect_events', True):
self.log.debug("Starting to tail the event log")
tailers.append(NagiosEventLogTailer(
log_path=nagios_conf['log_file'],
file_template=None,
logger=self.log,
hostname=self.hostname,
event_func=self.event,
gauge_func=self.gauge,
freq=check_freq,
passive_checks=instance.get('passive_checks_events', False)))
if 'host_perfdata_file' in nagios_conf and \
'host_perfdata_file_template' in nagios_conf and \
instance.get('collect_host_performance_data', False):
self.log.debug("Starting to tail the host_perfdata file")
tailers.append(NagiosHostPerfDataTailer(
log_path=nagios_conf['host_perfdata_file'],
file_template=nagios_conf['host_perfdata_file_template'],
logger=self.log,
hostname=self.hostname,
event_func=self.event,
gauge_func=self.gauge,
freq=check_freq))
if 'service_perfdata_file' in nagios_conf and \
'service_perfdata_file_template' in nagios_conf and \
instance.get('collect_service_performance_data', False):
self.log.debug("Starting to tail the service_perfdata file")
tailers.append(NagiosServicePerfDataTailer(
log_path=nagios_conf['service_perfdata_file'],
file_template=nagios_conf['service_perfdata_file_template'],
logger=self.log,
hostname=self.hostname,
event_func=self.event,
gauge_func=self.gauge,
freq=check_freq))
self.nagios_tails[instance_key] = tailers
def parse_nagios_config(self, filename):
output = {}
f = None
try:
f = open(filename)
for line in f:
line = line.strip()
if not line:
continue
for key in self.NAGIOS_CONF_KEYS:
m = key.match(line)
if m:
output[m.group('key')] = m.group('value')
break
return output
except Exception as e:
# Can't parse, assume it's just not working
# Don't return an incomplete config
self.log.exception(e)
raise Exception("Could not parse Nagios config file")
finally:
if f is not None:
f.close()
def check(self, instance):
'''
Parse until the end of each tailer associated with this instance.
        We match instances and tailers based on the path to the Nagios
        configuration file.
        Special case: for compatibility with the old configuration, the path to
        the event log may be given instead of a configuration file.
'''
instance_key = instance.get('nagios_conf',
instance.get('nagios_perf_cfg',
instance.get('nagios_log',
None)))
# Bad configuration: This instance does not contain any necessary configuration
if not instance_key or instance_key not in self.nagios_tails:
raise Exception('No Nagios configuration file specified')
for tailer in self.nagios_tails[instance_key]:
tailer.check()
class NagiosTailer(object):
def __init__(self, log_path, file_template, logger, hostname, event_func, gauge_func, freq):
'''
:param log_path: string, path to the file to parse
:param file_template: string, format of the perfdata file
:param logger: Logger object
:param hostname: string, name of the host this agent is running on
:param event_func: function to create event, should accept dict
:param gauge_func: function to report a gauge
:param freq: int, size of bucket to aggregate perfdata metrics
'''
self.log_path = log_path
self.log = logger
self.gen = None
self.tail = None
self.hostname = hostname
self._event = event_func
self._gauge = gauge_func
self._line_parsed = 0
self._freq = freq
if file_template is not None:
self.compile_file_template(file_template)
self.tail = TailFile(self.log, self.log_path, self._parse_line)
self.gen = self.tail.tail(line_by_line=False, move_end=True)
self.gen.next()
def check(self):
self._line_parsed = 0
# read until the end of file
try:
self.log.debug("Start nagios check for file %s" % (self.log_path))
self.gen.next()
self.log.debug("Done nagios check for file %s (parsed %s line(s))" %
(self.log_path, self._line_parsed))
except StopIteration, e:
self.log.exception(e)
self.log.warning("Can't tail %s file" % (self.log_path))
def compile_file_template(self, file_template):
try:
# Escape characters that will be interpreted as regex bits
# e.g. [ and ] in "[SERVICEPERFDATA]"
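            # For instance, an illustrative template fragment
            # "[HOSTPERFDATA] $TIMET$ $HOSTNAME$" would compile to the pattern
            # ".HOSTPERFDATA. (?P<TIMET>[^\$]*) (?P<HOSTNAME>[^\$]*)"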
regex = re.sub(r'[[\]*]', r'.', file_template)
regex = re.sub(r'\$([^\$]*)\$', r'(?P<\1>[^\$]*)', regex)
self.line_pattern = re.compile(regex)
except Exception, e:
raise InvalidDataTemplate("%s (%s)" % (file_template, e))
class NagiosEventLogTailer(NagiosTailer):
def __init__(self, log_path, file_template, logger, hostname, event_func,
gauge_func, freq, passive_checks=False):
'''
:param log_path: string, path to the file to parse
:param file_template: string, format of the perfdata file
:param logger: Logger object
:param hostname: string, name of the host this agent is running on
:param event_func: function to create event, should accept dict
:param gauge_func: function to report a gauge
:param freq: int, size of bucket to aggregate perfdata metrics
:param passive_checks: bool, enable or not passive checks events
'''
self.passive_checks = passive_checks
super(NagiosEventLogTailer, self).__init__(
log_path, file_template,
logger, hostname, event_func, gauge_func, freq
)
def _parse_line(self, line):
"""Actual nagios parsing
Return True if we found an event, False otherwise
"""
# first isolate the timestamp and the event type
try:
self._line_parsed = self._line_parsed + 1
m = RE_LINE_REG.match(line)
if m is None:
m = RE_LINE_EXT.match(line)
if m is None:
return False
self.log.debug("Matching line found %s" % line)
(tstamp, event_type, remainder) = m.groups()
tstamp = int(tstamp)
# skip passive checks reports by default for spamminess
if event_type == 'PASSIVE SERVICE CHECK' and not self.passive_checks:
return False
# then retrieve the event format for each specific event type
fields = EVENT_FIELDS.get(event_type, None)
if fields is None:
self.log.warning("Ignoring unknown nagios event for line: %s" % (line[:-1]))
return False
elif fields is False:
# Ignore and skip
self.log.debug("Ignoring Nagios event for line: %s" % (line[:-1]))
return False
# and parse the rest of the line
parts = map(lambda p: p.strip(), remainder.split(';'))
# Chop parts we don't recognize
parts = parts[:len(fields._fields)]
event = self.create_event(tstamp, event_type, self.hostname, fields._make(parts))
self._event(event)
self.log.debug("Nagios event: %s" % (event))
return True
except Exception:
self.log.exception("Unable to create a nagios event from line: [%s]" % (line))
return False
def create_event(self, timestamp, event_type, hostname, fields):
"""Factory method called by the parsers
"""
d = fields._asdict()
d.update({'timestamp': timestamp,
'event_type': event_type})
# if host is localhost, turn that into the internal host name
host = d.get('host', None)
if host == "localhost":
d["host"] = hostname
return d
class NagiosPerfDataTailer(NagiosTailer):
    perfdata_field = ''  # Should be overridden by subclasses
metric_prefix = 'nagios'
pair_pattern = re.compile(r"".join([
r"'?(?P<label>[^=']+)'?=",
r"(?P<value>[-0-9.]+)",
r"(?P<unit>s|us|ms|%|B|KB|MB|GB|TB|c)?",
r"(;(?P<warn>@?[-0-9.~]*:?[-0-9.~]*))?",
r"(;(?P<crit>@?[-0-9.~]*:?[-0-9.~]*))?",
r"(;(?P<min>[-0-9.]*))?",
r"(;(?P<max>[-0-9.]*))?",
]))
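    # Example: a pair such as "time=0.06s;5.000;10.000;0.000" parses into
    # label="time", value="0.06", unit="s", warn="5.000", crit="10.000",
    # min="0.000" (max absent).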
@staticmethod
def underscorize(s):
return s.replace(' ', '_').lower()
def _get_metric_prefix(self, data):
raise NotImplementedError()
def _parse_line(self, line):
matched = self.line_pattern.match(line)
if matched:
self.log.debug("Matching line found %s" % line)
data = matched.groupdict()
metric_prefix = self._get_metric_prefix(data)
            # Parse the perfdata values, which are a space-delimited list of:
# 'label'=value[UOM];[warn];[crit];[min];[max]
perf_data = data.get(self.perfdata_field, '').split(' ')
for pair in perf_data:
pair_match = self.pair_pattern.match(pair)
if not pair_match:
continue
else:
pair_data = pair_match.groupdict()
label = pair_data['label']
timestamp = data.get('TIMET', None)
if timestamp is not None:
timestamp = (int(float(timestamp)) / self._freq) * self._freq
value = float(pair_data['value'])
device_name = None
if '/' in label:
                    # Special case: if the label contains a '/', treat
                    # the label as the device name and use the metric
                    # prefix alone as the metric name
metric = '.'.join(metric_prefix)
device_name = label
else:
# Otherwise, append the label to the metric prefix
# and use that as the metric name
metric = '.'.join(metric_prefix + [label])
host_name = data.get('HOSTNAME', self.hostname)
optional_keys = ['unit', 'warn', 'crit', 'min', 'max']
tags = []
for key in optional_keys:
attr_val = pair_data.get(key, None)
if attr_val is not None and attr_val != '':
tags.append("{0}:{1}".format(key, attr_val))
self._gauge(metric, value, tags, host_name, device_name, timestamp)
class NagiosHostPerfDataTailer(NagiosPerfDataTailer):
perfdata_field = 'HOSTPERFDATA'
def _get_metric_prefix(self, line_data):
return [self.metric_prefix, 'host']
class NagiosServicePerfDataTailer(NagiosPerfDataTailer):
perfdata_field = 'SERVICEPERFDATA'
def _get_metric_prefix(self, line_data):
metric = [self.metric_prefix]
middle_name = line_data.get('SERVICEDESC', None)
if middle_name:
metric.append(middle_name.replace(' ', '_').lower())
return metric
class InvalidDataTemplate(Exception):
pass
# coding=utf-8
# Copyright 2022 The REALM authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization classes for REALM."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...file_utils import PaddingStrategy
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-encoder": "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-scorer": "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt",
"google/realm-cc-news-pretrained-openqa": "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt",
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont",
"google/realm-cc-news-pretrained-encoder": "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json",
"google/realm-cc-news-pretrained-scorer": "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json",
"google/realm-cc-news-pretrained-openqa": "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json",
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
[`RealmTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation
splitting and wordpiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (`bool`, *optional*, defaults to `True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
The prefix for subwords.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = RealmTokenizer
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
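        # If the serialized normalizer disagrees with the arguments passed in,
        # rebuild it so that do_lower_case / strip_accents /
        # tokenize_chinese_chars also take effect on the fast backend.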
if (
normalizer_state.get("lowercase", do_lower_case) != do_lower_case
or normalizer_state.get("strip_accents", strip_accents) != strip_accents
or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
normalizer_state["lowercase"] = do_lower_case
normalizer_state["strip_accents"] = strip_accents
normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
self.do_lower_case = do_lower_case
def batch_encode_candidates(self, text, **kwargs):
r"""
        Encode a batch of text or text pairs. This method is similar to the regular __call__ method but has the following
differences:
1. Handle additional num_candidate axis. (batch_size, num_candidates, text)
2. Always pad the sequences to *max_length*.
3. Must specify *max_length* in order to stack packs of candidates into a batch.
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
text (`List[List[str]]`):
The batch of sequences to be encoded. Each sequence must be in this format: (batch_size,
num_candidates, text).
text_pair (`List[List[str]]`, *optional*):
The batch of sequences to be encoded. Each sequence must be in this format: (batch_size,
num_candidates, text).
**kwargs:
Keyword arguments of the __call__ method.
Returns:
[`BatchEncoding`]: Encoded text or text pair.
Example:
```python
>>> from transformers import RealmTokenizerFast
>>> # batch_size = 2, num_candidates = 2
>>> text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
>>> tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
>>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
```"""
# Always using a fixed sequence length to encode in order to stack candidates into a batch.
kwargs["padding"] = PaddingStrategy.MAX_LENGTH
batch_text = text
batch_text_pair = kwargs.pop("text_pair", None)
return_tensors = kwargs.pop("return_tensors", None)
output_data = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(batch_text):
if batch_text_pair is not None:
candidate_text_pair = batch_text_pair[idx]
else:
candidate_text_pair = None
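            # Each call encodes one row of num_candidates texts, padded to
            # max_length, so the per-row outputs can later be stacked into a
            # (batch_size, num_candidates, max_length) batch.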
encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
encoded_input_ids = encoded_candidates.get("input_ids")
encoded_attention_mask = encoded_candidates.get("attention_mask")
encoded_token_type_ids = encoded_candidates.get("token_type_ids")
if encoded_input_ids is not None:
output_data["input_ids"].append(encoded_input_ids)
if encoded_attention_mask is not None:
output_data["attention_mask"].append(encoded_attention_mask)
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(encoded_token_type_ids)
output_data = dict((key, item) for key, item in output_data.items() if len(item) != 0)
return BatchEncoding(output_data, tensor_type=return_tensors)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A REALM sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A REALM sequence
pair mask has the following format:
```
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
```
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
"""
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from atomic_reactor.utils.cachito import (
CachitoAPI, CachitoAPIInvalidRequest, CachitoAPIRequestTimeout, CachitoAPIUnsuccessfulRequest)
from requests.exceptions import HTTPError
import flexmock
import pytest
import responses
import json
import os.path
import re
import time
from datetime import datetime
from textwrap import dedent
CACHITO_URL = 'http://cachito.example.com'
CACHITO_REQUEST_ID = 123
CACHITO_REQUEST_DOWNLOAD_URL = \
'{}/api/v1/requests/{}/download'.format(CACHITO_URL, CACHITO_REQUEST_ID)
CACHITO_REQUEST_REF = 'e1be527f39ec31323f0454f7d1422c6260b00580'
CACHITO_REQUEST_REPO = 'https://github.com/release-engineering/retrodep.git'
@responses.activate
@pytest.mark.parametrize('additional_params', (
{},
{'flags': ['spam', 'bacon']},
{'pkg_managers': ['gomod']},
{'pkg_managers': []},
{'pkg_managers': None},
{'user': 'ham'},
{'dependency_replacements': [{
        'name': 'example.com/repo/project',
'type': 'gomod',
'version': '1.1.1',
}]
},
{'packages': {'npm': [{'path': 'client'}]}},
{'packages': None},
))
def test_request_sources(additional_params, caplog):
response_data = {'id': CACHITO_REQUEST_ID}
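    # The callback below stands in for the Cachito server: it asserts on the
    # JSON body the client sends for each parametrized case and replies with a
    # minimal 201 payload that only carries the new request id.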
def handle_request_sources(http_request):
body_json = json.loads(http_request.body)
assert body_json['repo'] == CACHITO_REQUEST_REPO
assert body_json['ref'] == CACHITO_REQUEST_REF
for key, value in additional_params.items():
if value is not None:
assert body_json[key] == value
else:
assert key not in body_json
return (201, {}, json.dumps(response_data))
responses.add_callback(
responses.POST,
'{}/api/v1/requests'.format(CACHITO_URL),
content_type='application/json',
callback=handle_request_sources)
api = CachitoAPI(CACHITO_URL)
response = api.request_sources(CACHITO_REQUEST_REPO, CACHITO_REQUEST_REF, **additional_params)
assert response['id'] == CACHITO_REQUEST_ID
response_json = 'Cachito response:\n{}'.format(json.dumps(response_data, indent=4))
# Since Python 3.7 logger adds additional whitespaces by default -> checking without them
assert re.sub(r'\s+', " ", response_json) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize(('status_code', 'error', 'error_body'), (
(400, CachitoAPIInvalidRequest, json.dumps({'error': 'read the docs, please'})),
(500, HTTPError, 'Internal Server Error'),
))
def test_request_sources_error(status_code, error, error_body, caplog):
responses.add(
responses.POST,
'{}/api/v1/requests'.format(CACHITO_URL),
content_type='application/json',
body=error_body,
status=status_code,
)
with pytest.raises(error):
CachitoAPI(CACHITO_URL).request_sources(CACHITO_REQUEST_REPO, CACHITO_REQUEST_REF)
try:
response_data = json.loads(error_body)
except ValueError: # json.JSONDecodeError in py3
assert 'Cachito response' not in caplog.text
else:
response_json = 'Cachito response:\n{}'.format(json.dumps(response_data, indent=4))
# Since Python 3.7 logger adds additional whitespaces by default -> checking without them
assert re.sub(r'\s+', " ", response_json) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize('burst_params', (
{'burst_retry': 0.01, 'burst_length': 0.5, 'slow_retry': 0.2},
# Set the burst_length to lower than burst_retry to trigger the slow_retry :)
{'burst_retry': 0.01, 'burst_length': 0.001, 'slow_retry': 0.01},
))
@pytest.mark.parametrize('cachito_request', (
CACHITO_REQUEST_ID,
{'id': CACHITO_REQUEST_ID},
))
def test_wait_for_request(burst_params, cachito_request, caplog):
states = ['in_progress', 'in_progress', 'complete']
updated = datetime.utcnow().isoformat()
expected_total_responses_calls = len(states)
expected_final_state = states[-1]
def handle_wait_for_request(http_request):
state = states.pop(0)
return (200, {}, json.dumps({'id': CACHITO_REQUEST_ID, 'state': state, 'updated': updated}))
request_url = '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID)
responses.add_callback(
responses.GET,
request_url,
content_type='application/json',
callback=handle_wait_for_request)
response = CachitoAPI(CACHITO_URL).wait_for_request(cachito_request, **burst_params)
assert response['id'] == CACHITO_REQUEST_ID
assert response['state'] == expected_final_state
assert len(responses.calls) == expected_total_responses_calls
expect_in_logs = dedent(
"""\
Request {} is complete
Request url: {}
"""
).format(CACHITO_REQUEST_ID, request_url)
# Since Python 3.7 logger adds additional whitespaces by default -> checking without them
assert re.sub(r'\s+', " ", expect_in_logs) in re.sub(r'\s+', r" ", caplog.text)
@responses.activate
@pytest.mark.parametrize('timeout', (0, 60))
def test_wait_for_request_timeout(timeout, caplog):
request_url = '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID)
updated = datetime.utcnow().isoformat()
response_data = {'id': CACHITO_REQUEST_ID, 'state': 'in_progress', 'updated': updated}
responses.add(
responses.GET,
request_url,
content_type='application/json',
status=200,
body=json.dumps(response_data),
)
flexmock(time).should_receive('time').and_return(2000, 1000).one_by_one()
# Hit the timeout during bursting to make the test faster
burst_params = {'burst_retry': 0.001, 'burst_length': 0.02}
with pytest.raises(CachitoAPIRequestTimeout):
api = CachitoAPI(CACHITO_URL, timeout=timeout)
api.wait_for_request(CACHITO_REQUEST_ID, **burst_params)
in_progress_response_json = json.dumps(response_data, indent=4)
expect_in_logs = dedent(
"""\
Request {} not completed after {} seconds of not being updated
Details: {}
"""
).format(request_url, timeout, in_progress_response_json)
# Since Python 3.7 logger adds additional whitespaces by default -> checking without them
assert re.sub(r'\s+', " ", expect_in_logs) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize('error_state,error_reason',
[('failed', 'Cloning the Git repository failed'),
('stale', 'The request has expired')])
def test_wait_for_unsuccessful_request(error_state, error_reason, caplog):
states = ['in_progress', 'in_progress', error_state]
updated = datetime.utcnow().isoformat()
expected_total_responses_calls = len(states)
def handle_wait_for_request(http_request):
state = states.pop(0)
return (200, {}, json.dumps({'state_reason': error_reason,
'repo': CACHITO_REQUEST_REPO,
'state': state,
'ref': CACHITO_REQUEST_REF,
'id': CACHITO_REQUEST_ID,
'updated': updated,
}))
responses.add_callback(
responses.GET,
'{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID),
content_type='application/json',
callback=handle_wait_for_request)
burst_params = {'burst_retry': 0.001, 'burst_length': 0.5}
with pytest.raises(CachitoAPIUnsuccessfulRequest):
CachitoAPI(CACHITO_URL).wait_for_request(CACHITO_REQUEST_ID, **burst_params)
assert len(responses.calls) == expected_total_responses_calls
failed_response_json = json.dumps(
{'state_reason': error_reason,
'repo': CACHITO_REQUEST_REPO,
'state': error_state,
'ref': CACHITO_REQUEST_REF,
'id': CACHITO_REQUEST_ID,
'updated': updated,
},
indent=4
)
expect_in_logs = dedent(
"""\
Request {} is in "{}" state: {}
Details: {}
"""
).format(CACHITO_REQUEST_ID, error_state, error_reason, failed_response_json)
# Since Python 3.7 logger adds additional whitespaces by default -> checking without them
assert re.sub(r'\s+', " ", expect_in_logs) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize('error_state,error_reason',
[('failed', 'Cloning the Git repository failed'),
('stale', 'The request has expired')])
def test_check_CachitoAPIUnsuccessfulRequest_text(error_state, error_reason, caplog):
states = ['in_progress', 'in_progress', error_state]
updated = datetime.utcnow().isoformat()
expected_total_responses_calls = len(states)
cachito_request_url = '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID)
def handle_wait_for_request(http_request):
state = states.pop(0)
return (200, {}, json.dumps({'state_reason': error_reason,
'repo': CACHITO_REQUEST_REPO,
'state': state,
'ref': CACHITO_REQUEST_REF,
'id': CACHITO_REQUEST_ID,
'updated': updated,
}))
responses.add_callback(
responses.GET,
'{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID),
content_type='application/json',
callback=handle_wait_for_request)
burst_params = {'burst_retry': 0.001, 'burst_length': 0.5}
expected_exc_text = dedent('''\
Cachito request is in "{}" state, reason: {}
Request {} ({}) tried to get repo '{}' at reference '{}'.
'''.format(error_state, error_reason, CACHITO_REQUEST_ID,
cachito_request_url, CACHITO_REQUEST_REPO,
CACHITO_REQUEST_REF))
with pytest.raises(CachitoAPIUnsuccessfulRequest) as excinfo:
CachitoAPI(CACHITO_URL).wait_for_request(CACHITO_REQUEST_ID, **burst_params)
assert len(responses.calls) == expected_total_responses_calls
assert expected_exc_text in str(excinfo.value)
def test_wait_for_request_bad_request_type():
with pytest.raises(ValueError, match=r'Unexpected request type'):
CachitoAPI(CACHITO_URL).wait_for_request('spam')
@responses.activate
@pytest.mark.parametrize('cachito_request', (
CACHITO_REQUEST_ID,
{'id': CACHITO_REQUEST_ID},
))
def test_download_sources(tmpdir, cachito_request):
blob = 'glop-glop-I\'m-a-blob'
expected_dest_path = os.path.join(str(tmpdir), 'remote-source.tar.gz')
responses.add(
responses.GET,
'{}/api/v1/requests/{}/download'.format(CACHITO_URL, CACHITO_REQUEST_ID),
body=blob)
dest_path = CachitoAPI(CACHITO_URL).download_sources(cachito_request, str(tmpdir))
assert dest_path == expected_dest_path
with open(dest_path) as f:
assert f.read() == blob
def test_download_sources_bad_request_type(tmpdir):
with pytest.raises(ValueError, match=r'Unexpected request type'):
CachitoAPI(CACHITO_URL).download_sources('spam', str(tmpdir))
@pytest.mark.parametrize('cachito_request', (
CACHITO_REQUEST_ID,
{'id': CACHITO_REQUEST_ID},
))
def test_assemble_download_url(tmpdir, cachito_request):
url = CachitoAPI(CACHITO_URL).assemble_download_url(cachito_request)
assert url == CACHITO_REQUEST_DOWNLOAD_URL
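# Illustrative sketch (not a test, never invoked): the end-to-end CachitoAPI
# flow that the tests above exercise piecewise. The arguments `repo`, `ref`
# and `dest_dir` are hypothetical caller-supplied values.
def _example_cachito_flow(repo, ref, dest_dir):
    api = CachitoAPI(CACHITO_URL)
    new_request = api.request_sources(repo, ref, pkg_managers=['gomod'])
    completed = api.wait_for_request(new_request, burst_retry=1, burst_length=30)
    return api.download_sources(completed, dest_dir)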
|
|
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, logging, traceback, time
from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure,\
FunctionFailure
from cassandra.cluster import Cluster
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.query import SimpleStatement
from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node
from mock import Mock
try:
import unittest2 as unittest
except ImportError:
import unittest
log = logging.getLogger(__name__)
def setup_module():
"""
We need some custom setup for this module. All unit tests in this module
require protocol >=4. We won't bother going through the setup required unless that is the
protocol version we are using.
"""
# If we aren't at protocol v4 or greater, don't waste time setting anything up; all tests will be skipped
if PROTOCOL_VERSION >= 4:
use_singledc(start=False)
ccm_cluster = get_cluster()
ccm_cluster.stop()
config_options = {'tombstone_failure_threshold': 2000, 'tombstone_warn_threshold': 1000}
ccm_cluster.set_configuration_options(config_options)
ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
setup_keyspace()
def teardown_module():
"""
The rest of the tests don't need the custom tombstone settings;
remove the cluster so as to not interfere with other tests.
"""
if PROTOCOL_VERSION >= 4:
remove_cluster()
class ClientExceptionTests(unittest.TestCase):
def setUp(self):
"""
Test is skipped if run with native protocol version <4
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest(
"Native protocol 4,0+ is required for custom payloads, currently using %r"
% (PROTOCOL_VERSION,))
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
self.nodes_currently_failing = []
self.node1, self.node2, self.node3 = get_cluster().nodes.values()
def tearDown(self):
self.cluster.shutdown()
failing_nodes = []
# Restart the nodes to fully functional again
self.setFailingNodes(failing_nodes, "testksfail")
def execute_helper(self, session, query):
tries = 0
while tries < 100:
try:
return session.execute(query)
except OperationTimedOut:
ex_type, ex, tb = sys.exc_info()
log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
del tb
tries += 1
raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query))
def execute_concurrent_args_helper(self, session, query, params):
tries = 0
while tries < 100:
try:
return execute_concurrent_with_args(session, query, params, concurrency=50)
except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure):
ex_type, ex, tb = sys.exc_info()
log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
del tb
tries += 1
raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query))
def setFailingNodes(self, failing_nodes, keyspace):
"""
This method will take in a set of failing nodes, and toggle all of the nodes in the provided list to fail
writes.
@param failing_nodes A definitive list of nodes that should fail writes
@param keyspace The keyspace to enable failures on
"""
# Ensure all of the nodes on the list have failures enabled
for node in failing_nodes:
if node not in self.nodes_currently_failing:
node.stop(wait_other_notice=True, gently=False)
node.start(jvm_args=[" -Dcassandra.test.fail_writes_ks=" + keyspace], wait_for_binary_proto=True,
wait_other_notice=True)
self.nodes_currently_failing.append(node)
# Ensure all nodes not on the list, but currently set to failing, are re-enabled
for node in self.nodes_currently_failing:
if node not in failing_nodes:
node.stop(wait_other_notice=True, gently=False)
node.start(wait_for_binary_proto=True, wait_other_notice=True)
self.nodes_currently_failing.remove(node)
def _perform_cql_statement(self, text, consistency_level, expected_exception):
"""
Simple helper method to perform CQL statements and check for an expected exception
@param text CQL statement to execute
@param consistency_level Consistency level at which it is to be executed
@param expected_exception Exception expected to be thrown, or None
"""
statement = SimpleStatement(text)
statement.consistency_level = consistency_level
if expected_exception is None:
self.execute_helper(self.session, statement)
else:
with self.assertRaises(expected_exception):
self.execute_helper(self.session, statement)
def test_write_failures_from_coordinator(self):
"""
Test to validate that write failures from the coordinator are surfaced appropriately.
test_write_failures_from_coordinator Enable write failures on the various nodes using a custom jvm flag,
cassandra.test.fail_writes_ks. This will cause writes to fail on that specific node. Depending on the replication
factor of the keyspace, and the consistency level, we will expect the coordinator to send WriteFailure, or not.
@since 2.6.0
@jira_ticket PYTHON-238
@expected_result Appropriate write failures from the coordinator
@test_category queries:basic
"""
# Setup temporary keyspace.
self._perform_cql_statement(
"""
CREATE KEYSPACE testksfail
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
# create table
self._perform_cql_statement(
"""
CREATE TABLE testksfail.test (
k int PRIMARY KEY,
v int )
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
# Disable one node
failing_nodes = [self.node1]
self.setFailingNodes(failing_nodes, "testksfail")
# With one node disabled we would expect a write failure with ConsistencyLevel of all
self._perform_cql_statement(
"""
INSERT INTO testksfail.test (k, v) VALUES (1, 0 )
""", consistency_level=ConsistencyLevel.ALL, expected_exception=WriteFailure)
# We have two nodes left so a write with consistency level of QUORUM should complete as expected
self._perform_cql_statement(
"""
INSERT INTO testksfail.test (k, v) VALUES (1, 0 )
""", consistency_level=ConsistencyLevel.QUORUM, expected_exception=None)
failing_nodes = []
# Restart the nodes to fully functional again
self.setFailingNodes(failing_nodes, "testksfail")
# Drop temporary keyspace
self._perform_cql_statement(
"""
DROP KEYSPACE testksfail
""", consistency_level=ConsistencyLevel.ANY, expected_exception=None)
def test_tombstone_overflow_read_failure(self):
"""
Test to validate that a ReadFailure is returned from the node when a specified threshold of tombstones is
reached.
test_tombstone_overflow_read_failure First sets the tombstone failure threshold down to a level that allows it
to be more easily encountered. We then create some wide rows and ensure they are deleted appropriately. This
produces the correct amount of tombstones. Upon making a simple query we expect to get a read failure back
from the coordinator.
@since 2.6.0
@jira_ticket PYTHON-238
@expected_result Appropriate read failures from the coordinator
@test_category queries:basic
"""
# Setup table for "wide row"
self._perform_cql_statement(
"""
CREATE TABLE test3rf.test2 (
k int,
v0 int,
v1 int, PRIMARY KEY (k,v0))
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
statement = self.session.prepare("INSERT INTO test3rf.test2 (k, v0,v1) VALUES (1,?,1)")
parameters = [(x,) for x in range(3000)]
self.execute_concurrent_args_helper(self.session, statement, parameters)
statement = self.session.prepare("DELETE v1 FROM test3rf.test2 WHERE k = 1 AND v0 =?")
parameters = [(x,) for x in range(2001)]
self.execute_concurrent_args_helper(self.session, statement, parameters)
self._perform_cql_statement(
"""
SELECT * FROM test3rf.test2 WHERE k = 1
""", consistency_level=ConsistencyLevel.ALL, expected_exception=ReadFailure)
self._perform_cql_statement(
"""
DROP TABLE test3rf.test2;
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
def test_user_function_failure(self):
"""
Test to validate that exceptions in user defined functions are correctly surfaced by the driver to us.
test_user_function_failure First creates a table to use for testing. Then creates a function that will throw an
exception when invoked. It then invokes the function and expects a FunctionFailure. Finally it performs
cleanup operations.
@since 2.6.0
@jira_ticket PYTHON-238
@expected_result Function failures when UDF throws exception
@test_category queries:basic
"""
# create UDF that throws an exception
self._perform_cql_statement(
"""
CREATE FUNCTION test3rf.test_failure(d double)
RETURNS NULL ON NULL INPUT
RETURNS double
LANGUAGE java AS 'throw new RuntimeException("failure");';
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
# Create test table
self._perform_cql_statement(
"""
CREATE TABLE test3rf.d (k int PRIMARY KEY , d double);
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
# Insert some values
self._perform_cql_statement(
"""
INSERT INTO test3rf.d (k,d) VALUES (0, 5.12);
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
# Run the function expect a function failure exception
self._perform_cql_statement(
"""
SELECT test_failure(d) FROM test3rf.d WHERE k = 0;
""", consistency_level=ConsistencyLevel.ALL, expected_exception=FunctionFailure)
self._perform_cql_statement(
"""
DROP FUNCTION test3rf.test_failure;
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
self._perform_cql_statement(
"""
DROP TABLE test3rf.d;
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
class TimeoutTimerTest(unittest.TestCase):
def setUp(self):
"""
Setup sessions and pause node1
"""
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
# self.node1, self.node2, self.node3 = get_cluster().nodes.values()
self.node1 = get_node(1)
ddl = '''
CREATE TABLE test3rf.timeout (
k int PRIMARY KEY,
v int )'''
self.session.execute(ddl)
self.node1.pause()
def tearDown(self):
"""
Shutdown cluster and resume node1
"""
self.node1.resume()
self.session.execute("DROP TABLE test3rf.timeout")
self.cluster.shutdown()
def test_async_timeouts(self):
"""
Test to validate that timeouts are honored
Exercise the underlying timeouts, by attempting a query that will timeout. Ensure the default timeout is still
honored. Make sure that user timeouts are also honored.
@since 2.7.0
@jira_ticket PYTHON-108
@expected_result timeouts should be honored
@test_category
"""
# Because node1 is stopped these statements will all timeout
ss = SimpleStatement('SELECT * FROM test3rf.test', consistency_level=ConsistencyLevel.ALL)
# Test with default timeout (should be 10)
start_time = time.time()
future = self.session.execute_async(ss)
with self.assertRaises(OperationTimedOut):
future.result()
end_time = time.time()
total_time = end_time-start_time
expected_time = self.session.default_timeout
# check timeout and ensure it's within a reasonable range
self.assertAlmostEqual(expected_time, total_time, delta=.05)
# Test with user defined timeout (Should be 1)
start_time = time.time()
future = self.session.execute_async(ss, timeout=1)
mock_callback = Mock(return_value=None)
mock_errorback = Mock(return_value=None)
future.add_callback(mock_callback)
future.add_errback(mock_errorback)
with self.assertRaises(OperationTimedOut):
future.result()
end_time = time.time()
total_time = end_time-start_time
expected_time = 1
# check timeout and ensure it's within a reasonable range
self.assertAlmostEqual(expected_time, total_time, delta=.05)
self.assertTrue(mock_errorback.called)
self.assertFalse(mock_callback.called)
|
|
from datetime import datetime
from ..enums import CardClass, CardSet, Rarity, ZodiacYear
try:
from lxml import etree as ElementTree # noqa
except ImportError:
from xml.etree import ElementTree # noqa
CARDCLASS_HERO_MAP = {
CardClass.DEMONHUNTER: "HERO_10",
CardClass.DRUID: "HERO_06",
CardClass.HUNTER: "HERO_05",
CardClass.MAGE: "HERO_08",
CardClass.PALADIN: "HERO_04",
CardClass.PRIEST: "HERO_09",
CardClass.ROGUE: "HERO_03",
CardClass.SHAMAN: "HERO_02",
CardClass.WARLOCK: "HERO_07",
CardClass.WARRIOR: "HERO_01",
CardClass.WHIZBANG: "BOT_914h",
}
SECRET_COSTS = {
CardClass.HUNTER: 2,
CardClass.MAGE: 3,
CardClass.PALADIN: 1,
CardClass.ROGUE: 2,
CardClass.WARRIOR: 0,
}
CRAFTING_COSTS = {
Rarity.COMMON: (40, 400),
Rarity.RARE: (100, 800),
Rarity.EPIC: (400, 1600),
Rarity.LEGENDARY: (1600, 3200),
}
DISENCHANT_COSTS = {
Rarity.COMMON: (5, 50),
Rarity.RARE: (20, 100),
Rarity.EPIC: (100, 400),
Rarity.LEGENDARY: (400, 1600),
}
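# Minimal sketch (not part of the upstream module): look up the dust cost of
# crafting and the dust gained from disenchanting a card of a given rarity.
# The (first, second) tuple entries above are the regular and golden values.
def _dust_values(rarity, golden=False):
    index = 1 if golden else 0
    return CRAFTING_COSTS[rarity][index], DISENCHANT_COSTS[rarity][index]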
STANDARD_SETS = {
ZodiacYear.PRE_STANDARD: [
CardSet.BASIC, CardSet.EXPERT1, CardSet.REWARD, CardSet.PROMO,
CardSet.NAXX, CardSet.GVG, CardSet.BRM, CardSet.TGT, CardSet.LOE,
],
ZodiacYear.KRAKEN: [
CardSet.BASIC, CardSet.EXPERT1,
CardSet.BRM, CardSet.TGT, CardSet.LOE, CardSet.OG, CardSet.OG_RESERVE,
CardSet.KARA, CardSet.KARA_RESERVE, CardSet.GANGS, CardSet.GANGS_RESERVE,
],
ZodiacYear.MAMMOTH: [
CardSet.BASIC, CardSet.EXPERT1,
CardSet.OG, CardSet.OG_RESERVE, CardSet.KARA, CardSet.KARA_RESERVE,
CardSet.GANGS, CardSet.GANGS_RESERVE, CardSet.UNGORO, CardSet.ICECROWN,
CardSet.LOOTAPALOOZA,
],
ZodiacYear.RAVEN: [
CardSet.BASIC, CardSet.EXPERT1,
CardSet.UNGORO, CardSet.ICECROWN, CardSet.LOOTAPALOOZA, CardSet.GILNEAS,
CardSet.BOOMSDAY, CardSet.TROLL,
],
ZodiacYear.DRAGON: [
CardSet.BASIC, CardSet.EXPERT1,
CardSet.GILNEAS, CardSet.BOOMSDAY, CardSet.TROLL, CardSet.DALARAN, CardSet.ULDUM,
CardSet.WILD_EVENT, CardSet.DRAGONS, CardSet.YEAR_OF_THE_DRAGON,
CardSet.BLACK_TEMPLE, CardSet.DEMON_HUNTER_INITIATE,
],
ZodiacYear.PHOENIX: [
CardSet.BASIC, CardSet.EXPERT1,
CardSet.DALARAN, CardSet.ULDUM, CardSet.WILD_EVENT, CardSet.DRAGONS,
CardSet.YEAR_OF_THE_DRAGON, CardSet.BLACK_TEMPLE, CardSet.DEMON_HUNTER_INITIATE,
CardSet.SCHOLOMANCE, CardSet.DARKMOON_FAIRE,
],
ZodiacYear.GRYPHON: [
CardSet.CORE,
CardSet.BLACK_TEMPLE, CardSet.SCHOLOMANCE, CardSet.DARKMOON_FAIRE,
CardSet.THE_BARRENS, CardSet.WAILING_CAVERNS, CardSet.STORMWIND,
CardSet.ALTERAC_VALLEY,
]
}
try:
_EPOCH = datetime.fromtimestamp(0)
except OSError:
# https://bugs.python.org/issue29097 (Windows-only)
_EPOCH = datetime.fromtimestamp(86400)
ZODIAC_ROTATION_DATES = {
ZodiacYear.PRE_STANDARD: _EPOCH,
ZodiacYear.KRAKEN: datetime(2016, 4, 26),
ZodiacYear.MAMMOTH: datetime(2017, 4, 7),
ZodiacYear.RAVEN: datetime(2018, 4, 12),
ZodiacYear.DRAGON: datetime(2019, 4, 9),
ZodiacYear.PHOENIX: datetime(2020, 4, 7),
ZodiacYear.GRYPHON: datetime(2021, 3, 30),
}
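# Illustrative sketch (not part of the upstream module): resolve which zodiac
# year was in effect at a given datetime, and whether a card set was
# Standard-legal at that point, using only the tables above.
def _zodiac_year_at(date):
    year = ZodiacYear.PRE_STANDARD
    for candidate, rotation_date in sorted(
        ZODIAC_ROTATION_DATES.items(), key=lambda item: item[1]
    ):
        if date >= rotation_date:
            year = candidate
    return year


def _is_standard_at(card_set, date):
    return card_set in STANDARD_SETS.get(_zodiac_year_at(date), [])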
# QuestController.cs
QUEST_REWARDS = {
"UNG_940": "UNG_940t8",
"UNG_954": "UNG_954t1",
"UNG_934": "UNG_934t1",
"UNG_829": "UNG_829t1",
"UNG_028": "UNG_028t",
"UNG_067": "UNG_067t1",
"UNG_116": "UNG_116t",
"UNG_920": "UNG_920t1",
"UNG_942": "UNG_942t",
}
# GameplayStringTextBuilder.cs
SPELLSTONE_STRINGS = {
"LOOT_043": "GAMEPLAY_AMETHYST_SPELLSTONE_%d",
"LOOT_051": "GAMEPLAY_JASPER_SPELLSTONE_%d",
"LOOT_064": "GAMEPLAY_SAPPHIRE_SPELLSTONE_%d",
"LOOT_091": "GAMEPLAY_PEARL_SPELLSTONE_%d",
"LOOT_103": "GAMEPLAY_RUBY_SPELLSTONE_%d",
"LOOT_503": "GAMEPLAY_ONYX_SPELLSTONE_%d",
"LOOT_507": "GAMEPLAY_DIAMOND_SPELLSTONE_%d",
"LOOT_526d": "GAMEPLAY_LOOT_526d_DARKNESS_%d",
}
UPGRADABLE_CARDS_MAP = {
# Fatespinner
"ICC_047t": "ICC_047",
"ICC_047t2": "ICC_047",
# Lesser Amethyst Spellstone
"LOOT_043t2": "LOOT_043",
"LOOT_043t3": "LOOT_043",
# Lesser Jasper Spellstone
"LOOT_051t1": "LOOT_051",
"LOOT_051t2": "LOOT_051",
# Lesser Sapphire Spellstone
"LOOT_064t1": "LOOT_064",
"LOOT_064t2": "LOOT_064",
# Lesser Emerald Spellstone
"LOOT_080t2": "LOOT_080",
"LOOT_080t3": "LOOT_080",
# Lesser Pearl Spellstone
"LOOT_091t1": "LOOT_091",
"LOOT_091t2": "LOOT_091",
# Lesser Ruby Spellstone
"LOOT_103t1": "LOOT_103",
"LOOT_103t2": "LOOT_103",
# Lesser Mithril Spellstone
"LOOT_203t2": "LOOT_203",
"LOOT_203t3": "LOOT_203",
# Unidentified Elixir
"LOOT_278t1": "LOOT_278",
"LOOT_278t2": "LOOT_278",
"LOOT_278t3": "LOOT_278",
"LOOT_278t4": "LOOT_278",
# Unidentified Shield
"LOOT_285t": "LOOT_285",
"LOOT_285t2": "LOOT_285",
"LOOT_285t3": "LOOT_285",
"LOOT_285t4": "LOOT_285",
# Unidentified Maul
"LOOT_286t1": "LOOT_286",
"LOOT_286t2": "LOOT_286",
"LOOT_286t3": "LOOT_286",
"LOOT_286t4": "LOOT_286",
# Lesser Onyx Spellstone
"LOOT_503t": "LOOT_503",
"LOOT_503t2": "LOOT_503",
# Lesser Diamond Spellstone
"LOOT_507t": "LOOT_507",
"LOOT_507t2": "LOOT_507",
# Duskhaven Hunter
"GIL_200t": "GIL_200",
# Pumpkin Peasant
"GIL_201t": "GIL_201",
# Gilnean Royal Guard
"GIL_202t": "GIL_202",
# Swift Messenger
"GIL_528t": "GIL_528",
# Spellshifter
"GIL_529t": "GIL_529",
# Unidentified Contract
"DAL_366t1": "DAL_366",
"DAL_366t2": "DAL_366",
"DAL_366t3": "DAL_366",
"DAL_366t4": "DAL_366",
# Galakrond
"DRG_600t2": "DRG_600",
"DRG_600t3": "DRG_600",
"DRG_610t2": "DRG_610",
"DRG_610t3": "DRG_610",
"DRG_620t2": "DRG_620",
"DRG_620t3": "DRG_620",
"DRG_650t2": "DRG_650",
"DRG_650t3": "DRG_650",
"DRG_660t2": "DRG_660",
"DRG_660t3": "DRG_660",
# Corrupted Card
"DMF_061t": "DMF_061", # Faire Arborist
"DMF_730t": "DMF_730", # Moontouched Amulet
"DMF_083t": "DMF_083", # Dancing Cobra
"DMF_101t": "DMF_101", # Firework Elemental
"DMF_054t": "DMF_054", # Insight
"DMF_184t": "DMF_184", # Fairground Fool
"DMF_517a": "DMF_517", # Sweet Tooth
"DMF_703t": "DMF_703", # Pit Master
"DMF_526a": "DMF_526", # Stage Dive
"DMF_073t": "DMF_073", # Darkmoon Dirigible
"DMF_082t": "DMF_082", # Darkmoon Statue
"DMF_174t": "DMF_174", # Circus Medic
"DMF_163t": "DMF_163", # Carnival Clown
# Cascading Disaster
"DMF_117t2": "DMF_117",
"DMF_117t": "DMF_117",
"DMF_078t": "DMF_078", # Strongman
"DMF_186a": "DMF_186", # Auspicious Spirits
"DMF_118t": "DMF_118", # Tickatus
"DMF_247t": "DMF_247", # Insatiable Felhound
"DMF_248t": "DMF_248", # Felsteel Executioner
"DMF_064t": "DMF_064", # Carousel Gryphon
"DMF_124t": "DMF_124", # Horrendous Growth
"DMF_090t": "DMF_090", # Don't Feed the Animals
"DMF_105t": "DMF_105", # Ring Toss
"DMF_701t": "DMF_701", # Dunk Tank
"DMF_080t": "DMF_080", # Fleethoof Pearltusk
"DMF_244t": "DMF_244", # Day at the Faire
# Tame Beast
"BAR_034t": "BAR_034",
"BAR_034t2": "BAR_034",
# Chain Lightning
"BAR_044t": "BAR_044",
"BAR_044t2": "BAR_044",
# Flurry
"BAR_305t": "BAR_305",
"BAR_305t2": "BAR_305",
# Condemn
"BAR_314t": "BAR_314",
"BAR_314t2": "BAR_314",
# Wicked Stab
"BAR_319t": "BAR_319",
"BAR_319t2": "BAR_319",
# Living Seed
"BAR_536t": "BAR_536",
"BAR_536t2": "BAR_536",
# Conviction
"BAR_880t": "BAR_880",
"BAR_880t2": "BAR_880",
# Conditioning
"BAR_842t": "BAR_842",
"BAR_842t2": "BAR_842",
# Fury
"BAR_891t": "BAR_891",
"BAR_891t2": "BAR_891",
# Imp Swarm
"BAR_914t": "BAR_914",
"BAR_914t2": "BAR_914",
}
def get_original_card_id(card_id):
# Transfer Student
if str(card_id).startswith("SCH_199t"):
return "SCH_199"
return UPGRADABLE_CARDS_MAP.get(card_id, card_id)
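# Usage sketch: upgraded ids collapse back to their base card, while ids not
# in the map pass through unchanged ("UNKNOWN_ID" is a hypothetical id).
assert get_original_card_id("LOOT_043t3") == "LOOT_043"
assert get_original_card_id("UNKNOWN_ID") == "UNKNOWN_ID"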
SCHEME_CARDS = [
"DAL_007", # Rafaam's Scheme
"DAL_008", # Dr. Boom's Scheme
"DAL_009", # Hagatha's Scheme
"DAL_010", # Tagwaggle's Scheme
"DAL_011", # Lazul's Scheme
]
MAESTRA_DISGUISE_DBF_ID = 64674
|
|
# Copyright 2016-2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A :term:`Partition` is a subset of the hardware resources of a :term:`CPC`
in DPM mode, virtualized as a separate computer.
Partitions can be created and deleted dynamically, and their resources such
as CPU, memory or I/O devices can be configured dynamically.
You can create as many partition definitions as you want, but only a specific
number of partitions can be active at any given time.
TODO: How can a user find out what the maximum is, before it is reached?
Partition resources are contained in CPC resources.
Partition resources only exist in CPCs that are in DPM mode. CPCs in classic
mode (or ensemble mode) have :term:`LPAR` resources, instead.
"""
from __future__ import absolute_import
import time
import copy
from requests.utils import quote
from ._manager import BaseManager
from ._resource import BaseResource
from ._exceptions import StatusTimeout
from ._nic import NicManager
from ._hba import HbaManager
from ._virtual_function import VirtualFunctionManager
from ._logging import logged_api_call
from ._utils import matches_filters, divide_filter_args, RC_PARTITION
__all__ = ['PartitionManager', 'Partition']
class PartitionManager(BaseManager):
"""
Manager providing access to the :term:`Partitions <Partition>` in a
particular :term:`CPC`.
Derived from :class:`~zhmcclient.BaseManager`; see there for common methods
and attributes.
Objects of this class are not directly created by the user; they are
accessible via the following instance variable of a
:class:`~zhmcclient.Cpc` object (in DPM mode):
* :attr:`~zhmcclient.Cpc.partitions`
"""
def __init__(self, cpc):
# This function should not go into the docs.
# Parameters:
# cpc (:class:`~zhmcclient.Cpc`):
# CPC defining the scope for this manager.
# Resource properties that are supported as filter query parameters.
# If the support for a resource property changes within the set of HMC
# versions that support this type of resource, this list must be set up
# for the version of the HMC this session is connected to.
query_props = [
'name',
'status',
]
super(PartitionManager, self).__init__(
resource_class=Partition,
class_name=RC_PARTITION,
session=cpc.manager.session,
parent=cpc,
base_uri='/api/partitions',
oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=query_props)
@property
def cpc(self):
"""
:class:`~zhmcclient.Cpc`: :term:`CPC` defining the scope for this
manager.
"""
return self._parent
@logged_api_call
def list(self, full_properties=False, filter_args=None):
"""
List the Partitions in this CPC.
Authorization requirements:
* Object-access permission to this CPC.
* Object-access permission to any Partition to be included in the
result.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties should be
retrieved, vs. only the short set as returned by the list
operation.
filter_args (dict):
Filter arguments that narrow the list of returned resources to
those that match the specified filter arguments. For details, see
:ref:`Filtering`.
`None` causes no filtering to happen, i.e. all resources are
returned.
Returns:
: A list of :class:`~zhmcclient.Partition` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
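Example (illustrative sketch, not from the :term:`HMC API` book; the HMC
host, credentials and CPC name are hypothetical)::

    import zhmcclient

    session = zhmcclient.Session('hmc.example.com', 'user', 'password')
    client = zhmcclient.Client(session)
    cpc = client.cpcs.find(name='CPC1')
    active_parts = cpc.partitions.list(filter_args={'status': 'active'})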
"""
resource_obj_list = []
resource_obj = self._try_optimized_lookup(filter_args)
if resource_obj:
resource_obj_list.append(resource_obj)
# It already has full properties
else:
query_parms, client_filters = divide_filter_args(
self._query_props, filter_args)
resources_name = 'partitions'
uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms)
result = self.session.get(uri)
if result:
props_list = result[resources_name]
for props in props_list:
resource_obj = self.resource_class(
manager=self,
uri=props[self._uri_prop],
name=props.get(self._name_prop, None),
properties=props)
if matches_filters(resource_obj, client_filters):
resource_obj_list.append(resource_obj)
if full_properties:
resource_obj.pull_full_properties()
self._name_uri_cache.update_from(resource_obj_list)
return resource_obj_list
@logged_api_call
def create(self, properties):
"""
Create and configure a Partition in this CPC.
Authorization requirements:
* Object-access permission to this CPC.
* Task permission to the "New Partition" task.
Parameters:
properties (dict): Initial property values.
Allowable properties are defined in section 'Request body contents'
in section 'Create Partition' in the :term:`HMC API` book.
Returns:
Partition:
The resource object for the new Partition.
The object will have its 'object-uri' property set as returned by
the HMC, and will also have the input properties set.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
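Example (illustrative sketch; the property values are hypothetical, and the
complete set of allowable properties is defined in the :term:`HMC API`
book)::

    partition = cpc.partitions.create({
        'name': 'part-1',
        'ifl-processors': 2,
        'initial-memory': 4096,
        'maximum-memory': 4096,
    })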
"""
result = self.session.post(self.cpc.uri + '/partitions',
body=properties)
# There should not be overlaps, but just in case there are, the
# returned props should overwrite the input props:
props = copy.deepcopy(properties)
props.update(result)
name = props.get(self._name_prop, None)
uri = props[self._uri_prop]
part = Partition(self, uri, name, props)
self._name_uri_cache.update(name, uri)
return part
class Partition(BaseResource):
"""
Representation of a :term:`Partition`.
Derived from :class:`~zhmcclient.BaseResource`; see there for common
methods and attributes.
Objects of this class are not directly created by the user; they are
returned from creation or list functions on their manager object
(in this case, :class:`~zhmcclient.PartitionManager`).
"""
def __init__(self, manager, uri, name=None, properties=None):
# This function should not go into the docs.
# manager (:class:`~zhmcclient.PartitionManager`):
# Manager object for this resource object.
# uri (string):
# Canonical URI path of the resource.
# name (string):
# Name of the resource.
# properties (dict):
# Properties to be set for this resource object. May be `None` or
# empty.
assert isinstance(manager, PartitionManager), \
"Partition init: Expected manager type %s, got %s" % \
(PartitionManager, type(manager))
super(Partition, self).__init__(manager, uri, name, properties)
# The manager objects for child resources (with lazy initialization):
self._nics = None
self._hbas = None
self._virtual_functions = None
@property
def nics(self):
"""
:class:`~zhmcclient.NicManager`: Access to the :term:`NICs <NIC>` in
this Partition.
"""
# We do here some lazy loading.
if not self._nics:
self._nics = NicManager(self)
return self._nics
@property
def hbas(self):
"""
:class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in
this Partition.
If the "dpm-storage-management" feature is enabled, this property is
`None`.
"""
# We do here some lazy loading.
if not self._hbas:
try:
dpm_sm = self.feature_enabled('dpm-storage-management')
except ValueError:
dpm_sm = False
if not dpm_sm:
self._hbas = HbaManager(self)
return self._hbas
@property
def virtual_functions(self):
"""
:class:`~zhmcclient.VirtualFunctionManager`: Access to the
:term:`Virtual Functions <Virtual Function>` in this Partition.
"""
# We do here some lazy loading.
if not self._virtual_functions:
self._virtual_functions = VirtualFunctionManager(self)
return self._virtual_functions
@logged_api_call
def feature_enabled(self, feature_name):
"""
Indicates whether the specified feature is enabled for the CPC of this
partition.
The HMC must generally support features, and the specified feature must
be available for the CPC.
For a list of available features, see section "Features" in the
:term:`HMC API`, or use the :meth:`feature_info` method.
Authorization requirements:
* Object-access permission to this partition.
Parameters:
feature_name (:term:`string`): The name of the feature.
Returns:
bool: `True` if the feature is enabled, or `False` if the feature is
disabled (but available).
Raises:
:exc:`ValueError`: Features are not supported on the HMC.
:exc:`ValueError`: The specified feature is not available for the
CPC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
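Example (illustrative sketch; 'dpm-storage-management' is a firmware
feature name that is also checked elsewhere in this module)::

    uses_storage_groups = partition.feature_enabled('dpm-storage-management')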
"""
feature_list = self.prop('available-features-list', None)
if feature_list is None:
raise ValueError("Firmware features are not supported on CPC %s" %
self.manager.cpc.name)
for feature in feature_list:
if feature['name'] == feature_name:
break
else:
raise ValueError("Firmware feature %s is not available on CPC %s" %
(feature_name, self.manager.cpc.name))
return feature['state'] # pylint: disable=undefined-loop-variable
@logged_api_call
def feature_info(self):
"""
Returns information about the features available for the CPC of this
partition.
Authorization requirements:
* Object-access permission to this partition.
Returns:
:term:`iterable`:
An iterable where each item represents one feature that is
available for the CPC of this partition.
Each item is a dictionary with the following items:
* `name` (:term:`unicode string`): Name of the feature.
* `description` (:term:`unicode string`): Short description of
the feature.
* `state` (bool): Enablement state of the feature (`True` if
enabled, `False` if disabled).
Raises:
:exc:`ValueError`: Features are not supported on the HMC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
feature_list = self.prop('available-features-list', None)
if feature_list is None:
raise ValueError("Firmware features are not supported on CPC %s" %
self.manager.cpc.name)
return feature_list
@logged_api_call
def start(self, wait_for_completion=True, operation_timeout=None,
status_timeout=None):
"""
Start (activate) this Partition, using the HMC operation "Start
Partition".
This HMC operation has deferred status behavior: Once the asynchronous
job on the HMC has completed, it may still take a few seconds until the
partition status reaches the desired value (until then it may show status
"paused"). If `wait_for_completion=True`, this method repeatedly checks
the status of the partition after the HMC operation has completed, and
waits until the status is in one of the desired states "active" or
"degraded".
TODO: Describe what happens if the maximum number of active partitions
is exceeded.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Start Partition" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the partition
has reached the desired status, after the HMC operation has
completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
Returns:
:class:`py:dict` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns an empty
:class:`py:dict` object.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired partition status.
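Example (illustrative sketch; assumes a ``partition`` object obtained from
this partition's manager)::

    partition.start(wait_for_completion=True)

    # Or, without blocking, keep the returned Job object:
    job = partition.start(wait_for_completion=False)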
"""
result = self.manager.session.post(
self.uri + '/operations/start',
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["active", "degraded"]
self.wait_for_status(statuses, status_timeout)
return result
@logged_api_call
def stop(self, wait_for_completion=True, operation_timeout=None,
status_timeout=None):
"""
Stop (deactivate) this Partition, using the HMC operation "Stop
Partition".
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Stop Partition" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the partition
has reached the desired status, after the HMC operation has
completed.
The special value 0 means that no timeout is set. `None` means that
the default status timeout of the session is used.
If the timeout expires when `wait_for_completion=True`, a
:exc:`~zhmcclient.StatusTimeout` is raised.
Returns:
:class:`py:dict` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns an empty
:class:`py:dict` object.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while
waiting for the desired partition status.
"""
result = self.manager.session.post(
self.uri + '/operations/stop',
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
if wait_for_completion:
statuses = ["stopped"]
self.wait_for_status(statuses, status_timeout)
return result
@logged_api_call
def delete(self):
"""
Delete this Partition.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Delete Partition" task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# pylint: disable=protected-access
self.manager.session.delete(self.uri)
self.manager._name_uri_cache.delete(
self.get_properties_local(self.manager._name_prop, None))
@logged_api_call
def update_properties(self, properties):
"""
Update writeable properties of this Partition.
This method serializes with other methods that access or change
properties on the same Python object.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Parameters:
properties (dict): New values for the properties to be updated.
Properties not to be updated are omitted.
Allowable properties are the properties with qualifier (w) in
section 'Data model' in section 'Partition object' in the
:term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
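Example (illustrative sketch; 'description' is a writeable property in the
partition data model)::

    partition.update_properties({'description': 'updated by automation'})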
"""
# pylint: disable=protected-access
self.manager.session.post(self.uri, body=properties)
is_rename = self.manager._name_prop in properties
if is_rename:
# Delete the old name from the cache
self.manager._name_uri_cache.delete(self.name)
self.update_properties_local(copy.deepcopy(properties))
if is_rename:
# Add the new name to the cache
self.manager._name_uri_cache.update(self.name, self.uri)
@logged_api_call
def dump_partition(self, parameters, wait_for_completion=True,
operation_timeout=None):
"""
Dump this Partition, by loading a standalone dump program from a SCSI
device and starting its execution, using the HMC operation
'Dump Partition'.
This operation requires that the CPC does not have the storage
management feature (i.e. is a z13).
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Dump Partition" task.
Parameters:
parameters (dict): Input parameters for the operation.
Allowable input parameters are defined in section
'Request body contents' in section 'Dump Partition' in the
:term:`HMC API` book.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
Returns:
:class:`py:dict` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns an empty
:class:`py:dict` object.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
result = self.manager.session.post(
self.uri + '/operations/scsi-dump',
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout,
body=parameters)
return result
@logged_api_call
def start_dump_program(self, parameters, wait_for_completion=True,
operation_timeout=None):
"""
Dump this Partition, by loading a standalone dump program from a storage
volume and starting its execution, using the HMC operation
'Start Dump Program'.
This operation requires that the CPC has the storage management feature
(i.e. is a z14 or later).
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Dump Partition" task.
Parameters:
parameters (dict): Input parameters for the operation.
Allowable input parameters are defined in section
'Request body contents' in section 'Start Dump Program' in the
:term:`HMC API` book.
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
Returns:
:class:`py:dict` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns an empty
:class:`py:dict` object.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
result = self.manager.session.post(
self.uri + '/operations/start-dump-program',
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout,
body=parameters)
return result
@logged_api_call
def psw_restart(self, wait_for_completion=True, operation_timeout=None):
"""
Initiates a PSW restart for this Partition, using the HMC operation
'Perform PSW Restart'.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "PSW Restart" task.
Parameters:
wait_for_completion (bool):
Boolean controlling whether this method should wait for completion
of the requested asynchronous HMC operation, as follows:
* If `True`, this method will wait for completion of the
asynchronous job performing the operation.
* If `False`, this method will return immediately once the HMC has
accepted the request to perform the operation.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for completion of the asynchronous
job performing the operation. The special value 0 means that no
timeout is set. `None` means that the default async operation
timeout of the session is used. If the timeout expires when
`wait_for_completion=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
Returns:
:class:`py:dict` or :class:`~zhmcclient.Job`:
If `wait_for_completion` is `True`, returns an empty
:class:`py:dict` object.
If `wait_for_completion` is `False`, returns a
:class:`~zhmcclient.Job` object representing the asynchronously
executing job on the HMC.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for completion of the operation.
"""
result = self.manager.session.post(
self.uri + '/operations/psw-restart',
wait_for_completion=wait_for_completion,
operation_timeout=operation_timeout)
return result
@logged_api_call
def mount_iso_image(self, image, image_name, ins_file_name):
"""
Upload an ISO image and associate it to this Partition
using the HMC operation 'Mount ISO Image'.
When the partition already has an ISO image associated,
the newly uploaded image replaces the current one.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Parameters:
image (:term:`byte string` or file-like object):
The content of the ISO image.
Images larger than 2GB cannot be specified as a byte string; they
must be specified as a file-like object.
File-like objects must have opened the file in binary mode.
image_name (:term:`string`): The displayable name of the image.
This value must be a valid Linux file name without directories,
must not contain blanks, and must end with '.iso' in lower case.
This value will be shown in the 'boot-iso-image-name' property of
this partition.
ins_file_name (:term:`string`): The path name of the INS file within
the file system of the ISO image.
This value will be shown in the 'boot-iso-ins-file' property of
this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms_str = '?image-name={}&ins-file-name={}'. \
format(quote(image_name, safe=''), quote(ins_file_name, safe=''))
self.manager.session.post(
self.uri + '/operations/mount-iso-image' + query_parms_str,
body=image)
@logged_api_call
def unmount_iso_image(self):
"""
Unmount the currently mounted ISO from this Partition using the HMC
operation 'Unmount ISO Image'. This operation sets the partition's
'boot-iso-image-name' and 'boot-iso-ins-file' properties to null.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
self.manager.session.post(
self.uri + '/operations/unmount-iso-image')
@logged_api_call
def open_os_message_channel(self, include_refresh_messages=True):
"""
Open a JMS message channel to this partition's operating system,
returning the string "topic" representing the message channel.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Operating System Messages" task at least
in view-only mode.
Parameters:
include_refresh_messages (bool):
Boolean controlling whether refresh operating system messages
should be sent, as follows:
* If `True`, refresh messages will be received when the user
connects to the topic. The default.
* If `False`, refresh messages will not be received when the user
connects to the topic.
Returns:
:term:`string`:
Returns a string representing the os-message-notification JMS
topic. The user can connect to this topic to start the flow of
operating system messages.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'include-refresh-messages': include_refresh_messages}
result = self.manager.session.post(
self.uri + '/operations/open-os-message-channel', body)
return result['topic-name']
@logged_api_call
def send_os_command(self, os_command_text, is_priority=False):
"""
Send a command to the operating system running in this partition.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Operating System Messages" task in
modification mode.
Parameters:
os_command_text (string): The text of the operating system command.
is_priority (bool):
Boolean controlling whether this is a priority operating system
command, as follows:
* If `True`, this message is treated as a priority operating
system command.
* If `False`, this message is not treated as a priority
operating system command. The default.
Returns:
None
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'is-priority': is_priority,
'operating-system-command-text': os_command_text}
self.manager.session.post(
self.uri + '/operations/send-os-cmd', body)
@logged_api_call
def wait_for_status(self, status, status_timeout=None):
"""
Wait until the status of this partition has a desired value.
Parameters:
status (:term:`string` or iterable of :term:`string`):
Desired partition status or set of status values to reach; one or
more of the values defined for the 'status' property in the
data model for partitions in the :term:`HMC API` book.
status_timeout (:term:`number`):
Timeout in seconds, for waiting that the status of the partition
has reached one of the desired status values. The special value 0
means that no timeout is set.
`None` means that the default status timeout will be used.
If the timeout expires, a :exc:`~zhmcclient.StatusTimeout` is
raised.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.StatusTimeout`: The status timeout expired while
waiting for the desired partition status.
"""
if status_timeout is None:
status_timeout = \
self.manager.session.retry_timeout_config.status_timeout
if status_timeout > 0:
end_time = time.time() + status_timeout
if isinstance(status, (list, tuple)):
statuses = status
else:
statuses = [status]
while True:
# Fastest way to get actual status value:
parts = self.manager.cpc.partitions.list(
filter_args={'name': self.name})
assert len(parts) == 1
this_part = parts[0]
actual_status = this_part.get_property('status')
if actual_status in statuses:
return
if status_timeout > 0 and time.time() > end_time:
raise StatusTimeout(
"Waiting for partition {} to reach status(es) '{}' timed "
"out after {} s - current status is '{}'".
format(self.name, statuses, status_timeout, actual_status),
actual_status, statuses, status_timeout)
time.sleep(1) # Avoid hot spin loop
@logged_api_call
def increase_crypto_config(self, crypto_adapters,
crypto_domain_configurations):
"""
Add crypto adapters and/or crypto domains to the crypto configuration
of this partition.
The general principle for maintaining crypto configurations of
partitions is as follows: Each adapter included in the crypto
configuration of a partition has all crypto domains included in the
crypto configuration. Each crypto domain included in the crypto
configuration has the same access mode on all adapters included in the
crypto configuration.
Example: Assume that the current crypto configuration of a partition
includes crypto adapter A and crypto domains 0 and 1. When this method
is called to add adapter B and domain configurations for domains 1 and
2, the resulting crypto configuration of the partition will include
domains 0, 1, and 2 on each of the adapters A and B.
Authorization requirements:
* Object-access permission to this Partition.
* Object-access permission to the specified Crypto Adapter.
* Task permission to the "Partition Details" task.
Parameters:
crypto_adapters (:term:`iterable` of :class:`~zhmcclient.Adapter`):
Crypto adapters that should be added to the crypto configuration of
this partition.
crypto_domain_configurations (:term:`iterable` of `domain_config`):
Crypto domain configurations that should be added to the crypto
configuration of this partition.
A crypto domain configuration (`domain_config`) is a dictionary
with the following keys:
* ``"domain-index"`` (:term:`integer`): Domain index of the crypto
domain.
The domain index is a number in the range of 0 to a maximum that
depends on the model of the crypto adapter and the CPC model. For
the Crypto Express 5S adapter in a z13, the maximum domain index
is 84.
* ``"access-mode"`` (:term:`string`): Access mode for the crypto
domain.
The access mode specifies the way the partition can use the
crypto domain on the crypto adapter(s), using one of the
following string values:
* ``"control"`` - The partition can load cryptographic keys into
the domain, but it may not use the domain to perform
cryptographic operations.
* ``"control-usage"`` - The partition can load cryptographic keys
into the domain, and it can use the domain to perform
cryptographic operations.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
crypto_adapter_uris = [a.uri for a in crypto_adapters]
body = {'crypto-adapter-uris': crypto_adapter_uris,
'crypto-domain-configurations': crypto_domain_configurations}
self.manager.session.post(
self.uri + '/operations/increase-crypto-configuration', body)
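# Illustrative sketch (not from the original source): add two adapters and two
# domains in one call, using the `domain_config` dictionaries described above.
# `partition`, `adapter_a` and `adapter_b` are assumed to have been looked up
# elsewhere (e.g. via cpc.partitions.find() and cpc.adapters.find()).
#
#   domain_configs = [
#       {'domain-index': 1, 'access-mode': 'control-usage'},
#       {'domain-index': 2, 'access-mode': 'control'},
#   ]
#   partition.increase_crypto_config([adapter_a, adapter_b], domain_configs)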
@logged_api_call
def decrease_crypto_config(self, crypto_adapters,
crypto_domain_indexes):
"""
Remove crypto adapters and/or crypto domains from the crypto
configuration of this partition.
For the general principle for maintaining crypto configurations of
partitions, see :meth:`~zhmcclient.Partition.increase_crypto_config`.
Example: Assume that the current crypto configuration of a partition
includes crypto adapters A, B and C and crypto domains 0, 1, and 2 (on
each of the adapters). When this method is called to remove adapter C
and domain 2, the resulting crypto configuration of the partition will
include domains 0 and 1 on each of the adapters A and B.
Authorization requirements:
* Object-access permission to this Partition.
* Object-access permission to the specified Crypto Adapters.
* Task permission to the "Partition Details" task.
Parameters:
crypto_adapters (:term:`iterable` of :class:`~zhmcclient.Adapter`):
Crypto adapters that should be removed from the crypto
configuration of this partition.
crypto_domain_indexes (:term:`iterable` of :term:`integer`):
Domain indexes of the crypto domains that should be removed from
the crypto configuration of this partition. For values, see
:meth:`~zhmcclient.Partition.increase_crypto_config`.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
crypto_adapter_uris = [a.uri for a in crypto_adapters]
body = {'crypto-adapter-uris': crypto_adapter_uris,
'crypto-domain-indexes': crypto_domain_indexes}
self.manager.session.post(
self.uri + '/operations/decrease-crypto-configuration', body)
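# Illustrative sketch (assumed objects as in the example above): remove one
# adapter and one domain index from the partition's crypto configuration.
#
#   partition.decrease_crypto_config([adapter_b], [2])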
@logged_api_call
def change_crypto_domain_config(self, crypto_domain_index, access_mode):
"""
Change the access mode for a crypto domain that is currently included
in the crypto configuration of this partition.
The access mode will be changed for the specified crypto domain on all
crypto adapters currently included in the crypto configuration of this
partition.
For the general principle for maintaining crypto configurations of
partitions, see :meth:`~zhmcclient.Partition.increase_crypto_config`.
Authorization requirements:
* Object-access permission to this Partition.
* Task permission to the "Partition Details" task.
Parameters:
crypto_domain_index (:term:`integer`):
Domain index of the crypto domain to be changed. For values, see
:meth:`~zhmcclient.Partition.increase_crypto_config`.
access_mode (:term:`string`):
The new access mode for the crypto domain. For values, see
:meth:`~zhmcclient.Partition.increase_crypto_config`.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'domain-index': crypto_domain_index,
'access-mode': access_mode}
self.manager.session.post(
self.uri + '/operations/change-crypto-domain-configuration', body)
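# Illustrative sketch: switch domain 1 to "control" access mode on all adapters
# currently in the partition's crypto configuration (`partition` assumed to
# exist as in the examples above).
#
#   partition.change_crypto_domain_config(1, 'control')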
@logged_api_call
def zeroize_crypto_domain(self, crypto_adapter, crypto_domain_index):
"""
Zeroize a single crypto domain on a crypto adapter.
Zeroizing a crypto domain clears the cryptographic keys and
non-compliance mode settings in the crypto domain.
The crypto domain must be attached to this partition in "control-usage"
access mode.
Supported CPC versions: z14 GA2 and above, and the corresponding
LinuxOne systems.
Authorization requirements:
* Object-access permission to this Partition.
* Object-access permission to the specified Crypto Adapter.
* Task permission to the "Zeroize Crypto Domain" task.
Parameters:
crypto_adapter (:class:`~zhmcclient.Adapter`):
Crypto adapter with the crypto domain to be zeroized.
crypto_domain_index (:term:`integer`):
Domain index of the crypto domain to be zeroized.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {
'crypto-adapter-uri': crypto_adapter.uri,
'domain-index': crypto_domain_index
}
self.manager.session.post(
self.uri + '/operations/zeroize-crypto-domain', body)
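# Illustrative sketch (assumes a z14 GA2 or later CPC and objects as above):
#
#   partition.zeroize_crypto_domain(adapter_a, crypto_domain_index=1)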
@logged_api_call
def attach_storage_group(self, storage_group):
"""
Attach a :term:`storage group` to this partition.
This will cause the :term:`storage volumes <storage volume>` of the
storage group to be attached to the partition, instantiating any
necessary :term:`virtual storage resource` objects.
A storage group can be attached to a partition regardless of its
fulfillment state. The fulfillment state of its storage volumes
and thus of the entire storage group changes as volumes are discovered
by DPM, and will eventually reach "complete".
The CPC must have the "dpm-storage-management" feature enabled.
Authorization requirements:
* Object-access permission to this partition.
* Object-access permission to the specified storage group.
* Task permission to the "Partition Details" task.
Parameters:
storage_group (:class:`~zhmcclient.StorageGroup`):
Storage group to be attached. The storage group must not currently
be attached to this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'storage-group-uri': storage_group.uri}
self.manager.session.post(
self.uri + '/operations/attach-storage-group', body)
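# Illustrative sketch (assumes the CPC has the "dpm-storage-management" feature
# enabled and `console` / `partition` objects obtained elsewhere):
#
#   sg = console.storage_groups.find(name='sg1')
#   partition.attach_storage_group(sg)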
@logged_api_call
def detach_storage_group(self, storage_group):
"""
Detach a :term:`storage group` from this partition.
This will cause the :term:`storage volumes <storage volume>` of the
storage group to be detached from the partition, removing any
:term:`virtual storage resource` objects that had been created upon
attachment.
A storage group can be detached from a partition regardless of its
fulfillment state. The fulfillment state of its storage volumes
changes as volumes are discovered by DPM.
The CPC must have the "dpm-storage-management" feature enabled.
Authorization requirements:
* Object-access permission to this partition.
* Task permission to the "Partition Details" task.
Parameters:
storage_group (:class:`~zhmcclient.StorageGroup`):
Storage group to be detached. The storage group must currently
be attached to this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'storage-group-uri': storage_group.uri}
self.manager.session.post(
self.uri + '/operations/detach-storage-group', body)
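# Illustrative sketch (same assumed objects as above): detach a storage group
# that is currently attached to the partition.
#
#   partition.detach_storage_group(sg)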
@logged_api_call
def list_attached_storage_groups(self, full_properties=False):
"""
Return the storage groups that are attached to this partition.
The CPC must have the "dpm-storage-management" feature enabled.
Authorization requirements:
* Object-access permission to this partition.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties for each returned
storage group is retrieved, vs. only the following short set:
"object-uri", "object-id", "class", "parent".
TODO: Verify short list of properties.
Returns:
List of :class:`~zhmcclient.StorageGroup` objects representing the
storage groups that are attached to this partition.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
sg_list = []
sg_uris = self.get_property('storage-group-uris')
if sg_uris:
console = self.manager.cpc.manager.console
for sg_uri in sg_uris:
sg = console.storage_groups.resource_object(sg_uri)
sg_list.append(sg)
if full_properties:
sg.pull_full_properties()
return sg_list
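# Illustrative sketch: list the names of all storage groups attached to the
# partition (`partition` assumed as in the examples above).
#
#   for sg in partition.list_attached_storage_groups():
#       print(sg.name)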
|
|
import annoying.fields
import django.core.validators
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("auth", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="User",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
("password", models.CharField(max_length=128, verbose_name="password")),
("last_login", models.DateTimeField(default=django.utils.timezone.now, verbose_name="last login")),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
help_text="Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.",
unique=True,
max_length=30,
verbose_name="username",
validators=[
django.core.validators.RegexValidator("^[\\w.@+-]+$", "Enter a valid username.", "invalid")
],
),
),
("first_name", models.CharField(max_length=30, verbose_name="first name", blank=True)),
("last_name", models.CharField(max_length=30, verbose_name="last name", blank=True)),
("email", models.EmailField(max_length=75, verbose_name="email address", blank=True)),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
("date_joined", models.DateTimeField(default=django.utils.timezone.now, verbose_name="date joined")),
(
"groups",
models.ManyToManyField(
related_query_name="user",
related_name="user_set",
to="auth.Group",
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of his/her group.",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
related_query_name="user",
related_name="user_set",
to="auth.Permission",
blank=True,
help_text="Specific permissions for this user.",
verbose_name="user permissions",
),
),
],
options={
"abstract": False,
"verbose_name": "user",
"verbose_name_plural": "users",
},
bases=(models.Model,),
),
migrations.CreateModel(
name="Action",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
(
"name",
models.CharField(
max_length=255,
),
),
],
bases=(models.Model,),
),
migrations.CreateModel(
name="ActionRecord",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
("comment", models.CharField(max_length=255, null=True, blank=True)),
("rating", models.IntegerField(null=True, blank=True)),
("date", models.DateTimeField(auto_now_add=True)),
("action", models.ForeignKey(to="moviesapp.Action", on_delete=models.CASCADE)),
],
bases=(models.Model,),
),
migrations.CreateModel(
name="List",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
("title", models.CharField(max_length=255)),
("key_name", models.CharField(max_length=255)),
],
bases=(models.Model,),
),
migrations.CreateModel(
name="Movie",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
(
"title",
models.CharField(
max_length=255,
),
),
("title_ru", models.CharField(max_length=255)),
("overview", models.TextField(null=True, blank=True)),
("plot", models.TextField(null=True, blank=True)),
("director", models.CharField(max_length=255, null=True, blank=True)),
("writer", models.CharField(max_length=255, null=True, blank=True)),
("genre", models.CharField(max_length=255, null=True, blank=True)),
("actors", models.CharField(max_length=255, null=True, blank=True)),
("imdb_id", models.CharField(unique=True, max_length=15, verbose_name="IMDB id")),
("tmdb_id", models.IntegerField(unique=True, verbose_name="TMDB id")),
("imdb_rating", models.DecimalField(null=True, max_digits=2, decimal_places=1)),
(
"poster",
models.CharField(
max_length=255,
null=True,
),
),
(
"release_date",
models.DateField(
null=True,
),
),
("runtime", models.TimeField(null=True, blank=True)),
("homepage", models.URLField(null=True, verbose_name="\xd1\x81\xd0\xb0\xd0\xb9\xd1\x82", blank=True)),
("trailers", annoying.fields.JSONField(null=True, blank=True)),
],
options={
"ordering": ["pk"],
},
bases=(models.Model,),
),
migrations.CreateModel(
name="Record",
fields=[
("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
("rating", models.IntegerField(default=0)),
(
"comment",
models.CharField(
default="",
max_length=255,
),
),
(
"date",
models.DateTimeField(
auto_now_add=True,
),
),
("list", models.ForeignKey(to="moviesapp.List", on_delete=models.CASCADE)),
("movie", models.ForeignKey(related_name="records", to="moviesapp.Movie", on_delete=models.CASCADE)),
("user", models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
bases=(models.Model,),
),
migrations.AddField(
model_name="actionrecord",
name="list",
field=models.ForeignKey(blank=True, to="moviesapp.List", null=True, on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name="actionrecord",
name="movie",
field=models.ForeignKey(to="moviesapp.Movie", on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AddField(
model_name="actionrecord",
name="user",
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
preserve_default=True,
),
]
|
|
#!/usr/bin/env python
"""
Copyright 2010-2018 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Broadband Platform Version of Martin Mai BBcoda2.csh
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
# import math # (used to calculate moment)
# Import Broadband modules
import bband_utils
import velocity_models
from install_cfg import InstallCfg
from genslip_cfg import GenslipCfg, calculate_rvfac
import plot_srf
class Genslip(object):
"""
Implements Genslip as a Broadband component
Inputs: sim_id, velocity model, and source file
Outputs: an SRF file
Assumes that the input directory has been created by the workflow codes,
and that the files are transferred over into that directory.
"""
def __init__(self, i_r_velmodel, i_r_srcfile,
o_r_srffile, i_vmodel_name, sim_id=0):
"""
Initialize class variables
"""
self.sim_id = sim_id
self.r_srcfile = i_r_srcfile
self.r_velmodel = i_r_velmodel
self.r_srffile = o_r_srffile
self.vmodel_name = i_vmodel_name
self.risetime_coef = None
self.shal_vrup = None
self.mean_rvfac = None
self.range_rvfac = None
self.risetime_fac = None
self.deep_risetime_fac = None
self.slip_sigma = None
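# A hedged construction sketch (file names are hypothetical; in the real
# workflow they come from the platform configuration, and the class is
# normally driven through the __main__ block at the bottom of this module):
#
#   gen = Genslip("nr02-vs500.fk1d", "my_event.src",
#                 "my_event.srf", "LABasin500", sim_id=1000)
#   gen.run()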
def run(self):
"""
Runs Genslip
"""
print("GP Rupture Generator GenSlip".center(80, '-'))
# Load configuration, set sim_id
install = InstallCfg.getInstance()
sim_id = self.sim_id
# Build directory paths
a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
a_indir = os.path.join(install.A_IN_DATA_DIR, str(sim_id))
a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
a_logdir = os.path.join(install.A_OUT_LOG_DIR, str(sim_id))
# Make sure the output and tmp directories exist
bband_utils.mkdirs([a_tmpdir, a_indir, a_outdir], print_cmd=False)
# Now, file paths
self.log = os.path.join(a_logdir, "%d.genslip.log" % (sim_id))
a_srcfile = os.path.join(a_indir, self.r_srcfile)
a_velfile = os.path.join(a_indir, self.r_velmodel)
# Read src file
cfg = GenslipCfg(a_srcfile)
# Define location of input velocity model file
a_velmodel = os.path.join(a_tmpdir, self.r_velmodel)
# Get pointer to the velocity model object
vel_obj = velocity_models.get_velocity_model_by_name(self.vmodel_name)
if vel_obj is None:
raise bband_utils.ParameterError("Cannot find velocity model: %s" %
(self.vmodel_name))
# Check for velocity model-specific parameters
vmodel_params = vel_obj.get_codebase_params('gp')
# Look for RISETIME_COEF
if 'RISETIME_COEF' in vmodel_params:
self.risetime_coef = float(vmodel_params['RISETIME_COEF'])
else:
self.risetime_coef = cfg.RISETIME_COEF
# Look for SHAL_VRUP
if 'SHAL_VRUP' in vmodel_params:
self.shal_vrup = float(vmodel_params['SHAL_VRUP'])
else:
self.shal_vrup = cfg.SHAL_VRUP
# Look for MEAN_RVFAC
if 'MEAN_RVFAC' in vmodel_params:
self.mean_rvfac = float(vmodel_params['MEAN_RVFAC'])
else:
self.mean_rvfac = cfg.MEAN_RVFAC
# Look for RANGE_RVFAC
if 'RANGE_RVFAC' in vmodel_params:
self.range_rvfac = float(vmodel_params['RANGE_RVFAC'])
else:
self.range_rvfac = cfg.RANGE_RVFAC
# Look for RISETIME_FAC
if 'RISETIME_FAC' in vmodel_params:
self.risetime_fac = float(vmodel_params['RISETIME_FAC'])
else:
self.risetime_fac = cfg.RISETIME_FAC
# Look for DEEP_RISETIME_FAC
if 'DEEP_RISETIME_FAC' in vmodel_params:
self.deep_risetime_fac = float(vmodel_params['DEEP_RISETIME_FAC'])
else:
self.deep_risetime_fac = cfg.DEEP_RISETIME_FAC
# Look for SLIP SIGMA
if 'SLIP_SIGMA' in vmodel_params:
self.slip_sigma = float(vmodel_params['SLIP_SIGMA'])
else:
self.slip_sigma = cfg.SLIP_SIGMA
# Look for DT
if 'GF_DT' in vmodel_params:
gf_dt = float(vmodel_params['GF_DT'])
else:
raise bband_utils.ParameterError("Cannot find GF_DT parameter in"
"velocity model %s!" %
(self.vmodel_name))
# Calculate nstk,ndip
nstk = round(cfg.CFGDICT["fault_length"] / cfg.CFGDICT["dlen"])
ndip = round(cfg.CFGDICT["fault_width"] / cfg.CFGDICT["dwid"])
# Calculate rvfac
if "common_seed" in cfg.CFGDICT:
rvfac = calculate_rvfac(self.mean_rvfac, self.range_rvfac,
cfg.CFGDICT["common_seed"])
else:
rvfac = calculate_rvfac(self.mean_rvfac, self.range_rvfac,
cfg.CFGDICT["seed"])
# moment = math.pow(10, 1.5 * (cfg.MAG + 10.7))
# For multi-segment SRC files
if "rupture_delay" in cfg.CFGDICT:
rupture_delay = cfg.CFGDICT["rupture_delay"]
else:
rupture_delay = 0.0
if "moment_fraction" in cfg.CFGDICT:
moment_fraction = cfg.CFGDICT["moment_fraction"]
else:
moment_fraction = -1.0
if "max_fault_length" in cfg.CFGDICT:
flen_max = cfg.CFGDICT["max_fault_length"]
else:
flen_max = -1.0
r_gsftmp = "m%.2f-%.2fx%.2f.gsf" % (cfg.CFGDICT["magnitude"],
cfg.CFGDICT["dlen"],
cfg.CFGDICT["dwid"])
a_gsftmp = os.path.join(a_tmpdir, r_gsftmp)
r_outroot = "m%.2f-%.2fx%.2f_s%d-v5.4.1" % (cfg.CFGDICT["magnitude"],
cfg.CFGDICT["dlen"],
cfg.CFGDICT["dwid"],
cfg.CFGDICT["seed"])
a_srffile = os.path.join(a_indir, "%s.srf" % (r_outroot))
progstring = ("%s/fault_seg2gsf read_slip_vals=0 << EOF > %s 2>> %s\n" %
(install.A_GP_BIN_DIR, a_gsftmp, self.log) +
"1\n" +
"%f %f %f %f %f %f %f %f %d %d\n" %
(cfg.CFGDICT["lon_top_center"],
cfg.CFGDICT["lat_top_center"],
cfg.CFGDICT["depth_to_top"],
cfg.CFGDICT["strike"], cfg.CFGDICT["dip"],
cfg.CFGDICT["rake"], cfg.CFGDICT["fault_length"],
cfg.CFGDICT["fault_width"], nstk, ndip) + "EOF")
bband_utils.runprog(progstring)
progstring = ("%s/genslip-v5.4.1 read_erf=0 write_srf=1 " %
(install.A_GP_BIN_DIR) +
"read_gsf=1 write_gsf=0 infile=%s " % (a_gsftmp) +
"mag=%.2f nstk=%d ndip=%d " %
(cfg.CFGDICT["magnitude"], nstk, ndip) +
"ns=1 nh=1 " +
"kmodel=2 seed=%d slip_sigma=%f " %
(cfg.CFGDICT["seed"], self.slip_sigma) +
"circular_average=0 modified_corners=0 " +
"velfile=%s shypo=%f dhypo=%f rvfrac=%f " %
(a_velfile, cfg.CFGDICT["hypo_along_stk"],
cfg.CFGDICT["hypo_down_dip"], rvfac) +
"shal_vrup_dep=%f shal_vrup_deprange=%f shal_vrup=%f " %
(cfg.RTDEP, cfg.RTDEP_RANGE, self.shal_vrup) +
"side_taper=0.02 bot_taper=0.0 top_taper=0.0 " +
"dt=%f risetime_coef=%f plane_header=1 " %
(gf_dt, self.risetime_coef) +
"risetimefac=%f risetimedep=%f risetimedep_range=%f " %
(self.risetime_fac, cfg.RTDEP, cfg.RTDEP_RANGE) +
"rt_scalefac=%f slip_water_level=%f " %
(cfg.RT_SCALEFAC, cfg.SLIP_WATER_LEVEL) +
"deep_risetimedep=%f deep_risetimedep_range=%f " %
(cfg.DEEP_RISETIMEDEP, cfg.DEEP_RISETIMEDEP_RANGE) +
"deep_risetimefac=%f " %
(self.deep_risetime_fac) +
"flen_max=%f rupture_delay=%f moment_fraction=%f " %
(flen_max, rupture_delay, moment_fraction) +
"srf_version=2.0 rake_sigma=15.0 fdrup_time=1 " +
"deep_vrup=0.6 use_gaus=1 alpha_rough=0.01 " +
"lambda_min=0.08 tsfac_coef=1.1 tsfac1_sigma=1.0 " +
"tsfac1_scor=0.8 rtime1_sigma=%f rtime1_scor=0.5 " %
(self.slip_sigma) +
"tsfac_bzero=-0.1 tsfac_slope=-0.5 " +
"> %s 2>> %s" % (a_srffile, self.log))
bband_utils.runprog(progstring)
#
# mv result to outputfile
#
progstring = "cp %s %s" % (a_srffile, os.path.join(a_tmpdir, self.r_srffile))
bband_utils.runprog(progstring)
progstring = "cp %s %s" % (a_srffile, os.path.join(a_indir, self.r_srffile))
bband_utils.runprog(progstring)
progstring = "cp %s %s" % (a_srffile, os.path.join(a_outdir, self.r_srffile))
bband_utils.runprog(progstring)
# Plot SRF
plot_srf.run(self.r_srffile, sim_id=self.sim_id)
print("GP GenSlip Completed".center(80, '-'))
if __name__ == "__main__":
print("Testing Module: %s" % (os.path.basename(sys.argv[0])))
ME = Genslip(sys.argv[1], sys.argv[2],
sys.argv[3], sys.argv[4],
sim_id=int(sys.argv[5]))
sys.exit(0)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly both upgrading and downgrading, and that no data loss occurs
if possible.
"""
import commands
import ConfigParser
import os
import urlparse
import uuid
from migrate.versioning import repository
import sqlalchemy
import cinder.db.migration as migration
import cinder.db.sqlalchemy.migrate_repo
from cinder.db.sqlalchemy.migration import versioning_api as migration_api
from cinder.openstack.common import log as logging
from cinder import test
LOG = logging.getLogger('cinder.tests.test_migrations')
def _get_connect_string(backend,
user="openstack_citest",
passwd="openstack_citest",
database="openstack_citest"):
"""
Try to get a connection with a very specific set of values; if we get
these, then we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% locals())
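# For example (credentials are the test defaults above, host is assumed to be
# localhost), _get_connect_string("postgres") returns:
#   postgresql+psycopg2://openstack_citest:openstack_citest@localhost/openstack_citest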
def _is_mysql_avail(**kwargs):
return _is_backend_avail('mysql', **kwargs)
def _is_backend_avail(backend,
user="openstack_citest",
passwd="openstack_citest",
database="openstack_citest"):
try:
if backend == "mysql":
connect_uri = _get_connect_string("mysql", user=user,
passwd=passwd, database=database)
elif backend == "postgres":
connect_uri = _get_connect_string("postgres", user=user,
passwd=passwd, database=database)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def _have_mysql():
present = os.environ.get('NOVA_TEST_MYSQL_PRESENT')
if present is None:
return _is_backend_avail('mysql')
return present.lower() in ('', 'true')
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data."""
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
return sqlalchemy.Table(name, metadata, autoload=True)
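# A minimal usage sketch (assumes `engine` is a live SQLAlchemy engine for one
# of the test databases and `some_id` is a known primary key value):
#
#   volumes = get_table(engine, 'volumes')
#   row = volumes.select(volumes.c.id == some_id).execute().first()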
class TestMigrations(test.TestCase):
"""Test sqlalchemy-migrate migrations."""
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the CINDER_TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
CONFIG_FILE_PATH = os.environ.get('CINDER_TEST_MIGRATIONS_CONF',
DEFAULT_CONFIG_FILE)
MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__
REPOSITORY = repository.Repository(
os.path.abspath(os.path.dirname(MIGRATE_FILE)))
def setUp(self):
super(TestMigrations, self).setUp()
self.snake_walk = False
self.test_databases = {}
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % TestMigrations.CONFIG_FILE_PATH)
if not self.test_databases:
if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
cp.read(TestMigrations.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
except ConfigParser.ParsingError, e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# We start each test case with a completely blank slate.
self._reset_databases()
def tearDown(self):
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_databases()
super(TestMigrations, self).tearDown()
def _reset_databases(self):
def execute_cmd(cmd=None):
status, output = commands.getstatusoutput(cmd)
LOG.debug(output)
self.assertEqual(0, status)
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
conn_pieces = urlparse.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
# the easiest and cleanest solution
db_path = conn_pieces.path.strip('/')
if os.path.exists(db_path):
os.unlink(db_path)
# No need to recreate the SQLite DB. SQLite will
# create it for us if it's not there...
elif conn_string.startswith('mysql'):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
if auth_pieces[1].strip():
password = "-p\"%s\"" % auth_pieces[1]
sql = ("drop database if exists %(database)s; "
"create database %(database)s;") % locals()
cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
"-e \"%(sql)s\"") % locals()
execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
# note(krtaylor): File creation problems with tests in
# venv using .pgpass authentication, changed to
# PGPASSWORD environment variable which is no longer
# planned to be deprecated
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
# drop database which we have connected to, so for such
# operations there is a special database template1.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
sql = ("drop database if exists %(database)s;") % locals()
droptable = sqlcmd % locals()
execute_cmd(droptable)
sql = ("create database %(database)s;") % locals()
createtable = sqlcmd % locals()
execute_cmd(createtable)
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
def test_walk_versions(self):
"""
Walks all version scripts for each tested database, ensuring
that there are no errors in the version scripts for each engine
"""
for key, engine in self.engines.items():
self._walk_versions(engine, self.snake_walk)
def test_mysql_connect_fail(self):
"""
Test that we can trigger a mysql connection failure and we fail
gracefully to ensure we don't break people without mysql
"""
if _is_mysql_avail(user="openstack_cifail"):
self.fail("Shouldn't have connected")
@test.skip_unless(_have_mysql(), "mysql not available")
def test_mysql_innodb(self):
"""
Test that table creation on mysql only builds InnoDB tables
"""
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
connect_string = _get_connect_string('mysql')
engine = sqlalchemy.create_engine(connect_string)
self.engines["mysqlcitest"] = engine
self.test_databases["mysqlcitest"] = connect_string
# build a fully populated mysql database with all the tables
self._reset_databases()
self._walk_versions(engine, False, False)
uri = _get_connect_string('mysql', database="information_schema")
connection = sqlalchemy.create_engine(uri).connect()
# sanity check
total = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='openstack_citest'")
self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
noninnodb = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='openstack_citest' "
"and ENGINE!='InnoDB' "
"and TABLE_NAME!='migrate_version'")
count = noninnodb.scalar()
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
def test_postgresql_connect_fail(self):
"""
Test that we can trigger a postgres connection failure and we fail
gracefully to ensure we don't break people without postgres
"""
if _is_backend_avail('postgres', user="openstack_cifail"):
self.fail("Shouldn't have connected")
@test.skip_unless(_is_backend_avail('postgres'),
"postgresql not available")
def test_postgresql_opportunistically(self):
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
connect_string = _get_connect_string("postgres")
engine = sqlalchemy.create_engine(connect_string)
self.engines["postgresqlcitest"] = engine
self.test_databases["postgresqlcitest"] = connect_string
# build a fully populated postgresql database with all the tables
self._reset_databases()
self._walk_versions(engine, False, False)
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
# Place the database under version control
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.INIT_VERSION)
self.assertEqual(migration.INIT_VERSION,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
migration_api.upgrade(engine, TestMigrations.REPOSITORY,
migration.INIT_VERSION + 1)
LOG.debug('latest version is %s' % TestMigrations.REPOSITORY.latest)
for version in xrange(migration.INIT_VERSION + 2,
TestMigrations.REPOSITORY.latest + 1):
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version, with_data=True)
if snake_walk:
self._migrate_down(engine, version - 1)
self._migrate_up(engine, version)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(
xrange(migration.INIT_VERSION + 1,
TestMigrations.REPOSITORY.latest)):
# downgrade -> upgrade -> downgrade
self._migrate_down(engine, version)
if snake_walk:
self._migrate_up(engine, version + 1)
self._migrate_down(engine, version)
def _migrate_down(self, engine, version):
migration_api.downgrade(engine,
TestMigrations.REPOSITORY,
version)
self.assertEqual(version,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _prerun_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
try:
if with_data:
data = None
prerun = getattr(self, "_prerun_%3.3d" % version, None)
if prerun:
data = prerun(engine)
migration_api.upgrade(engine,
TestMigrations.REPOSITORY,
version)
self.assertEqual(
version,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
if with_data:
check = getattr(self, "_check_%3.3d" % version, None)
if check:
check(engine, data)
except Exception:
LOG.error("Failed to migrate to version %s on engine %s" %
(version, engine))
raise
# migration 004 - change volume types to UUID
def _prerun_004(self, engine):
data = {
'volumes': [{'id': str(uuid.uuid4()), 'host': 'test1',
'volume_type_id': 1},
{'id': str(uuid.uuid4()), 'host': 'test2',
'volume_type_id': 1},
{'id': str(uuid.uuid4()), 'host': 'test3',
'volume_type_id': 3},
],
'volume_types': [{'name': 'vtype1'},
{'name': 'vtype2'},
{'name': 'vtype3'},
],
'volume_type_extra_specs': [{'volume_type_id': 1,
'key': 'v1',
'value': 'hotep',
},
{'volume_type_id': 1,
'key': 'v2',
'value': 'bending rodrigez',
},
{'volume_type_id': 2,
'key': 'v3',
'value': 'bending rodrigez',
},
]}
volume_types = get_table(engine, 'volume_types')
for vtype in data['volume_types']:
r = volume_types.insert().values(vtype).execute()
vtype['id'] = r.inserted_primary_key[0]
volume_type_es = get_table(engine, 'volume_type_extra_specs')
for vtes in data['volume_type_extra_specs']:
r = volume_type_es.insert().values(vtes).execute()
vtes['id'] = r.inserted_primary_key[0]
volumes = get_table(engine, 'volumes')
for vol in data['volumes']:
r = volumes.insert().values(vol).execute()
vol['id'] = r.inserted_primary_key[0]
return data
def _check_004(self, engine, data):
volumes = get_table(engine, 'volumes')
v1 = volumes.select(volumes.c.id ==
data['volumes'][0]['id']
).execute().first()
v2 = volumes.select(volumes.c.id ==
data['volumes'][1]['id']
).execute().first()
v3 = volumes.select(volumes.c.id ==
data['volumes'][2]['id']
).execute().first()
volume_types = get_table(engine, 'volume_types')
vt1 = volume_types.select(volume_types.c.name ==
data['volume_types'][0]['name']
).execute().first()
vt2 = volume_types.select(volume_types.c.name ==
data['volume_types'][1]['name']
).execute().first()
vt3 = volume_types.select(volume_types.c.name ==
data['volume_types'][2]['name']
).execute().first()
vtes = get_table(engine, 'volume_type_extra_specs')
vtes1 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][0]['key']
).execute().first()
vtes2 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][1]['key']
).execute().first()
vtes3 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][2]['key']
).execute().first()
self.assertEqual(v1['volume_type_id'], vt1['id'])
self.assertEqual(v2['volume_type_id'], vt1['id'])
self.assertEqual(v3['volume_type_id'], vt3['id'])
self.assertEqual(vtes1['volume_type_id'], vt1['id'])
self.assertEqual(vtes2['volume_type_id'], vt1['id'])
self.assertEqual(vtes3['volume_type_id'], vt2['id'])
def test_migration_005(self):
"""Test that adding source_volid column works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.INIT_VERSION)
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 4)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 5)
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertTrue(isinstance(volumes.c.source_volid.type,
sqlalchemy.types.VARCHAR))
def _metadatas(self, upgrade_to, downgrade_to=None):
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.INIT_VERSION)
migration_api.upgrade(engine,
TestMigrations.REPOSITORY,
upgrade_to)
if downgrade_to is not None:
migration_api.downgrade(
engine, TestMigrations.REPOSITORY, downgrade_to)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
yield metadata
def metadatas_upgraded_to(self, revision):
return self._metadatas(revision)
def metadatas_downgraded_from(self, revision):
return self._metadatas(revision, revision - 1)
def test_upgrade_006_adds_provider_location(self):
for metadata in self.metadatas_upgraded_to(6):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertTrue(isinstance(snapshots.c.provider_location.type,
sqlalchemy.types.VARCHAR))
def test_downgrade_006_removes_provider_location(self):
for metadata in self.metadatas_downgraded_from(6):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertTrue('provider_location' not in snapshots.c)
def test_upgrade_007_adds_fk(self):
for metadata in self.metadatas_upgraded_to(7):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
fkey, = snapshots.c.volume_id.foreign_keys
self.assertEquals(volumes.c.id, fkey.column)
def test_downgrade_007_removes_fk(self):
for metadata in self.metadatas_downgraded_from(7):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertEquals(0, len(snapshots.c.volume_id.foreign_keys))
def test_migration_008(self):
"""Test that adding and removing the backups table works correctly"""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.INIT_VERSION)
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 7)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 8)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"backups"))
backups = sqlalchemy.Table('backups',
metadata,
autoload=True)
self.assertTrue(isinstance(backups.c.created_at.type,
sqlalchemy.types.DATETIME))
self.assertTrue(isinstance(backups.c.updated_at.type,
sqlalchemy.types.DATETIME))
self.assertTrue(isinstance(backups.c.deleted_at.type,
sqlalchemy.types.DATETIME))
self.assertTrue(isinstance(backups.c.deleted.type,
sqlalchemy.types.BOOLEAN))
self.assertTrue(isinstance(backups.c.id.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.volume_id.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.user_id.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.project_id.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.host.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.availability_zone.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.display_name.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.display_description.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.container.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.status.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.fail_reason.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.service_metadata.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.service.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(backups.c.size.type,
sqlalchemy.types.INTEGER))
self.assertTrue(isinstance(backups.c.object_count.type,
sqlalchemy.types.INTEGER))
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 7)
self.assertFalse(engine.dialect.has_table(engine.connect(),
"backups"))
def test_migration_009(self):
"""Test adding snapshot_metadata table works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.INIT_VERSION)
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 8)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 9)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"snapshot_metadata"))
snapshot_metadata = sqlalchemy.Table('snapshot_metadata',
metadata,
autoload=True)
self.assertTrue(isinstance(snapshot_metadata.c.created_at.type,
sqlalchemy.types.DATETIME))
self.assertTrue(isinstance(snapshot_metadata.c.updated_at.type,
sqlalchemy.types.DATETIME))
self.assertTrue(isinstance(snapshot_metadata.c.deleted_at.type,
sqlalchemy.types.DATETIME))
self.assertTrue(isinstance(snapshot_metadata.c.deleted.type,
sqlalchemy.types.BOOLEAN))
self.assertTrue(isinstance(snapshot_metadata.c.deleted.type,
sqlalchemy.types.BOOLEAN))
self.assertTrue(isinstance(snapshot_metadata.c.id.type,
sqlalchemy.types.INTEGER))
self.assertTrue(isinstance(snapshot_metadata.c.snapshot_id.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(snapshot_metadata.c.key.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(snapshot_metadata.c.value.type,
sqlalchemy.types.VARCHAR))
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 8)
self.assertFalse(engine.dialect.has_table(engine.connect(),
"snapshot_metadata"))
|
|
"""
Support for Synology Surveillance Station Cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.synology/
"""
import asyncio
import logging
import voluptuous as vol
import aiohttp
from aiohttp import web
from aiohttp.web_exceptions import HTTPGatewayTimeout
import async_timeout
from homeassistant.const import (
CONF_NAME, CONF_USERNAME, CONF_PASSWORD,
CONF_URL, CONF_WHITELIST, CONF_VERIFY_SSL)
from homeassistant.components.camera import (
Camera, PLATFORM_SCHEMA)
from homeassistant.helpers.aiohttp_client import (
async_get_clientsession, async_create_clientsession)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async import run_coroutine_threadsafe
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Synology Camera'
DEFAULT_STREAM_ID = '0'
TIMEOUT = 5
CONF_CAMERA_NAME = 'camera_name'
CONF_STREAM_ID = 'stream_id'
QUERY_CGI = 'query.cgi'
QUERY_API = 'SYNO.API.Info'
AUTH_API = 'SYNO.API.Auth'
CAMERA_API = 'SYNO.SurveillanceStation.Camera'
STREAMING_API = 'SYNO.SurveillanceStation.VideoStream'
SESSION_ID = '0'
WEBAPI_PATH = '/webapi/'
AUTH_PATH = 'auth.cgi'
CAMERA_PATH = 'camera.cgi'
STREAMING_PATH = 'SurveillanceStation/videoStreaming.cgi'
CONTENT_TYPE_HEADER = 'Content-Type'
SYNO_API_URL = '{0}{1}{2}'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_WHITELIST, default=[]): cv.ensure_list,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
})
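# A hedged configuration.yaml sketch matching the schema above (host and
# credentials are placeholders, not real values):
#
#   camera:
#     - platform: synology
#       url: https://synology.example.com:5001
#       username: user
#       password: pass
#       verify_ssl: false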
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Setup a Synology IP Camera."""
verify_ssl = config.get(CONF_VERIFY_SSL)
websession_init = async_get_clientsession(hass, verify_ssl)
# Determine API to use for authentication
syno_api_url = SYNO_API_URL.format(
config.get(CONF_URL), WEBAPI_PATH, QUERY_CGI)
query_payload = {
'api': QUERY_API,
'method': 'Query',
'version': '1',
'query': 'SYNO.'
}
query_req = None
try:
with async_timeout.timeout(TIMEOUT, loop=hass.loop):
query_req = yield from websession_init.get(
syno_api_url,
params=query_payload
)
query_resp = yield from query_req.json()
auth_path = query_resp['data'][AUTH_API]['path']
camera_api = query_resp['data'][CAMERA_API]['path']
camera_path = query_resp['data'][CAMERA_API]['path']
streaming_path = query_resp['data'][STREAMING_API]['path']
except (asyncio.TimeoutError, aiohttp.errors.ClientError):
_LOGGER.exception("Error on %s", syno_api_url)
return False
finally:
if query_req is not None:
yield from query_req.release()
# Authenticate to NAS to get a session id
syno_auth_url = SYNO_API_URL.format(
config.get(CONF_URL), WEBAPI_PATH, auth_path)
session_id = yield from get_session_id(
hass,
websession_init,
config.get(CONF_USERNAME),
config.get(CONF_PASSWORD),
syno_auth_url
)
# init websession
websession = async_create_clientsession(
hass, verify_ssl, cookies={'id': session_id})
# Use SessionID to get cameras in system
syno_camera_url = SYNO_API_URL.format(
config.get(CONF_URL), WEBAPI_PATH, camera_api)
camera_payload = {
'api': CAMERA_API,
'method': 'List',
'version': '1'
}
try:
with async_timeout.timeout(TIMEOUT, loop=hass.loop):
camera_req = yield from websession.get(
syno_camera_url,
params=camera_payload
)
except (asyncio.TimeoutError, aiohttp.errors.ClientError):
_LOGGER.exception("Error on %s", syno_camera_url)
return False
camera_resp = yield from camera_req.json()
cameras = camera_resp['data']['cameras']
yield from camera_req.release()
# add cameras
devices = []
for camera in cameras:
if not config.get(CONF_WHITELIST):
camera_id = camera['id']
snapshot_path = camera['snapshot_path']
device = SynologyCamera(
hass,
websession,
config,
camera_id,
camera['name'],
snapshot_path,
streaming_path,
camera_path,
auth_path
)
devices.append(device)
yield from async_add_devices(devices)
@asyncio.coroutine
def get_session_id(hass, websession, username, password, login_url):
"""Get a session id."""
auth_payload = {
'api': AUTH_API,
'method': 'Login',
'version': '2',
'account': username,
'passwd': password,
'session': 'SurveillanceStation',
'format': 'sid'
}
auth_req = None
try:
with async_timeout.timeout(TIMEOUT, loop=hass.loop):
auth_req = yield from websession.get(
login_url,
params=auth_payload
)
auth_resp = yield from auth_req.json()
return auth_resp['data']['sid']
except (asyncio.TimeoutError, aiohttp.errors.ClientError):
_LOGGER.exception("Error on %s", login_url)
return False
finally:
if auth_req is not None:
yield from auth_req.release()
class SynologyCamera(Camera):
"""An implementation of a Synology NAS based IP camera."""
def __init__(self, hass, websession, config, camera_id,
camera_name, snapshot_path, streaming_path, camera_path,
auth_path):
"""Initialize a Synology Surveillance Station camera."""
super().__init__()
self.hass = hass
self._websession = websession
self._name = camera_name
self._synology_url = config.get(CONF_URL)
self._camera_name = config.get(CONF_CAMERA_NAME)
self._stream_id = config.get(CONF_STREAM_ID)
self._camera_id = camera_id
self._snapshot_path = snapshot_path
self._streaming_path = streaming_path
self._camera_path = camera_path
self._auth_path = auth_path
def camera_image(self):
"""Return bytes of camera image."""
return run_coroutine_threadsafe(
self.async_camera_image(), self.hass.loop).result()
@asyncio.coroutine
def async_camera_image(self):
"""Return a still image response from the camera."""
image_url = SYNO_API_URL.format(
self._synology_url, WEBAPI_PATH, self._camera_path)
image_payload = {
'api': CAMERA_API,
'method': 'GetSnapshot',
'version': '1',
'cameraId': self._camera_id
}
try:
with async_timeout.timeout(TIMEOUT, loop=self.hass.loop):
response = yield from self._websession.get(
image_url,
params=image_payload
)
except (asyncio.TimeoutError, aiohttp.errors.ClientError):
_LOGGER.exception("Error on %s", image_url)
return None
image = yield from response.read()
yield from response.release()
return image
@asyncio.coroutine
def handle_async_mjpeg_stream(self, request):
"""Return a MJPEG stream image response directly from the camera."""
streaming_url = SYNO_API_URL.format(
self._synology_url, WEBAPI_PATH, self._streaming_path)
streaming_payload = {
'api': STREAMING_API,
'method': 'Stream',
'version': '1',
'cameraId': self._camera_id,
'format': 'mjpeg'
}
stream = None
response = None
try:
with async_timeout.timeout(TIMEOUT, loop=self.hass.loop):
stream = yield from self._websession.get(
streaming_url,
params=streaming_payload
)
response = web.StreamResponse()
response.content_type = stream.headers.get(CONTENT_TYPE_HEADER)
yield from response.prepare(request)
while True:
data = yield from stream.content.read(102400)
if not data:
break
response.write(data)
except (asyncio.TimeoutError, aiohttp.errors.ClientError):
_LOGGER.exception("Error on %s", streaming_url)
raise HTTPGatewayTimeout()
finally:
if stream is not None:
self.hass.async_add_job(stream.release())
if response is not None:
yield from response.write_eof()
@property
def name(self):
"""Return the name of this device."""
return self._name
|
|
"""Quality control and summary metrics for next-gen alignments and analysis.
"""
import collections
import copy
import csv
import json
import os
import yaml
from datetime import datetime
import pandas as pd
import glob
import toolz as tz
from bcbio import bam
from bcbio import utils
from bcbio.cwl import cwlutils
from bcbio.log import logger
from bcbio.pipeline import config_utils, run_info
import bcbio.pipeline.datadict as dd
from bcbio.provenance import do
from bcbio.rnaseq import gtf
from bcbio.variation import damage, peddy, vcfutils, vcfanno
import six
# ## High level functions to generate summary
def qc_to_rec(samples):
"""CWL: Convert a set of input samples into records for parallelization.
"""
samples = [utils.to_single_data(x) for x in samples]
samples = cwlutils.assign_complex_to_samples(samples)
to_analyze, extras = _split_samples_by_qc(samples)
recs = cwlutils.samples_to_records([utils.to_single_data(x) for x in to_analyze + extras])
return [[x] for x in recs]
def generate_parallel(samples, run_parallel):
"""Provide parallel preparation of summary information for alignment and variant calling.
"""
to_analyze, extras = _split_samples_by_qc(samples)
qced = run_parallel("pipeline_summary", to_analyze)
samples = _combine_qc_samples(qced) + extras
qsign_info = run_parallel("qsignature_summary", [samples])
metadata_file = _merge_metadata([samples])
summary_file = write_project_summary(samples, qsign_info)
out = []
for data in samples:
if "summary" not in data[0]:
data[0]["summary"] = {}
data[0]["summary"]["project"] = summary_file
data[0]["summary"]["metadata"] = metadata_file
if qsign_info:
data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"]
out.append(data)
out = _add_researcher_summary(out, summary_file)
# MultiQC must be run after all file outputs are set:
return [[utils.to_single_data(d)] for d in run_parallel("multiqc_summary", [out])]
def pipeline_summary(data):
"""Provide summary information on processing sample.
Handles standard and CWL (single QC output) cases.
"""
data = utils.to_single_data(data)
if data["analysis"].startswith("wgbs-seq"):
bismark_bam = dd.get_align_bam(data)
sorted_bam = bam.sort(bismark_bam, data["config"])
data = dd.set_align_bam(data, sorted_bam)
data = dd.set_work_bam(data, bismark_bam)
work_bam = dd.get_align_bam(data) or dd.get_work_bam(data)
if not work_bam or not work_bam.endswith(".bam"):
work_bam = None
if dd.get_ref_file(data):
if work_bam or (tz.get_in(["config", "algorithm", "kraken"], data)): # kraken doesn't need bam
logger.info("QC: %s %s" % (dd.get_sample_name(data), ", ".join(dd.get_algorithm_qc(data))))
work_data = cwlutils.unpack_tarballs(utils.deepish_copy(data), data)
data["summary"] = _run_qc_tools(work_bam, work_data)
if (len(dd.get_algorithm_qc(data)) == 1 and "output_cwl_keys" in data):
data["summary"]["qc"] = data["summary"]["qc"].get(dd.get_algorithm_qc(data)[0])
return [[data]]
def get_qc_tools(data):
"""Retrieve a list of QC tools to use based on configuration and analysis type.
Reuses the previously configured list if already set; otherwise builds defaults.
"""
if dd.get_algorithm_qc(data):
return dd.get_algorithm_qc(data)
analysis = data["analysis"].lower()
to_run = []
if tz.get_in(["config", "algorithm", "kraken"], data):
to_run.append("kraken")
if "fastqc" not in dd.get_tools_off(data):
to_run.append("fastqc")
if any([tool in dd.get_tools_on(data)
for tool in ["qualimap", "qualimap_full"]]):
to_run.append("qualimap")
if analysis.startswith("rna-seq") or analysis == "smallrna-seq":
if "qualimap" not in dd.get_tools_off(data):
if gtf.is_qualimap_compatible(dd.get_gtf_file(data)):
to_run.append("qualimap_rnaseq")
else:
logger.debug("GTF not compatible with Qualimap, skipping.")
if analysis.startswith("chip-seq"):
to_run.append("chipqc")
if dd.get_chip_method(data) == "atac":
to_run.append("ataqv")
if analysis.startswith("smallrna-seq"):
to_run.append("small-rna")
to_run.append("atropos")
if "coverage_qc" not in dd.get_tools_off(data):
to_run.append("samtools")
if dd.has_variantcalls(data):
if "coverage_qc" not in dd.get_tools_off(data):
to_run += ["coverage", "picard"]
to_run += ["qsignature", "variants"]
if vcfanno.is_human(data):
to_run += ["peddy"]
if "contamination" not in dd.get_tools_off(data):
to_run += ["contamination"]
if vcfutils.get_paired_phenotype(data):
if "viral" not in dd.get_tools_off(data):
to_run += ["viral"]
if damage.should_filter([data]):
to_run += ["damage"]
if dd.get_umi_consensus(data):
to_run += ["umi"]
if tz.get_in(["config", "algorithm", "preseq"], data):
to_run.append("preseq")
to_run = [tool for tool in to_run if tool not in dd.get_tools_off(data)]
to_run.sort()
return to_run
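# For illustration (a hedged example, not exhaustive): a human germline
# variant-calling sample with default settings would typically come back with
# something like
#   ['contamination', 'coverage', 'fastqc', 'peddy', 'picard', 'qsignature',
#    'samtools', 'variants']
# while an RNA-seq sample with a Qualimap-compatible GTF would instead include
# 'qualimap_rnaseq'.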
def _run_qc_tools(bam_file, data):
"""Run a set of third party quality control tools, returning QC directory and metrics.
:param bam_file: alignments in bam format
:param data: dict with all configuration information
:returns: dict with output of different tools
"""
from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken,
qsignature, qualimap, samtools, picard, srna, umi, variant,
viral, preseq, chipseq, atac)
tools = {"fastqc": fastqc.run,
"atropos": atropos.run,
"small-rna": srna.run,
"samtools": samtools.run,
"qualimap": qualimap.run,
"qualimap_rnaseq": qualimap.run_rnaseq,
"qsignature": qsignature.run,
"contamination": contamination.run,
"coverage": coverage.run,
"damage": damage.run,
"variants": variant.run,
"peddy": peddy.run_qc,
"kraken": kraken.run,
"picard": picard.run,
"umi": umi.run,
"viral": viral.run,
"preseq": preseq.run,
"chipqc": chipseq.run,
"ataqv": atac.run
}
qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
metrics = {}
qc_out = utils.deepish_copy(dd.get_summary_qc(data))
for program_name in dd.get_algorithm_qc(data):
if not bam_file and program_name != "kraken": # kraken doesn't need bam
continue
if dd.get_phenotype(data) == "germline" and program_name != "variants":
continue
qc_fn = tools[program_name]
cur_qc_dir = os.path.join(qc_dir, program_name)
out = qc_fn(bam_file, data, cur_qc_dir)
qc_files = None
if out and isinstance(out, dict):
# Check for metrics output, two cases:
# 1. output with {"metrics"} and files ("base")
if "metrics" in out:
metrics.update(out.pop("metrics"))
# 2. a dictionary of metrics
elif "base" not in out:
metrics.update(out)
# Check for files only output
if "base" in out:
qc_files = out
elif out and isinstance(out, six.string_types) and os.path.exists(out):
qc_files = {"base": out, "secondary": []}
if not qc_files:
qc_files = _organize_qc_files(program_name, cur_qc_dir)
if qc_files:
qc_out[program_name] = qc_files
metrics["Name"] = dd.get_sample_name(data)
metrics["Quality format"] = dd.get_quality_format(data).lower()
return {"qc": qc_out, "metrics": metrics}
def _organize_qc_files(program, qc_dir):
"""Organize outputs from quality control runs into a base file and secondary outputs.
Provides compatibility with CWL output. Returns None if no files created during processing.
"""
base_files = {"fastqc": "fastqc_report.html",
"qualimap_rnaseq": "qualimapReport.html",
"qualimap": "qualimapReport.html"}
if os.path.exists(qc_dir):
out_files = []
for fname in [os.path.join(qc_dir, x) for x in os.listdir(qc_dir)]:
if os.path.isfile(fname) and not fname.endswith(".bcbiotmp"):
out_files.append(fname)
elif os.path.isdir(fname) and not fname.endswith("tx"):
for root, dirs, files in os.walk(fname):
for f in files:
if not f.endswith(".bcbiotmp"):
out_files.append(os.path.join(root, f))
if len(out_files) > 0 and all([not f.endswith("-failed.log") for f in out_files]):
if len(out_files) == 1:
base = out_files[0]
secondary = []
else:
base = None
if program in base_files:
base_choices = [x for x in out_files if x.endswith("/%s" % base_files[program])]
if len(base_choices) == 1:
base = base_choices[0]
if not base:
base = out_files[0]
secondary = [x for x in out_files if x != base]
return {"base": base, "secondary": secondary}
# ## Allow parallelization for separate QC runs
def _split_samples_by_qc(samples):
"""Split data into individual quality control steps for a run.
"""
to_process = []
extras = []
for data in [utils.to_single_data(x) for x in samples]:
qcs = dd.get_algorithm_qc(data)
# kraken doesn't need bam
if qcs and (dd.get_align_bam(data) or dd.get_work_bam(data) or
tz.get_in(["config", "algorithm", "kraken"], data)):
for qc in qcs:
add = copy.deepcopy(data)
add["config"]["algorithm"]["qc"] = [qc]
to_process.append([add])
else:
extras.append([data])
return to_process, extras
def _combine_qc_samples(samples):
"""Combine split QC analyses into single samples based on BAM files.
"""
by_bam = collections.defaultdict(list)
for data in [utils.to_single_data(x) for x in samples]:
batch = dd.get_batch(data) or dd.get_sample_name(data)
if not isinstance(batch, (list, tuple)):
batch = [batch]
batch = tuple(batch)
by_bam[(dd.get_align_bam(data) or dd.get_work_bam(data), batch)].append(data)
out = []
for data_group in by_bam.values():
data = data_group[0]
alg_qc = []
qc = {}
metrics = {}
for d in data_group:
qc.update(dd.get_summary_qc(d))
metrics.update(dd.get_summary_metrics(d))
alg_qc.extend(dd.get_algorithm_qc(d))
data["config"]["algorithm"]["qc"] = alg_qc
data["summary"]["qc"] = qc
data["summary"]["metrics"] = metrics
out.append([data])
return out
# ## Generate project level QC summary for quickly assessing large projects
def write_project_summary(samples, qsign_info=None):
"""Write project summary information on the provided samples.
write out dirs, genome resources,
"""
work_dir = samples[0][0]["dirs"]["work"]
out_file = os.path.join(work_dir, "project-summary.yaml")
upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"])
if "dir" in samples[0][0]["upload"] else "")
date = str(datetime.now())
prev_samples = _other_pipeline_samples(out_file, samples)
with open(out_file, "w") as out_handle:
yaml.safe_dump({"date": date}, out_handle,
default_flow_style=False, allow_unicode=False)
if qsign_info:
qsign_out = utils.deepish_copy(qsign_info[0])
qsign_out.pop("out_dir", None)
yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False,
allow_unicode=False)
yaml.safe_dump({"upload": upload_dir}, out_handle,
default_flow_style=False, allow_unicode=False)
yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle,
default_flow_style=False, allow_unicode=False)
yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle,
default_flow_style=False, allow_unicode=False)
return out_file
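# Sketch of the resulting project-summary.yaml layout (keys mirror the
# yaml.safe_dump calls above; values are illustrative only):
#   date: '2023-01-01 12:00:00.000000'
#   upload: /path/to/work/final
#   bcbio_system: /path/to/bcbio_system.yaml
#   samples:
#   - description: Sample1
#     genome_build: GRCh37
#     summary:
#       metrics: {Total_reads: 1000000, ...}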
def _merge_metadata(samples):
"""Merge all metadata into CSV file"""
samples = list(utils.flatten(samples))
out_dir = dd.get_work_dir(samples[0])
logger.info("summarize metadata")
out_file = os.path.join(out_dir, "metadata.csv")
sample_metrics = collections.defaultdict(dict)
for s in samples:
m = tz.get_in(['metadata'], s)
if isinstance(m, six.string_types):
m = json.loads(m)
if m:
for me in list(m.keys()):
if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple):
m.pop(me, None)
sample_metrics[dd.get_sample_name(s)].update(m)
pd.DataFrame(sample_metrics).transpose().to_csv(out_file)
return out_file
def _other_pipeline_samples(summary_file, cur_samples):
"""Retrieve samples produced previously by another pipeline in the summary output.
"""
cur_descriptions = set([s[0]["description"] for s in cur_samples])
out = []
if utils.file_exists(summary_file):
with open(summary_file) as in_handle:
for s in yaml.safe_load(in_handle).get("samples", []):
if s["description"] not in cur_descriptions:
out.append(s)
return out
def _save_fields(sample):
to_save = ["dirs", "genome_resources", "genome_build", "sam_ref", "metadata",
"description"]
saved = {k: sample[k] for k in to_save if k in sample}
if "summary" in sample and "metrics" in sample["summary"]:
saved["summary"] = {"metrics": sample["summary"]["metrics"]}
return saved
# ## Generate researcher specific summaries
def _add_researcher_summary(samples, summary_yaml):
"""Generate summary files per researcher if organized via a LIMS.
"""
by_researcher = collections.defaultdict(list)
for data in (x[0] for x in samples):
researcher = utils.get_in(data, ("upload", "researcher"))
if researcher:
by_researcher[researcher].append(data["description"])
out_by_researcher = {}
for researcher, descrs in by_researcher.items():
out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher,
set(descrs), samples[0][0])
out = []
for data in (x[0] for x in samples):
researcher = utils.get_in(data, ("upload", "researcher"))
if researcher:
data["summary"]["researcher"] = out_by_researcher[researcher]
out.append([data])
return out
def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data):
"""Generate a CSV file with summary information for a researcher on this project.
"""
out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")),
"%s-summary.tsv" % run_info.clean_name(researcher))
metrics = ["Total_reads", "Mapped_reads", "Mapped_reads_pct", "Duplicates", "Duplicates_pct"]
with open(summary_yaml) as in_handle:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle, dialect="excel-tab")
writer.writerow(["Name"] + metrics)
for sample in yaml.safe_load(in_handle)["samples"]:
if sample["description"] in descrs:
row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "")
for x in metrics]
writer.writerow(row)
return out_file
# ## Galaxy functionality
def prep_pdf(qc_dir, config):
"""Create PDF from HTML summary outputs in QC directory.
    Requires wkhtmltopdf installed: https://wkhtmltopdf.org/
Thanks to: https://www.biostars.org/p/16991/
Works around issues with CSS conversion on CentOS by adjusting CSS.
"""
html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html")
html_fixed = "%s-fixed%s" % os.path.splitext(html_file)
try:
topdf = config_utils.get_program("wkhtmltopdf", config)
except config_utils.CmdNotFound:
topdf = None
if topdf and utils.file_exists(html_file):
out_file = "%s.pdf" % os.path.splitext(html_file)[0]
if not utils.file_exists(out_file):
cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s"
% (html_file, html_fixed))
do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf")
cmd = [topdf, html_fixed, out_file]
do.run(cmd, "Convert QC HTML to PDF")
return out_file
|
|
# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "vmriccox"
"""
Generation of the heat templates from the base template
"""
import json
import os
import shutil
from experimental_framework import common
from experimental_framework.constants import framework_parameters as fp
class TreeNode:
"""
    This class represents a node of the configuration tree.
Each node represents a single configuration value for a single
configuration parameter.
"""
def __init__(self):
self.up = None
self.down = []
self.variable_name = ''
self.variable_value = 0
def add_child(self, node):
"""
Adds a node as a child for the current node
:param node: node to be added as a child (type: TreeNode)
:return: None
"""
node.up = self
self.down.append(node)
def get_parent(self):
"""
Returns the parent node of the current one
:return type: TreeNode
"""
return self.up
def get_children(self):
"""
Returns the children of the current node
:return type: list of TreeNode
"""
if len(self.down) == 0:
# return [self]
return []
return self.down
def get_variable_name(self):
"""
Returns the name of the variable correspondent to the current node
:return type: str
"""
return self.variable_name
def get_variable_value(self):
"""
Returns the value of the variable correspondent to the current node
:return type: str or int
"""
return self.variable_value
def set_variable_name(self, name):
"""
Sets the name of the variable for the current node
:param name: Name of the variable (type: str)
        :return: None
"""
self.variable_name = name
def set_variable_value(self, value):
"""
Sets the value of the variable for the current node
:param value: value of the variable (type: str)
        :return: None
"""
self.variable_value = value
def get_path(self):
"""
Returns all the path from the current node to the root of the tree.
:return type: list of TreeNode
"""
ret_val = []
if not self.up:
ret_val.append(self)
return ret_val
for node in self.up.get_path():
ret_val.append(node)
ret_val.append(self)
return ret_val
def __str__(self):
return str(self.variable_name) + " --> " + str(self.variable_value)
def __repr__(self):
return str(self.variable_name) + " = " + str(self.variable_value)
@staticmethod
def _get_leaves(node, leaves):
"""
Returns all the leaves of a tree.
It changes the "leaves" list.
:param node: root of the tree (type: TreeNode)
:param leaves: partial list of leaves (type: list of TreeNode)
:return type: None
"""
children = node.get_children()
if len(children) == 0:
leaves.append(node)
return
for child in children:
TreeNode._get_leaves(child, leaves)
@staticmethod
def get_leaves(node):
"""
Returns all the leaves of a tree.
:param node: root of the tree (TreeNode)
:return type: list
"""
leaves = list()
TreeNode._get_leaves(node, leaves)
return leaves
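# The helper below is an illustrative usage sketch only; it is not called by
# the framework. The variable name and values are made up for the example.
def _example_tree_usage():
    root = TreeNode()
    for cores in ['2', '4']:
        node = TreeNode()
        node.set_variable_name('vcpus')
        node.set_variable_value(cores)
        root.add_child(node)
    leaves = TreeNode.get_leaves(root)             # the two 'vcpus' nodes
    return [leaf.get_path() for leaf in leaves]    # each path is [root, leaf]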
template_name = fp.EXPERIMENT_TEMPLATE_NAME
def generates_templates(base_heat_template, deployment_configuration):
"""
Generates the heat templates for the experiments
:return: None
"""
# Load useful parameters from file
template_dir = common.get_template_dir()
template_file_extension = fp.TEMPLATE_FILE_EXTENSION
template_base_name = base_heat_template
variables = deployment_configuration
    # Delete any templates left over from previous runs of the framework
common.LOG.info("Removing the heat templates previously generated")
os.system("rm " + template_dir + template_name + "_*")
# Creation of the tree with all the new configurations
common.LOG.info("Creation of the tree with all the new configurations")
tree = TreeNode()
for variable in variables:
leaves = TreeNode.get_leaves(tree)
common.LOG.debug("LEAVES: " + str(leaves))
common.LOG.debug("VALUES: " + str(variables[variable]))
for value in variables[variable]:
for leaf in leaves:
new_node = TreeNode()
new_node.set_variable_name(variable)
new_node.set_variable_value(value)
leaf.add_child(new_node)
common.LOG.debug("CONFIGURATION TREE: " + str(tree))
common.LOG.info("Heat Template and metadata file creation")
leaves = TreeNode.get_leaves(tree)
counter = 1
for leaf in leaves:
heat_template_vars = leaf.get_path()
if os.path.isabs(template_base_name):
base_template = template_base_name
else:
base_template = template_dir + template_base_name
new_template = template_dir + template_name
new_template += "_" + str(counter) + template_file_extension
shutil.copy(base_template, new_template)
metadata = dict()
for var in heat_template_vars:
if var.get_variable_name():
common.replace_in_file(new_template, "#" +
var.get_variable_name(),
var.get_variable_value())
metadata[var.get_variable_name()] = var.get_variable_value()
# Save the metadata on a JSON file
with open(new_template + ".json", 'w') as outfile:
json.dump(metadata, outfile)
common.LOG.debug("Heat Templates and Metadata file " + str(counter) +
" created")
counter += 1
# Creation of the template files
common.LOG.info(str(counter - 1) + " Heat Templates and Metadata files "
"created")
def get_all_heat_templates(template_dir, template_file_extension):
"""
Loads and returns all the generated heat templates
:param template_dir: directory to search in (type: str)
:param template_file_extension: extension of the file for templates
(type: str)
:return: type: list
"""
template_files = list()
for dirname, dirnames, filenames in os.walk(template_dir):
for filename in filenames:
if template_file_extension in filename and \
filename.endswith(template_file_extension) and \
template_name in filename:
template_files.append(filename)
template_files.sort()
return template_files
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import heatclient.v1.client as heatclient
from oslo_log import log as logging
from congress.datasources import constants
from congress.datasources import datasource_driver
from congress.datasources import datasource_utils as ds_utils
LOG = logging.getLogger(__name__)
class HeatV1Driver(datasource_driver.PollingDataSourceDriver,
datasource_driver.ExecutionDriver):
STACKS = "stacks"
STACKS_LINKS = "stacks_links"
DEPLOYMENTS = "deployments"
DEPLOYMENT_OUTPUT_VALUES = "deployment_output_values"
RESOURCES = "resources"
RESOURCES_LINKS = "resources_links"
EVENTS = "events"
EVENTS_LINKS = "events_links"
# TODO(thinrichs): add snapshots
value_trans = {'translation-type': 'VALUE'}
stacks_links_translator = {
'translation-type': 'HDICT',
'table-name': STACKS_LINKS,
'parent-key': 'id',
'selector-type': 'DICT_SELECTOR',
'in-list': True,
'field-translators':
({'fieldname': 'href', 'translator': value_trans},
{'fieldname': 'rel', 'translator': value_trans})}
stacks_translator = {
'translation-type': 'HDICT',
'table-name': STACKS,
'selector-type': 'DOT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'translator': value_trans},
{'fieldname': 'stack_name', 'translator': value_trans},
{'fieldname': 'description', 'translator': value_trans},
{'fieldname': 'creation_time', 'translator': value_trans},
{'fieldname': 'updated_time', 'translator': value_trans},
{'fieldname': 'stack_status', 'translator': value_trans},
{'fieldname': 'stack_status_reason', 'translator': value_trans},
{'fieldname': 'stack_owner', 'translator': value_trans},
{'fieldname': 'parent', 'translator': value_trans},
{'fieldname': 'links', 'translator': stacks_links_translator})}
deployments_output_values_translator = {
'translation-type': 'HDICT',
'table-name': DEPLOYMENT_OUTPUT_VALUES,
'parent-key': 'id',
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'deploy_stdout', 'translator': value_trans},
{'fieldname': 'deploy_stderr', 'translator': value_trans},
{'fieldname': 'deploy_status_code', 'translator': value_trans},
{'fieldname': 'result', 'translator': value_trans})}
software_deployment_translator = {
'translation-type': 'HDICT',
'table-name': DEPLOYMENTS,
'selector-type': 'DOT_SELECTOR',
'field-translators':
({'fieldname': 'status', 'translator': value_trans},
{'fieldname': 'server_id', 'translator': value_trans},
{'fieldname': 'config_id', 'translator': value_trans},
{'fieldname': 'action', 'translator': value_trans},
{'fieldname': 'status_reason', 'translator': value_trans},
{'fieldname': 'id', 'translator': value_trans},
{'fieldname': 'output_values',
'translator': deployments_output_values_translator})}
resources_links_translator = {
'translation-type': 'HDICT',
'table-name': RESOURCES_LINKS,
'parent-key': 'physical_resource_id',
'selector-type': 'DICT_SELECTOR',
'in-list': True,
'field-translators':
({'fieldname': 'href', 'translator': value_trans},
{'fieldname': 'rel', 'translator': value_trans})}
resources_translator = {
'translation-type': 'HDICT',
'table-name': RESOURCES,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'physical_resource_id', 'translator': value_trans},
{'fieldname': 'logical_resource_id', 'translator': value_trans},
{'fieldname': 'stack_id', 'translator': value_trans},
{'fieldname': 'resource_name', 'translator': value_trans},
{'fieldname': 'resource_type', 'translator': value_trans},
{'fieldname': 'creation_time', 'translator': value_trans},
{'fieldname': 'updated_time', 'translator': value_trans},
{'fieldname': 'resource_status', 'translator': value_trans},
{'fieldname': 'resource_status_reason', 'translator': value_trans},
{'fieldname': 'links', 'translator': resources_links_translator})}
events_links_translator = {
'translation-type': 'HDICT',
'table-name': EVENTS_LINKS,
'parent-key': 'id',
'selector-type': 'DICT_SELECTOR',
'in-list': True,
'field-translators':
({'fieldname': 'href', 'translator': value_trans},
{'fieldname': 'rel', 'translator': value_trans})}
events_translator = {
'translation-type': 'HDICT',
'table-name': EVENTS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'translator': value_trans},
{'fieldname': 'physical_resource_id', 'translator': value_trans},
{'fieldname': 'logical_resource_id', 'translator': value_trans},
{'fieldname': 'stack_id', 'translator': value_trans},
{'fieldname': 'resource_name', 'translator': value_trans},
{'fieldname': 'event_time', 'translator': value_trans},
{'fieldname': 'resource_status', 'translator': value_trans},
{'fieldname': 'resource_status_reason', 'translator': value_trans},
{'fieldname': 'links', 'translator': events_links_translator})}
TRANSLATORS = [stacks_translator, software_deployment_translator,
resources_translator, events_translator]
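    # Illustrative reading of the translator schema above (the actual rows are
    # produced by convert_objs at runtime): a stack whose attributes include
    # id='uuid-1', stack_name='demo' and links=[{'href': 'http://...',
    # 'rel': 'self'}] yields one row in the 'stacks' table holding the scalar
    # fields, plus one ('uuid-1', href, rel) row in the 'stacks_links' child
    # table, keyed by the parent 'id'.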
def __init__(self, name='', args=None):
super(HeatV1Driver, self).__init__(name, args=args)
datasource_driver.ExecutionDriver.__init__(self)
self.creds = args
session = ds_utils.get_keystone_session(self.creds)
endpoint = session.get_endpoint(service_type='orchestration',
interface='publicURL')
self.heat = heatclient.Client(session=session, endpoint=endpoint)
self.initialize_update_methods()
self._init_end_start_poll()
@staticmethod
def get_datasource_info():
result = {}
result['id'] = 'heat'
result['description'] = ('Datasource driver that interfaces with'
' OpenStack orchestration aka heat.')
result['config'] = ds_utils.get_openstack_required_config()
result['config']['lazy_tables'] = constants.OPTIONAL
result['secret'] = ['password']
return result
def initialize_update_methods(self):
stacks_method = lambda: self._translate_stacks(
{'stacks': self.heat.stacks.list()})
self.add_update_method(stacks_method, self.stacks_translator)
resources_method = lambda: self._translate_resources(
self._get_resources(self.heat.stacks.list()))
self.add_update_method(resources_method, self.resources_translator)
events_method = lambda: self._translate_events(
self._get_events(self.heat.stacks.list()))
self.add_update_method(events_method, self.events_translator)
deployments_method = lambda: self._translate_software_deployment(
{'deployments': self.heat.software_deployments.list()})
self.add_update_method(deployments_method,
self.software_deployment_translator)
def _get_resources(self, stacks):
rval = []
for stack in stacks:
resources = self.heat.resources.list(stack.id)
for resource in resources:
resource = resource.to_dict()
resource['stack_id'] = stack.id
rval.append(resource)
return {'resources': rval}
def _get_events(self, stacks):
rval = []
for stack in stacks:
events = self.heat.events.list(stack.id)
for event in events:
event = event.to_dict()
event['stack_id'] = stack.id
rval.append(event)
return {'events': rval}
@ds_utils.update_state_on_changed(STACKS)
def _translate_stacks(self, obj):
"""Translate the stacks represented by OBJ into tables."""
LOG.debug("STACKS: %s", str(dict(obj)))
row_data = HeatV1Driver.convert_objs(
obj['stacks'], HeatV1Driver.stacks_translator)
return row_data
@ds_utils.update_state_on_changed(DEPLOYMENTS)
def _translate_software_deployment(self, obj):
"""Translate the stacks represented by OBJ into tables."""
LOG.debug("Software Deployments: %s", str(dict(obj)))
row_data = HeatV1Driver.convert_objs(
obj['deployments'], HeatV1Driver.software_deployment_translator)
return row_data
@ds_utils.update_state_on_changed(RESOURCES)
def _translate_resources(self, obj):
"""Translate the resources represented by OBJ into tables."""
LOG.debug("Resources: %s", str(dict(obj)))
row_data = HeatV1Driver.convert_objs(
obj['resources'], HeatV1Driver.resources_translator)
return row_data
@ds_utils.update_state_on_changed(EVENTS)
def _translate_events(self, obj):
"""Translate the events represented by OBJ into tables."""
LOG.debug("Events: %s", str(dict(obj)))
row_data = HeatV1Driver.convert_objs(
obj['events'], HeatV1Driver.events_translator)
return row_data
def execute(self, action, action_args):
"""Overwrite ExecutionDriver.execute()."""
# action can be written as a method or an API call.
func = getattr(self, action, None)
if func and self.is_executable(func):
func(action_args)
else:
self._execute_api(self.heat, action, action_args)
|
|
#!/usr/bin/env python3
"""
Testing dec2flt
===============
These are *really* extensive tests. Expect them to run for hours. Due to the
nature of the problem (the input is a string of arbitrary length), exhaustive
testing is not really possible. Instead, there are exhaustive tests for some
classes of inputs for which that is feasible and a bunch of deterministic and
random non-exhaustive tests for covering everything else.
The actual tests (generating decimal strings and feeding them to dec2flt) are
performed by a set of stand-alone Rust programs. This script compiles, runs,
and supervises them. The programs report the strings they generate and the
floating point numbers they converted those strings to, and this script
checks that the results are correct.
You can run specific tests rather than all of them by giving their names
(without .rs extension) as command line parameters.
Verification
------------
The tricky part is not generating those inputs but verifying the outputs.
Comparing with the result of Python's float() does not cut it because
(and this is apparently undocumented) although Python includes a version of
David Gay's code including the decimal-to-float part, it doesn't actually use
it for float() (only for round()) instead relying on the system scanf() which
is not necessarily completely accurate.
Instead, we take the input and compute the true value with bignum arithmetic
(as a fraction, using the ``fractions`` module).
Given an input string and the corresponding float computed via Rust, simply
decode the float into f * 2^k (for integers f, k) and the ULP.
We can now easily compute the error and check if it is within 0.5 ULP as it
should be. Zero and infinities are handled similarly:
- If the approximation is 0.0, the exact value should be *less or equal*
half the smallest denormal float: the smallest denormal floating point
number has an odd mantissa (00...001) and thus half of that is rounded
to 00...00, i.e., zero.
- If the approximation is Inf, the exact value should be *greater or equal*
to the largest finite float + 0.5 ULP: the largest finite float has an odd
mantissa (11...11), so that plus half an ULP is rounded up to the nearest
even number, which overflows.
Implementation details
----------------------
This directory contains a set of single-file Rust programs that perform
tests with a particular class of inputs. Each is compiled and run without
parameters, outputs (f64, f32, decimal) pairs to verify externally, and
in any case either exits gracefully or with a panic.
If a test binary writes *anything at all* to stderr or exits with an
exit code that's not 0, the test fails.
The output on stdout is treated as a (f64, f32, decimal) record, encoded thusly:
- First, the bits of the f64 encoded as an ASCII hex string.
- Second, the bits of the f32 encoded as an ASCII hex string.
- Then the corresponding string input, in ASCII
- The record is terminated with a newline.
Incomplete records are an error. Not-a-Number bit patterns are invalid too.
The tests run serially but the validation for a single test is parallelized
with ``multiprocessing``. Each test is launched as a subprocess.
One thread supervises it: it accepts and enqueues records to validate, observes
stderr, and waits for the process to exit. A set of worker processes perform
the validation work for the outputs enqueued there. Another thread listens
for progress updates from the workers.
Known issues
------------
Some errors (e.g., NaN outputs) aren't handled very gracefully.
Also, if there is an exception or the process is interrupted (at least on
Windows) the worker processes are leaked and stick around forever.
They're only a few megabytes each, but still, this script should not be run
if you aren't prepared to manually kill a lot of orphaned processes.
"""
from __future__ import print_function
import sys
import os.path
import time
import struct
from fractions import Fraction
from collections import namedtuple
from subprocess import Popen, check_call, PIPE
from glob import glob
import multiprocessing
import threading
import ctypes
import binascii
try: # Python 3
import queue as Queue
except ImportError: # Python 2
import Queue
NUM_WORKERS = 2
UPDATE_EVERY_N = 50000
INF = namedtuple('INF', '')()
NEG_INF = namedtuple('NEG_INF', '')()
ZERO = namedtuple('ZERO', '')()
MAILBOX = None # The queue for reporting errors to the main process.
STDOUT_LOCK = threading.Lock()
test_name = None
child_processes = []
exit_status = 0
def msg(*args):
with STDOUT_LOCK:
print("[" + test_name + "]", *args)
sys.stdout.flush()
def write_errors():
global exit_status
f = open("errors.txt", 'w')
have_seen_error = False
while True:
args = MAILBOX.get()
if args is None:
f.close()
break
print(*args, file=f)
f.flush()
if not have_seen_error:
have_seen_error = True
msg("Something is broken:", *args)
msg("Future errors logged to errors.txt")
exit_status = 101
def cargo():
print("compiling tests")
sys.stdout.flush()
check_call(['cargo', 'build', '--release'])
def run(test):
global test_name
test_name = test
t0 = time.perf_counter()
msg("setting up supervisor")
command = ['cargo', 'run', '--bin', test, '--release']
    proc = Popen(command, bufsize=1 << 20, stdin=PIPE, stdout=PIPE, stderr=PIPE)
done = multiprocessing.Value(ctypes.c_bool)
queue = multiprocessing.Queue(maxsize=5)#(maxsize=1024)
workers = []
for n in range(NUM_WORKERS):
worker = multiprocessing.Process(name='Worker-' + str(n + 1),
target=init_worker,
args=[test, MAILBOX, queue, done])
workers.append(worker)
child_processes.append(worker)
for worker in workers:
worker.start()
msg("running test")
interact(proc, queue)
with done.get_lock():
done.value = True
for worker in workers:
worker.join()
msg("python is done")
assert queue.empty(), "did not validate everything"
dt = time.perf_counter() - t0
msg("took", round(dt, 3), "seconds")
def interact(proc, queue):
n = 0
while proc.poll() is None:
line = proc.stdout.readline()
if not line:
continue
assert line.endswith(b'\n'), "incomplete line: " + repr(line)
queue.put(line)
n += 1
if n % UPDATE_EVERY_N == 0:
msg("got", str(n // 1000) + "k", "records")
msg("rust is done. exit code:", proc.returncode)
rest, stderr = proc.communicate()
if stderr:
msg("rust stderr output:", stderr)
for line in rest.split(b'\n'):
if not line:
continue
queue.put(line)
def main():
global MAILBOX
files = glob('src/bin/*.rs')
basenames = [os.path.basename(i) for i in files]
all_tests = [os.path.splitext(f)[0] for f in basenames if not f.startswith('_')]
args = sys.argv[1:]
if args:
tests = [test for test in all_tests if test in args]
else:
tests = all_tests
if not tests:
print("Error: No tests to run")
sys.exit(1)
# Compile first for quicker feedback
cargo()
# Set up mailbox once for all tests
MAILBOX = multiprocessing.Queue()
mailman = threading.Thread(target=write_errors)
mailman.daemon = True
mailman.start()
for test in tests:
run(test)
MAILBOX.put(None)
mailman.join()
# ---- Worker thread code ----
POW2 = { e: Fraction(2) ** e for e in range(-1100, 1100) }
HALF_ULP = { e: (Fraction(2) ** e)/2 for e in range(-1100, 1100) }
DONE_FLAG = None
def send_error_to_supervisor(*args):
MAILBOX.put(args)
def init_worker(test, mailbox, queue, done):
global test_name, MAILBOX, DONE_FLAG
test_name = test
MAILBOX = mailbox
DONE_FLAG = done
do_work(queue)
def is_done():
with DONE_FLAG.get_lock():
return DONE_FLAG.value
def do_work(queue):
while True:
try:
line = queue.get(timeout=0.01)
except Queue.Empty:
if queue.empty() and is_done():
return
else:
continue
bin64, bin32, text = line.rstrip().split()
validate(bin64, bin32, text.decode('utf-8'))
def decode_binary64(x):
"""
    Turn an IEEE 754 binary64 into (mantissa, exponent), except 0.0 and
infinity (positive and negative), which return ZERO, INF, and NEG_INF
respectively.
"""
x = binascii.unhexlify(x)
assert len(x) == 8, repr(x)
[bits] = struct.unpack(b'>Q', x)
if bits == 0:
return ZERO
exponent = (bits >> 52) & 0x7FF
negative = bits >> 63
low_bits = bits & 0xFFFFFFFFFFFFF
if exponent == 0:
mantissa = low_bits
exponent += 1
if mantissa == 0:
return ZERO
elif exponent == 0x7FF:
assert low_bits == 0, "NaN"
if negative:
return NEG_INF
else:
return INF
else:
mantissa = low_bits | (1 << 52)
exponent -= 1023 + 52
if negative:
mantissa = -mantissa
return (mantissa, exponent)
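# The helper below is an illustrative sketch only; nothing in the harness calls
# it. It decodes the f64 nearest to 0.1 and redoes the half-ULP check performed
# by validate_normal().
def _example_decode_binary64():
    sig, k = decode_binary64(b"3fb999999999999a")  # bits of the f64 nearest 0.1
    assert (sig, k) == (7205759403792794, -56)
    real = Fraction(1, 10)
    error = abs(sig * POW2[k] - real)              # 0.4 * 2**-56
    assert error <= HALF_ULP[k]                    # within half an ULP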
def decode_binary32(x):
"""
    Turn an IEEE 754 binary32 into (mantissa, exponent), except 0.0 and
infinity (positive and negative), which return ZERO, INF, and NEG_INF
respectively.
"""
x = binascii.unhexlify(x)
assert len(x) == 4, repr(x)
[bits] = struct.unpack(b'>I', x)
if bits == 0:
return ZERO
exponent = (bits >> 23) & 0xFF
negative = bits >> 31
low_bits = bits & 0x7FFFFF
if exponent == 0:
mantissa = low_bits
exponent += 1
if mantissa == 0:
return ZERO
elif exponent == 0xFF:
if negative:
return NEG_INF
else:
return INF
else:
mantissa = low_bits | (1 << 23)
exponent -= 127 + 23
if negative:
mantissa = -mantissa
return (mantissa, exponent)
MIN_SUBNORMAL_DOUBLE = Fraction(2) ** -1074
MIN_SUBNORMAL_SINGLE = Fraction(2) ** -149 # XXX unsure
MAX_DOUBLE = (2 - Fraction(2) ** -52) * (2 ** 1023)
MAX_SINGLE = (2 - Fraction(2) ** -23) * (2 ** 127)
MAX_ULP_DOUBLE = 1023 - 52
MAX_ULP_SINGLE = 127 - 23
DOUBLE_ZERO_CUTOFF = MIN_SUBNORMAL_DOUBLE / 2
DOUBLE_INF_CUTOFF = MAX_DOUBLE + 2 ** (MAX_ULP_DOUBLE - 1)
SINGLE_ZERO_CUTOFF = MIN_SUBNORMAL_SINGLE / 2
SINGLE_INF_CUTOFF = MAX_SINGLE + 2 ** (MAX_ULP_SINGLE - 1)
def validate(bin64, bin32, text):
try:
double = decode_binary64(bin64)
except AssertionError:
print(bin64, bin32, text)
raise
single = decode_binary32(bin32)
real = Fraction(text)
if double is ZERO:
if real > DOUBLE_ZERO_CUTOFF:
record_special_error(text, "f64 zero")
elif double is INF:
if real < DOUBLE_INF_CUTOFF:
record_special_error(text, "f64 inf")
elif double is NEG_INF:
if -real < DOUBLE_INF_CUTOFF:
record_special_error(text, "f64 -inf")
elif len(double) == 2:
sig, k = double
validate_normal(text, real, sig, k, "f64")
else:
assert 0, "didn't handle binary64"
if single is ZERO:
if real > SINGLE_ZERO_CUTOFF:
record_special_error(text, "f32 zero")
elif single is INF:
if real < SINGLE_INF_CUTOFF:
record_special_error(text, "f32 inf")
elif single is NEG_INF:
if -real < SINGLE_INF_CUTOFF:
record_special_error(text, "f32 -inf")
elif len(single) == 2:
sig, k = single
validate_normal(text, real, sig, k, "f32")
else:
assert 0, "didn't handle binary32"
def record_special_error(text, descr):
send_error_to_supervisor(text.strip(), "wrongly rounded to", descr)
def validate_normal(text, real, sig, k, kind):
approx = sig * POW2[k]
error = abs(approx - real)
if error > HALF_ULP[k]:
record_normal_error(text, error, k, kind)
def record_normal_error(text, error, k, kind):
one_ulp = HALF_ULP[k + 1]
assert one_ulp == 2 * HALF_ULP[k]
relative_error = error / one_ulp
text = text.strip()
try:
err_repr = float(relative_error)
    except (ValueError, OverflowError):
        err_repr = str(relative_error).replace('/', ' / ')
send_error_to_supervisor(err_repr, "ULP error on", text, "(" + kind + ")")
if __name__ == '__main__':
main()
|
|
import json
from flask.ext.api import status
import flask as fk
from api import app, check_access, upload_handler, load_image
from ddsmdb.common.models import UserModel
from ddsmdb.common.models import ProjectModel
from ddsmdb.common.models import RecordModel
from ddsmdb.common.models import RecordBodyModel
from ddsmdb.common.models import ContainerModel
from ddsmdb.common.tools.basic_auth import requires_auth
import traceback
import mimetypes
# from flask.ext.stormpath import user
API_VERSION = 1
API_URL = '/api/v{0}/private'.format(API_VERSION)
@app.route(API_URL + '/<api_token>/record/push/<project_name>', methods=['POST'])
def push_record(api_token, project_name):
current_user = check_access(api_token)
if current_user is not None:
if fk.request.method == 'POST':
# user = UserModel.objects(email=user.email).first_or_404()
project = ProjectModel.objects(name=project_name, owner=current_user).first_or_404()
# label = db.StringField(max_length=300)
# created_at = db.DateTimeField(default=datetime.datetime.now())
# updated_at = db.DateTimeField(default=datetime.datetime.now())
# system = db.DictField() # {''}
# program = db.DictField() # {'version_control':'git|hg|svn|cvs', 'scope':'local|remote', 'location':'hash|https://remote_version.com/repository_id'}
# inputs = db.ListField(db.DictField()) # [{}]
# outputs = db.ListField(db.DictField()) # [{}]
# dependencies = db.ListField(db.DictField())# [{}]
if len(project.history) > 0:
record = RecordModel(project=project, container=ContainerModel.objects.with_id(project.history[-1]))
else:
record = RecordModel(project=project)
record.label=str(record.id)
if fk.request.data:
try:
data = json.loads(fk.request.data)
if len(data.get('inputs',[])) != 0:
record.inputs = data.get('inputs',[])
del data['inputs']
else:
record.inputs = []
if len(data.get('outputs',[])) != 0:
record.outputs = data.get('outputs',[])
del data['outputs']
else:
record.outputs = []
if len(data.get('dependencies',[])) != 0:
record.dependencies = data.get('dependencies',[])
del data['dependencies']
else:
record.dependencies = []
if len(data.get('system',{})) != 0:
record.system = data.get('system',{})
del data['system']
else:
record.system = {}
if len(data.get('program',{})) != 0:
record.program = data.get('program',{})
del data['program']
else:
record.program = {}
if data.get('status','unknown') != 'unknown':
record.status = data.get('status','unknown')
del data['status']
else:
record.status = 'unknown'
record.update(data)
return fk.Response(str(record.id), status.HTTP_201_CREATED)
except Exception, e:
                    return fk.make_response(traceback.format_exc(), status.HTTP_400_BAD_REQUEST)
else:
return fk.make_response('No metadata provided.', status.HTTP_204_NO_CONTENT)
# if fk.request.files:
# try:
# if fk.request.files['data']:
# data_obj = fk.request.files['data']
# data = json.loads(data_obj.read())
# if len(data.get('image',[])) != 0:
# record.image = data.get('image',[])
# del data['image']
# else:
# record.image = {}
# print "Record Image: "+str(record.image)
# if len(data.get('signature',{})) != 0:
# record.signature = data.get('signature',{})
# del data['signature']
# else:
# record.signature = {}
# print "Record Signature: "+str(record.signature)
# record.update(data)
# except:
# pass
# # if len(record.image) == 0:
# # print "Image to record..."
# try:
# if fk.request.files['docker']:
# image_obj = fk.request.files['docker']
# try:
# record.save()
# upload_handler(current_user, record, image_obj, 'docker')
# print str(record.image)
# except Exception, e:
# traceback.print_exc()
# print "Uploading docker image failed!"
# if fk.request.files['binary']:
# image_obj = fk.request.files['binary']
# try:
# record.save()
# upload_handler(current_user, record, image_obj, 'binary')
# print str(record.image)
# except Exception, e:
# traceback.print_exc()
# print "Uploading executable image failed!"
# if fk.request.files['source']:
# image_obj = fk.request.files['source']
# try:
# record.save()
# upload_handler(current_user, record, image_obj, 'source')
# print str(record.image)
# except Exception, e:
# traceback.print_exc()
# print "Uploading source image failed!"
# except:
# pass
# # else:
# # print "Remote link provided."
# # if len(record.signature) == 0:
# # print "Signature to record..."
# try:
# if fk.request.files['signature']:
# signature_obj = fk.request.files['signature']
# try:
# record.save()
# upload_handler(current_user, record, signature_obj, 'signature')
# print str(record.signature)
# except Exception, e:
# traceback.print_exc()
# print "Uploading signature failed!"
# except:
# pass
# # else:
# # print "Remote link provided."
# return fk.Response(str(record.id), status.HTTP_201_CREATED)
else:
return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
else:
return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
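# Illustrative client-side sketch (hypothetical: the host, api token, project
# name and the use of the `requests` library are assumptions of the example,
# not part of this service): pushing a record to the endpoint above.
#
#   import json, requests
#   payload = {'status': 'finished',
#              'inputs': [{'path': 'data/input.csv'}],
#              'outputs': [{'path': 'results/output.csv'}],
#              'dependencies': [{'name': 'numpy'}],
#              'system': {'os': 'linux'},
#              'program': {'version_control': 'git', 'scope': 'local'}}
#   requests.post('http://localhost:5000/api/v1/private/<api_token>/record/push/<project>',
#                 data=json.dumps(payload))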
# @app.route(API_URL + '/<api_token>/raw/push/<project_name>', methods=['POST'])
# def push_raw(api_token, project_name):
# current_user = check_access(api_token)
# if current_user is not None:
# if fk.request.method == 'POST':
# # user = UserModel.objects(email=user.email).first_or_404()
# project, created = ProjectModel.objects.get_or_create(name=project_name, owner=current_user)
# record = RecordModel(project=project)
# # record.save()
# record.label=str(record.id)
# record.status="started"
# record.reason="No reason specified."
# record.outcome="No outcome expected."
# print record.label
# print record.status
# # if created:
# if fk.request.data:
# try:
# data = json.loads(fk.request.data)
# if created:
# project.private = data.get('private', project.private)
# project.status = {'origin':"root"}
# project.description = data.get('description', "No description provided.")
# project.readme = data.get('readme', "No readme content to show.")
# del data['private']
# del data['status']
# del data['description']
# del data['readme']
# project.save()
# if len(data.get('image',[])) != 0:
# record.image = data.get('image',[])
# del data['image']
# else:
# record.image = {}
# print "Record Image: "+str(record.image)
# if len(data.get('signature',{})) != 0:
# record.signature = data.get('signature',{})
# del data['signature']
# else:
# record.signature = {}
# print "Record Signature: "+str(record.signature)
# record.update(data)
# print record.label
# print record.status
# except:
# print "No json data provided."
# if fk.request.files:
# try:
# if fk.request.files['data']:
# data_obj = fk.request.files['data']
# data = json.loads(data_obj.read())
# if len(data.get('image',[])) != 0:
# record.image = data.get('image',[{}])
# del data['image']
# else:
# record.image = {}
# print "Record Image: "+str(record.image)
# if len(data.get('signature',{})) != 0:
# record.signature = data.get('signature',{})
# del data['signature']
# else:
# record.signature = {}
# print "Record Signature: "+str(record.signature)
# record.update(data)
# except:
# pass
# # if len(record.image) == 0:
# # try:
# # if fk.request.files['image']:
# # image_obj = fk.request.files['image']
# # try:
# # record.save()
# # upload_handler(current_user, record, image_obj, 'record')
# # except Exception, e:
# # traceback.print_exc()
# # print "Uploading image failed!"
# # except Exception, e:
# # traceback.print_exc()
# # else:
# # print "Remote link provided."
# if fk.request.files['docker']:
# image_obj = fk.request.files['docker']
# try:
# record.save()
# upload_handler(current_user, record, image_obj, 'docker')
# print str(record.image)
# except Exception, e:
# traceback.print_exc()
# print "Uploading docker image failed!"
# if fk.request.files['binary']:
# image_obj = fk.request.files['binary']
# try:
# record.save()
# upload_handler(current_user, record, image_obj, 'binary')
# print str(record.image)
# except Exception, e:
# traceback.print_exc()
# print "Uploading executable image failed!"
# if fk.request.files['source']:
# image_obj = fk.request.files['source']
# try:
# record.save()
# upload_handler(current_user, record, image_obj, 'source')
# print str(record.image)
# except Exception, e:
# traceback.print_exc()
# print "Uploading source image failed!"
# # if len(record.signature) == 0:
# try:
# if fk.request.files['signature']:
# signature_obj = fk.request.files['signature']
# try:
# record.save()
# upload_handler(current_user, record, signature_obj, signature)
# except Exception, e:
# traceback.print_exc()
# print "Uploading signature failed!"
# except Exception, e:
# traceback.print_exc()
# # else:
# # print "Remote link provided."
# return fk.Response(str(record.id), status.HTTP_201_CREATED)
# else:
# return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
# else:
# return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
@app.route(API_URL + '/<api_token>/record/sync/<project_name>/<record_id>', methods=['PUT'])
def sync_record(api_token, project_name, record_id):
current_user = check_access(api_token)
if current_user is not None:
if fk.request.method == 'PUT':
# user = UserModel.objects(email=user.email).first_or_404()
project = ProjectModel.objects(name=project_name, owner=current_user).first_or_404()
record = RecordModel.objects.with_id(record_id)
print "In sync..."
if record.project == project:
if fk.request.data:
data = json.loads(fk.request.data)
if data.get('status','unknown') != 'unknown':
record.status = data.get('status','unknown')
del data['status']
else:
record.status = 'unknown'
if len(data.get('inputs',[])) != 0:
for inp in data.get('inputs',[]):
already = False
for current in record.inputs:
if cmp(current, inp) == 0:
already = True
break
if not already:
record.inputs.append(inp)
del data['inputs']
if len(data.get('outputs',[])) != 0:
for out in data.get('outputs',[]):
already = False
for current in record.outputs:
if cmp(current, out) == 0:
already = True
break
if not already:
record.outputs.append(out)
del data['outputs']
if len(data.get('dependencies',[])) != 0:
for dep in data.get('dependencies',[]):
already = False
for current in record.dependencies:
if cmp(current, dep) == 0:
already = True
break
if not already:
record.dependencies.append(dep)
del data['dependencies']
if len(data.get('system',{})) != 0:
record.system = data.get('system',{})
del data['system']
if len(data.get('program',{})) != 0:
record.program = data.get('program',{})
del data['program']
record.update(data)
if fk.request.files:
try:
if fk.request.files['data']:
data_obj = fk.request.files['data']
data = json.loads(data_obj.read())
if data.get('status','unknown') != 'unknown':
record.status = data.get('status','unknown')
del data['status']
else:
record.status = 'unknown'
if len(data.get('inputs',[])) != 0:
for inp in data.get('inputs',[]):
already = False
for current in record.inputs:
if cmp(current, inp) == 0:
already = True
break
if not already:
record.inputs.append(inp)
del data['inputs']
if len(data.get('outputs',[])) != 0:
for out in data.get('outputs',[]):
already = False
for current in record.outputs:
if cmp(current, out) == 0:
already = True
break
if not already:
record.outputs.append(out)
del data['outputs']
if len(data.get('dependencies',[])) != 0:
for dep in data.get('dependencies',[]):
already = False
for current in record.dependencies:
if cmp(current, dep) == 0:
already = True
break
if not already:
record.dependencies.append(dep)
del data['dependencies']
if len(data.get('system',{})) != 0:
record.system = data.get('system',{})
del data['system']
if len(data.get('program',{})) != 0:
record.program = data.get('program',{})
del data['program']
record.update(data)
except:
pass
#To handle source code versioning ourself in case.
# try:
# if fk.request.files['src']:
# src_obj = fk.request.files['src']
# try:
# record.save()
# upload_handler(current_user, record, src_obj, 'record')
# print str(record.src)
# except Exception, e:
# traceback.print_exc()
# print "Uploading image failed!"
# except:
# pass
return fk.Response("Record synchronized.", status.HTTP_201_CREATED)
else:
return fk.Response("Record sync rejected.", status.HTTP_401_UNAUTHORIZED)
else:
return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
#Delete this if the pull with the / at the end really pulls all the project activity.
@app.route(API_URL + '/<api_token>/record/pull/<project_name>', methods=['GET'])
def pull_record_all(api_token, project_name):
current_user = check_access(api_token)
if current_user is not None:
if fk.request.method == 'GET':
project = ProjectModel.objects(name=project_name, owner=current_user).first_or_404()
records = [json.loads(record.summary_json()) for record in RecordModel.objects(project=project)]
return fk.Response(json.dumps({'project':project.to_json(), 'records':records}), mimetype='application/json')
else:
return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
else:
return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
# @app.route(API_URL + '/<api_token>/<user_id>/record/clone/<project_name>/<record_id>', methods=['GET'])
# def clone_record(api_token, user_id, project_name, record_id):
# current_user = check_access(api_token)
# if current_user is not None:
# if fk.request.method == 'GET':
# owner = UserModel.objects(id=user_id).first_or_404()
# project = ProjectModel.objects(name=project_name, owner=owner).first_or_404()
# if not project.private:
# record = RecordModel.objects.with_id(record_id)
# clo_project = ProjectModel.objects(name=project_name, owner=current_user).first()
# if clo_project == None:
# clo_project = project.clone()
# clo_project.user = current_user
# clo_project.status = {'origin':str(user_id)+":"+project_name+":"+str(record_id)}
# clo_project.save()
# clo_record = RecordModel.objects.with_id(record_id)
# if clo_record == None or (clo_record != None and clo_record.project != clo_project):
# clo_record = record.clone()
# clo_record.project = clo_project
# clo_record.save()
# return fk.Response("Record cloned.", status.HTTP_201_CREATED)
# else:
# return fk.Response("Record already cloned!", status.HTTP_201_CREATED)
# else:
# return fk.make_response('Access denied. Private project.', status.HTTP_401_UNAUTHORIZED)
# else:
# return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
# else:
# return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
@app.route(API_URL + '/<api_token>/record/display/<project_name>/<record_id>', methods=['GET'])
def pull_record_single(api_token, project_name, record_id):
current_user = check_access(api_token)
if current_user is not None:
if fk.request.method == 'GET':
# user = UserModel.objects(email=user.email).first_or_404()
record = RecordModel.objects.with_id(record_id)
project = ProjectModel.objects.with_id(record.project.id)
if (project.private and (project.owner == current_user)) or (not project.private):
if record_id is not None:
return fk.Response(record.to_json(), mimetype='application/json')
else:
project = ProjectModel.objects(name=project_name, owner=current_user).first_or_404()
records = [json.loads(record.summary_json()) for record in RecordModel.objects(project=project)]
return fk.Response(json.dumps({'project':project.to_json(), 'records':records}), mimetype='application/json')
else:
return fk.Response('Record pull rejected.', status.HTTP_401_UNAUTHORIZED)
else:
return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
else:
return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
@app.route(API_URL + '/<api_token>/record/pull/<project_name>/<record_id>', methods=['GET'])
def pull_record(api_token, project_name, record_id):
current_user = check_access(api_token)
if current_user is not None:
if fk.request.method == 'GET':
# user = UserModel.objects(email=user.email).first_or_404()
record = RecordModel.objects.with_id(record_id)
project = ProjectModel.objects.with_id(record.project.id)
if (project.private and (project.owner == current_user)) or (not project.private):
if record_id is not None:
if record.container:
container = record.container
if container.image['location']:
image = load_image(container)
print image[1]
return fk.send_file(
image[0],
mimetypes.guess_type(image[1])[0],
as_attachment=True,
attachment_filename=str(current_user.id)+"-"+project_name+"-"+str(record_id)+"-record.tar",
)
else:
return fk.make_response('Empty location. Nothing to pull from here!', status.HTTP_204_NO_CONTENT)
else:
return fk.make_response('No container image. Nothing to pull from here!', status.HTTP_204_NO_CONTENT)
else:
return fk.make_response('Nothing to pull from here!', status.HTTP_204_NO_CONTENT)
else:
return fk.Response('Record pull rejected.', status.HTTP_401_UNAUTHORIZED)
else:
return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
else:
return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
#Delete this if the one remove with / at the end really delete all the records.
# @app.route(API_URL + '/<api_token>/record/remove/<project_name>', methods=['DELETE'])
# def remove_all_record(api_token, project_name):
# current_user = check_access(api_token)
# if current_user is not None:
# if fk.request.method == 'DELETE':
# # user = UserModel.objects(email=user.email).first_or_404()
# project = ProjectModel.objects(name=project_name, owner=current_user).first_or_404()
# records = RecordModel.objects(project=project)
# for record in records:
# record.delete()
# return fk.Response("All records deleted.", status.HTTP_201_CREATED)
# else:
# return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
# else:
# return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
# @app.route(API_URL + '/<api_token>/record/remove/<project_name>/<record_id>', methods=['DELETE'])
# def remove_record(api_token, project_name, record_id):
# current_user = check_access(api_token)
# if current_user is not None:
# if fk.request.method == 'DELETE':
# # user = UserModel.objects(email=user.email).first_or_404()
# project = ProjectModel.objects(name=project_name, owner=current_user).first_or_404()
# record = RecordModel.objects.with_id(record_id)
# if record.project == project:
# if record_id is not None:
# record.delete()
# return fk.Response("Record deleted.", status.HTTP_201_CREATED)
# else:
# records = RecordModel.objects(project=project)
# for record in records:
# record.delete()
# return fk.Response("All records deleted.", status.HTTP_201_CREATED)
# else:
# return fk.Response("Record delete rejected.", status.HTTP_401_UNAUTHORIZED)
# else:
# return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
# else:
# return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
# @app.route(API_URL + '/<api_token>/record/dashboard/<project_name>', methods=['GET'])
# def dashboard_record(api_token, project_name):
# current_user = check_access(api_token)
# if current_user is not None:
# if fk.request.method == 'GET':
# project = ProjectModel.objects(name=project_name, owner=current_user).first_or_404()
# records = [json.loads(record.summary_json()) for record in RecordModel.objects(project=project)]
# return fk.Response(json.dumps({'project':project.to_json(), 'records':records}), mimetype='application/json')
# else:
# return fk.make_response('Method not allowed.', status.HTTP_405_METHOD_NOT_ALLOWED)
# else:
# return fk.Response('Unauthorized api token.', status.HTTP_401_UNAUTHORIZED)
|
|
from __future__ import absolute_import
import six
from mistune import markdown
from collections import OrderedDict
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from sentry.models import IntegrationExternalProject, OrganizationIntegration, User
from sentry.integrations.issues import IssueSyncMixin
from sentry.shared_integrations.exceptions import ApiUnauthorized, ApiError
class VstsIssueSync(IssueSyncMixin):
description = "Integrate Azure DevOps work items by linking a project."
slug = "vsts"
conf_key = slug
issue_fields = frozenset(["id", "title", "url"])
done_categories = frozenset(["Resolved", "Completed"])
def get_persisted_default_config_fields(self):
return ["project", "work_item_type"]
def create_default_repo_choice(self, default_repo):
# default_repo should be the project_id
project = self.get_client().get_project(self.instance, default_repo)
return (project["id"], project["name"])
def get_project_choices(self, group, **kwargs):
client = self.get_client()
try:
projects = client.get_projects(self.instance)
except (ApiError, ApiUnauthorized, KeyError) as e:
self.raise_error(e)
project_choices = [(project["id"], project["name"]) for project in projects]
params = kwargs.get("params", {})
defaults = self.get_project_defaults(group.project_id)
try:
default_project = params.get(
"project", defaults.get("project") or project_choices[0][0]
)
except IndexError:
return None, project_choices
# If a project has been selected outside of the default list of
# projects, stick it onto the front of the list so that it can be
# selected.
try:
next(True for r in project_choices if r[0] == default_project)
except StopIteration:
try:
project_choices.insert(0, self.create_default_repo_choice(default_project))
except (ApiError, ApiUnauthorized):
return None, project_choices
return default_project, project_choices
def get_work_item_choices(self, project, group):
client = self.get_client()
try:
item_categories = client.get_work_item_categories(self.instance, project)["value"]
except (ApiError, ApiUnauthorized, KeyError) as e:
self.raise_error(e)
# we want to maintain ordering of the items
item_type_map = OrderedDict()
for item in item_categories:
for item_type_object in item["workItemTypes"]:
# the type is the last part of the url
item_type = item_type_object["url"].split("/")[-1]
# we can have duplicates so need to dedupe
if item_type not in item_type_map:
item_type_map[item_type] = item_type_object["name"]
item_tuples = list(item_type_map.items())
# try to get the default from either the last value used or from the first item on the list
defaults = self.get_project_defaults(group.project_id)
try:
default_item_type = defaults.get("work_item_type") or item_tuples[0][0]
except IndexError:
return None, item_tuples
return default_item_type, item_tuples
def get_create_issue_config(self, group, user, **kwargs):
kwargs["link_referrer"] = "vsts_integration"
fields = super(VstsIssueSync, self).get_create_issue_config(group, user, **kwargs)
# Azure/VSTS has BOTH projects and repositories. A project can have many repositories.
# Workitems (issues) are associated with the project not the repository.
default_project, project_choices = self.get_project_choices(group, **kwargs)
work_item_choices = []
default_work_item = None
if default_project:
default_work_item, work_item_choices = self.get_work_item_choices(
default_project, group
)
return [
{
"name": "project",
"required": True,
"type": "choice",
"choices": project_choices,
"defaultValue": default_project,
"label": _("Project"),
"placeholder": default_project or _("MyProject"),
"updatesForm": True,
},
{
"name": "work_item_type",
"required": True,
"type": "choice",
"choices": work_item_choices,
"defaultValue": default_work_item,
"label": _("Work Item Type"),
"placeholder": _("Bug"),
},
] + fields
def get_link_issue_config(self, group, **kwargs):
fields = super(VstsIssueSync, self).get_link_issue_config(group, **kwargs)
org = group.organization
autocomplete_url = reverse("sentry-extensions-vsts-search", args=[org.slug, self.model.id])
for field in fields:
if field["name"] == "externalIssue":
field["url"] = autocomplete_url
field["type"] = "select"
return fields
def get_issue_url(self, key, **kwargs):
return "%s_workitems/edit/%s" % (self.instance, six.text_type(key))
def create_issue(self, data, **kwargs):
"""
Creates the issue on the remote service and returns an issue ID.
"""
project_id = data.get("project")
if project_id is None:
raise ValueError("Azure DevOps expects project")
client = self.get_client()
title = data["title"]
description = data["description"]
item_type = data["work_item_type"]
try:
created_item = client.create_work_item(
instance=self.instance,
project=project_id,
item_type=item_type,
title=title,
# Descriptions cannot easily be seen. So, a comment will be added as well.
description=markdown(description),
comment=markdown(description),
)
except Exception as e:
self.raise_error(e)
project_name = created_item["fields"]["System.AreaPath"]
return {
"key": six.text_type(created_item["id"]),
"title": title,
"description": description,
"metadata": {"display_name": "%s#%s" % (project_name, created_item["id"])},
}
def get_issue(self, issue_id, **kwargs):
client = self.get_client()
work_item = client.get_work_item(self.instance, issue_id)
return {
"key": six.text_type(work_item["id"]),
"title": work_item["fields"]["System.Title"],
"description": work_item["fields"].get("System.Description"),
"metadata": {
"display_name": "%s#%s" % (work_item["fields"]["System.AreaPath"], work_item["id"])
},
}
def sync_assignee_outbound(self, external_issue, user, assign=True, **kwargs):
client = self.get_client()
assignee = None
if assign is True:
sentry_emails = [email.email.lower() for email in user.get_verified_emails()]
continuation_token = None
while True:
vsts_users = client.get_users(self.model.name, continuation_token)
continuation_token = vsts_users.headers.get("X-MS-ContinuationToken")
for vsts_user in vsts_users["value"]:
vsts_email = vsts_user.get(u"mailAddress")
if vsts_email and vsts_email.lower() in sentry_emails:
assignee = vsts_user["mailAddress"]
break
if not continuation_token:
break
if assignee is None:
# TODO(lb): Email people when this happens
self.logger.info(
"vsts.assignee-not-found",
extra={
"integration_id": external_issue.integration_id,
"user_id": user.id,
"issue_key": external_issue.key,
},
)
return
try:
client.update_work_item(self.instance, external_issue.key, assigned_to=assignee)
except (ApiUnauthorized, ApiError):
self.logger.info(
"vsts.failed-to-assign",
extra={
"integration_id": external_issue.integration_id,
"user_id": user.id,
"issue_key": external_issue.key,
},
)
def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs):
client = self.get_client()
work_item = client.get_work_item(self.instance, external_issue.key)
# For some reason, vsts doesn't include the project id
# in the work item response.
# TODO(jess): figure out if there's a better way to do this
vsts_project_name = work_item["fields"]["System.TeamProject"]
vsts_projects = client.get_projects(self.instance)
vsts_project_id = None
for p in vsts_projects:
if p["name"] == vsts_project_name:
vsts_project_id = p["id"]
break
try:
external_project = IntegrationExternalProject.objects.get(
external_id=vsts_project_id,
organization_integration_id__in=OrganizationIntegration.objects.filter(
organization_id=external_issue.organization_id,
integration_id=external_issue.integration_id,
),
)
except IntegrationExternalProject.DoesNotExist:
self.logger.info(
"vsts.external-project-not-found",
extra={
"integration_id": external_issue.integration_id,
"is_resolved": is_resolved,
"issue_key": external_issue.key,
},
)
return
status = (
external_project.resolved_status if is_resolved else external_project.unresolved_status
)
try:
client.update_work_item(self.instance, external_issue.key, state=status)
except (ApiUnauthorized, ApiError) as error:
self.logger.info(
"vsts.failed-to-change-status",
extra={
"integration_id": external_issue.integration_id,
"is_resolved": is_resolved,
"issue_key": external_issue.key,
"exception": error,
},
)
def should_unresolve(self, data):
done_states = self.get_done_states(data["project"])
        return (
            data["old_state"] in done_states or data["old_state"] is None
        ) and data["new_state"] not in done_states
def should_resolve(self, data):
done_states = self.get_done_states(data["project"])
        return data["old_state"] not in done_states and data["new_state"] in done_states
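    # A minimal worked example of the transition logic above (hypothetical state
    # names; real values come from get_done_states()): with
    # done_states = ["Closed", "Resolved"], an update with old_state="Active" and
    # new_state="Closed" makes should_resolve() return True and should_unresolve()
    # return False, while old_state="Closed" and new_state="Active" does the
    # opposite. A newly created work item (old_state=None) moving to "Active"
    # also triggers should_unresolve().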
def get_done_states(self, project):
client = self.get_client()
try:
all_states = client.get_work_item_states(self.instance, project)["value"]
except ApiError as err:
self.logger.info(
"vsts.get-done-states.failed",
extra={"integration_id": self.model.id, "exception": err},
)
return []
done_states = [
state["name"] for state in all_states if state["category"] in self.done_categories
]
return done_states
def get_issue_display_name(self, external_issue):
if external_issue.metadata is None:
return ""
return external_issue.metadata["display_name"]
def create_comment(self, issue_id, user_id, group_note):
comment = group_note.data["text"]
quoted_comment = self.create_comment_attribution(user_id, comment)
return self.get_client().update_work_item(self.instance, issue_id, comment=quoted_comment)
def create_comment_attribution(self, user_id, comment_text):
# VSTS uses markdown or xml
# https://docs.microsoft.com/en-us/microsoftteams/platform/concepts/bots/bots-text-formats
user = User.objects.get(id=user_id)
attribution = "%s wrote:\n\n" % user.name
quoted_comment = "%s<blockquote>%s</blockquote>" % (attribution, comment_text)
return quoted_comment
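        # For example (hypothetical user and comment), a user named "Jane Doe"
        # commenting "Looks like a duplicate" produces:
        #   "Jane Doe wrote:\n\n<blockquote>Looks like a duplicate</blockquote>"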
def update_comment(self, issue_id, user_id, external_comment_id, comment_text):
# Azure does not support updating comments
pass
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RouteFiltersOperations(object):
"""RouteFiltersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
route_filter_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
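    # Example usage (a sketch; the client construction and resource names are
    # assumptions, not part of this generated module):
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.network import NetworkManagementClient
    #
    #   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   poller = client.route_filters.begin_delete("my-resource-group", "my-route-filter")
    #   poller.result()  # block until the long-running delete completes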
def get(
self,
resource_group_name, # type: str
route_filter_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
"""Gets the specified route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param expand: Expands referenced express route bgp peering resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
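    # Example usage (a sketch; the resource names and the expand value are
    # assumptions):
    #
    #   route_filter = client.route_filters.get(
    #       "my-resource-group", "my-route-filter", expand="peerings"
    #   )
    #   print(route_filter.name, route_filter.provisioning_state)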
def _create_or_update_initial(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "_models.RouteFilter"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_filter_parameters, 'RouteFilter')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
route_filter_name, # type: str
route_filter_parameters, # type: "_models.RouteFilter"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.RouteFilter"]
"""Creates or updates a route filter in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param route_filter_parameters: Parameters supplied to the create or update route filter
operation.
:type route_filter_parameters: ~azure.mgmt.network.v2021_02_01.models.RouteFilter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either RouteFilter or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2021_02_01.models.RouteFilter]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_filter_name=route_filter_name,
route_filter_parameters=route_filter_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
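    # Example usage (a sketch; the model import path matches this versioned
    # package, while the resource names, location, and tags are assumptions):
    #
    #   from azure.mgmt.network.v2021_02_01.models import RouteFilter
    #
    #   poller = client.route_filters.begin_create_or_update(
    #       "my-resource-group",
    #       "my-route-filter",
    #       RouteFilter(location="westus", tags={"env": "dev"}),
    #   )
    #   route_filter = poller.result()  # deserialized RouteFilter on completion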
def update_tags(
self,
resource_group_name, # type: str
route_filter_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.RouteFilter"
"""Updates tags of a route filter.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_filter_name: The name of the route filter.
:type route_filter_name: str
:param parameters: Parameters supplied to update route filter tags.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteFilter, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.RouteFilter
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilter"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeFilterName': self._serialize.url("route_filter_name", route_filter_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteFilter', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters/{routeFilterName}'} # type: ignore
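    # Example usage (a sketch; the tag values are assumptions):
    #
    #   from azure.mgmt.network.v2021_02_01.models import TagsObject
    #
    #   route_filter = client.route_filters.update_tags(
    #       "my-resource-group", "my-route-filter", TagsObject(tags={"env": "dev"})
    #   )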
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteFilterListResult"]
"""Gets all route filters in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeFilters'} # type: ignore
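    # Example usage (a sketch): ItemPaged is a lazy iterator, so additional pages
    # are fetched transparently as you iterate.
    #
    #   for route_filter in client.route_filters.list_by_resource_group("my-resource-group"):
    #       print(route_filter.name)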
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RouteFilterListResult"]
"""Gets all route filters in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteFilterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_02_01.models.RouteFilterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteFilterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RouteFilterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeFilters'} # type: ignore
|
|
from copy import deepcopy
import numpy as np
import pandas as pd
import pytest
from xarray import DataArray, Dataset, Variable, concat
from xarray.core import dtypes, merge
from . import (
InaccessibleArray,
assert_array_equal,
assert_equal,
assert_identical,
raises_regex,
requires_dask,
)
from .test_dataset import create_test_data
def test_concat_compat():
ds1 = Dataset(
{
"has_x_y": (("y", "x"), [[1, 2]]),
"has_x": ("x", [1, 2]),
"no_x_y": ("z", [1, 2]),
},
coords={"x": [0, 1], "y": [0], "z": [-1, -2]},
)
ds2 = Dataset(
{
"has_x_y": (("y", "x"), [[3, 4]]),
"has_x": ("x", [1, 2]),
"no_x_y": (("q", "z"), [[1, 2]]),
},
coords={"x": [0, 1], "y": [1], "z": [-1, -2], "q": [0]},
)
result = concat([ds1, ds2], dim="y", data_vars="minimal", compat="broadcast_equals")
assert_equal(ds2.no_x_y, result.no_x_y.transpose())
for var in ["has_x", "no_x_y"]:
assert "y" not in result[var].dims and "y" not in result[var].coords
with raises_regex(ValueError, "coordinates in some datasets but not others"):
concat([ds1, ds2], dim="q")
with raises_regex(ValueError, "'q' is not present in all datasets"):
concat([ds2, ds1], dim="q")
class TestConcatDataset:
@pytest.fixture
def data(self):
return create_test_data().drop_dims("dim3")
def rectify_dim_order(self, data, dataset):
# return a new dataset with all variable dimensions transposed into
# the order in which they are found in `data`
return Dataset(
{k: v.transpose(*data[k].dims) for k, v in dataset.data_vars.items()},
dataset.coords,
attrs=dataset.attrs,
)
@pytest.mark.parametrize("coords", ["different", "minimal"])
@pytest.mark.parametrize("dim", ["dim1", "dim2"])
def test_concat_simple(self, data, dim, coords):
datasets = [g for _, g in data.groupby(dim, squeeze=False)]
assert_identical(data, concat(datasets, dim, coords=coords))
def test_concat_merge_variables_present_in_some_datasets(self, data):
# coordinates present in some datasets but not others
ds1 = Dataset(data_vars={"a": ("y", [0.1])}, coords={"x": 0.1})
ds2 = Dataset(data_vars={"a": ("y", [0.2])}, coords={"z": 0.2})
actual = concat([ds1, ds2], dim="y", coords="minimal")
expected = Dataset({"a": ("y", [0.1, 0.2])}, coords={"x": 0.1, "z": 0.2})
assert_identical(expected, actual)
# data variables present in some datasets but not others
split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))]
data0, data1 = deepcopy(split_data)
data1["foo"] = ("bar", np.random.randn(10))
actual = concat([data0, data1], "dim1")
expected = data.copy().assign(foo=data1.foo)
assert_identical(expected, actual)
def test_concat_2(self, data):
dim = "dim2"
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
concat_over = [k for k, v in data.coords.items() if dim in v.dims and k != dim]
actual = concat(datasets, data[dim], coords=concat_over)
assert_identical(data, self.rectify_dim_order(data, actual))
@pytest.mark.parametrize("coords", ["different", "minimal", "all"])
@pytest.mark.parametrize("dim", ["dim1", "dim2"])
def test_concat_coords_kwarg(self, data, dim, coords):
data = data.copy(deep=True)
# make sure the coords argument behaves as expected
data.coords["extra"] = ("dim4", np.arange(3))
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
actual = concat(datasets, data[dim], coords=coords)
if coords == "all":
expected = np.array([data["extra"].values for _ in range(data.dims[dim])])
assert_array_equal(actual["extra"].values, expected)
else:
assert_equal(data["extra"], actual["extra"])
def test_concat(self, data):
split_data = [
data.isel(dim1=slice(3)),
data.isel(dim1=3),
data.isel(dim1=slice(4, None)),
]
assert_identical(data, concat(split_data, "dim1"))
def test_concat_dim_precedence(self, data):
# verify that the dim argument takes precedence over
# concatenating dataset variables of the same name
dim = (2 * data["dim1"]).rename("dim1")
datasets = [g for _, g in data.groupby("dim1", squeeze=False)]
expected = data.copy()
expected["dim1"] = dim
assert_identical(expected, concat(datasets, dim))
def test_concat_data_vars(self):
data = Dataset({"foo": ("x", np.random.randn(10))})
objs = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
for data_vars in ["minimal", "different", "all", [], ["foo"]]:
actual = concat(objs, dim="x", data_vars=data_vars)
assert_identical(data, actual)
def test_concat_coords(self):
data = Dataset({"foo": ("x", np.random.randn(10))})
expected = data.assign_coords(c=("x", [0] * 5 + [1] * 5))
objs = [
data.isel(x=slice(5)).assign_coords(c=0),
data.isel(x=slice(5, None)).assign_coords(c=1),
]
for coords in ["different", "all", ["c"]]:
actual = concat(objs, dim="x", coords=coords)
assert_identical(expected, actual)
for coords in ["minimal", []]:
with raises_regex(merge.MergeError, "conflicting values"):
concat(objs, dim="x", coords=coords)
def test_concat_constant_index(self):
# GH425
ds1 = Dataset({"foo": 1.5}, {"y": 1})
ds2 = Dataset({"foo": 2.5}, {"y": 1})
expected = Dataset({"foo": ("y", [1.5, 2.5]), "y": [1, 1]})
for mode in ["different", "all", ["foo"]]:
actual = concat([ds1, ds2], "y", data_vars=mode)
assert_identical(expected, actual)
with raises_regex(merge.MergeError, "conflicting values"):
# previously dim="y", and raised error which makes no sense.
# "foo" has dimension "y" so minimal should concatenate it?
concat([ds1, ds2], "new_dim", data_vars="minimal")
def test_concat_size0(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(0, 0)), data]
actual = concat(split_data, "dim1")
assert_identical(data, actual)
actual = concat(split_data[::-1], "dim1")
assert_identical(data, actual)
def test_concat_autoalign(self):
ds1 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])})
ds2 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 3])])})
actual = concat([ds1, ds2], "y")
expected = Dataset(
{
"foo": DataArray(
[[1, 2, np.nan], [1, np.nan, 2]],
dims=["y", "x"],
coords={"x": [1, 2, 3]},
)
}
)
assert_identical(expected, actual)
def test_concat_errors(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))]
with raises_regex(ValueError, "must supply at least one"):
concat([], "dim1")
with raises_regex(ValueError, "Cannot specify both .*='different'"):
concat(
[data, data], dim="concat_dim", data_vars="different", compat="override"
)
with raises_regex(ValueError, "must supply at least one"):
concat([], "dim1")
with raises_regex(ValueError, "are not coordinates"):
concat([data, data], "new_dim", coords=["not_found"])
with raises_regex(ValueError, "global attributes not"):
data0, data1 = deepcopy(split_data)
data1.attrs["foo"] = "bar"
concat([data0, data1], "dim1", compat="identical")
assert_identical(data, concat([data0, data1], "dim1", compat="equals"))
with raises_regex(ValueError, "compat.* invalid"):
concat(split_data, "dim1", compat="foobar")
with raises_regex(ValueError, "unexpected value for"):
concat([data, data], "new_dim", coords="foobar")
with raises_regex(ValueError, "coordinate in some datasets but not others"):
concat([Dataset({"x": 0}), Dataset({"x": [1]})], dim="z")
with raises_regex(ValueError, "coordinate in some datasets but not others"):
concat([Dataset({"x": 0}), Dataset({}, {"x": 1})], dim="z")
def test_concat_join_kwarg(self):
ds1 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]})
ds2 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]})
expected = {}
expected["outer"] = Dataset(
{"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])},
{"x": [0, 1], "y": [0, 0.0001]},
)
expected["inner"] = Dataset(
{"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []}
)
expected["left"] = Dataset(
{"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)},
coords={"x": [0, 1], "y": [0]},
)
expected["right"] = Dataset(
{"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)},
coords={"x": [0, 1], "y": [0.0001]},
)
expected["override"] = Dataset(
{"a": (("x", "y"), np.array([0, 0], ndmin=2).T)},
coords={"x": [0, 1], "y": [0]},
)
with raises_regex(ValueError, "indexes along dimension 'y'"):
actual = concat([ds1, ds2], join="exact", dim="x")
for join in expected:
actual = concat([ds1, ds2], join=join, dim="x")
assert_equal(actual, expected[join])
# regression test for #3681
actual = concat([ds1.drop("x"), ds2.drop("x")], join="override", dim="y")
expected = Dataset(
{"a": (("x", "y"), np.array([0, 0], ndmin=2))}, coords={"y": [0, 0.0001]}
)
assert_identical(actual, expected)
def test_concat_combine_attrs_kwarg(self):
ds1 = Dataset({"a": ("x", [0])}, coords={"x": [0]}, attrs={"b": 42})
ds2 = Dataset({"a": ("x", [0])}, coords={"x": [1]}, attrs={"b": 42, "c": 43})
expected = {}
expected["drop"] = Dataset({"a": ("x", [0, 0])}, {"x": [0, 1]})
expected["no_conflicts"] = Dataset(
{"a": ("x", [0, 0])}, {"x": [0, 1]}, {"b": 42, "c": 43}
)
expected["override"] = Dataset({"a": ("x", [0, 0])}, {"x": [0, 1]}, {"b": 42})
with raises_regex(ValueError, "combine_attrs='identical'"):
actual = concat([ds1, ds2], dim="x", combine_attrs="identical")
with raises_regex(ValueError, "combine_attrs='no_conflicts'"):
ds3 = ds2.copy(deep=True)
ds3.attrs["b"] = 44
actual = concat([ds1, ds3], dim="x", combine_attrs="no_conflicts")
for combine_attrs in expected:
actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs)
assert_identical(actual, expected[combine_attrs])
def test_concat_promote_shape(self):
# mixed dims within variables
objs = [Dataset({}, {"x": 0}), Dataset({"x": [1]})]
actual = concat(objs, "x")
expected = Dataset({"x": [0, 1]})
assert_identical(actual, expected)
objs = [Dataset({"x": [0]}), Dataset({}, {"x": 1})]
actual = concat(objs, "x")
assert_identical(actual, expected)
# mixed dims between variables
objs = [Dataset({"x": [2], "y": 3}), Dataset({"x": [4], "y": 5})]
actual = concat(objs, "x")
expected = Dataset({"x": [2, 4], "y": ("x", [3, 5])})
assert_identical(actual, expected)
# mixed dims in coord variable
objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1]}, {"y": ("x", [-2])})]
actual = concat(objs, "x")
expected = Dataset({"x": [0, 1]}, {"y": ("x", [-1, -2])})
assert_identical(actual, expected)
# scalars with mixed lengths along concat dim -- values should repeat
objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})]
actual = concat(objs, "x")
expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])})
assert_identical(actual, expected)
# broadcast 1d x 1d -> 2d
objs = [
Dataset({"z": ("x", [-1])}, {"x": [0], "y": [0]}),
Dataset({"z": ("y", [1])}, {"x": [1], "y": [0]}),
]
actual = concat(objs, "x")
expected = Dataset({"z": (("x", "y"), [[-1], [1]])}, {"x": [0, 1], "y": [0]})
assert_identical(actual, expected)
def test_concat_do_not_promote(self):
# GH438
objs = [
Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}),
Dataset({"y": ("t", [2])}, {"x": 1, "t": [0]}),
]
expected = Dataset({"y": ("t", [1, 2])}, {"x": 1, "t": [0, 0]})
actual = concat(objs, "t")
assert_identical(expected, actual)
objs = [
Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}),
Dataset({"y": ("t", [2])}, {"x": 2, "t": [0]}),
]
with pytest.raises(ValueError):
concat(objs, "t", coords="minimal")
def test_concat_dim_is_variable(self):
objs = [Dataset({"x": 0}), Dataset({"x": 1})]
coord = Variable("y", [3, 4])
expected = Dataset({"x": ("y", [0, 1]), "y": [3, 4]})
actual = concat(objs, coord)
assert_identical(actual, expected)
def test_concat_multiindex(self):
x = pd.MultiIndex.from_product([[1, 2, 3], ["a", "b"]])
expected = Dataset({"x": x})
actual = concat(
[expected.isel(x=slice(2)), expected.isel(x=slice(2, None))], "x"
)
assert expected.equals(actual)
assert isinstance(actual.x.to_index(), pd.MultiIndex)
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
def test_concat_fill_value(self, fill_value):
datasets = [
Dataset({"a": ("x", [2, 3]), "x": [1, 2]}),
Dataset({"a": ("x", [1, 2]), "x": [0, 1]}),
]
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value = np.nan
expected = Dataset(
{"a": (("t", "x"), [[fill_value, 2, 3], [1, 2, fill_value]])},
{"x": [0, 1, 2]},
)
actual = concat(datasets, dim="t", fill_value=fill_value)
assert_identical(actual, expected)
class TestConcatDataArray:
def test_concat(self):
ds = Dataset(
{
"foo": (["x", "y"], np.random.random((2, 3))),
"bar": (["x", "y"], np.random.random((2, 3))),
},
{"x": [0, 1]},
)
foo = ds["foo"]
bar = ds["bar"]
# from dataset array:
expected = DataArray(
np.array([foo.values, bar.values]),
dims=["w", "x", "y"],
coords={"x": [0, 1]},
)
actual = concat([foo, bar], "w")
assert_equal(expected, actual)
# from iteration:
grouped = [g for _, g in foo.groupby("x")]
stacked = concat(grouped, ds["x"])
assert_identical(foo, stacked)
# with an index as the 'dim' argument
stacked = concat(grouped, ds.indexes["x"])
assert_identical(foo, stacked)
actual = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True)
expected = foo[:2].rename({"x": "concat_dim"})
assert_identical(expected, actual)
actual = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True)
expected = foo[:2].rename({"x": "concat_dim"})
assert_identical(expected, actual)
with raises_regex(ValueError, "not identical"):
concat([foo, bar], dim="w", compat="identical")
with raises_regex(ValueError, "not a valid argument"):
concat([foo, bar], dim="w", data_vars="minimal")
def test_concat_encoding(self):
# Regression test for GH1297
ds = Dataset(
{
"foo": (["x", "y"], np.random.random((2, 3))),
"bar": (["x", "y"], np.random.random((2, 3))),
},
{"x": [0, 1]},
)
foo = ds["foo"]
foo.encoding = {"complevel": 5}
ds.encoding = {"unlimited_dims": "x"}
assert concat([foo, foo], dim="x").encoding == foo.encoding
assert concat([ds, ds], dim="x").encoding == ds.encoding
@requires_dask
def test_concat_lazy(self):
import dask.array as da
arrays = [
DataArray(
da.from_array(InaccessibleArray(np.zeros((3, 3))), 3), dims=["x", "y"]
)
for _ in range(2)
]
# should not raise
combined = concat(arrays, dim="z")
assert combined.shape == (2, 3, 3)
assert combined.dims == ("z", "x", "y")
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
def test_concat_fill_value(self, fill_value):
foo = DataArray([1, 2], coords=[("x", [1, 2])])
bar = DataArray([1, 2], coords=[("x", [1, 3])])
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value = np.nan
expected = DataArray(
[[1, 2, fill_value], [1, fill_value, 2]],
dims=["y", "x"],
coords={"x": [1, 2, 3]},
)
actual = concat((foo, bar), dim="y", fill_value=fill_value)
assert_identical(actual, expected)
def test_concat_join_kwarg(self):
ds1 = Dataset(
{"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]}
).to_array()
ds2 = Dataset(
{"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]}
).to_array()
expected = {}
expected["outer"] = Dataset(
{"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])},
{"x": [0, 1], "y": [0, 0.0001]},
)
expected["inner"] = Dataset(
{"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []}
)
expected["left"] = Dataset(
{"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)},
coords={"x": [0, 1], "y": [0]},
)
expected["right"] = Dataset(
{"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)},
coords={"x": [0, 1], "y": [0.0001]},
)
expected["override"] = Dataset(
{"a": (("x", "y"), np.array([0, 0], ndmin=2).T)},
coords={"x": [0, 1], "y": [0]},
)
with raises_regex(ValueError, "indexes along dimension 'y'"):
actual = concat([ds1, ds2], join="exact", dim="x")
for join in expected:
actual = concat([ds1, ds2], join=join, dim="x")
assert_equal(actual, expected[join].to_array())
def test_concat_combine_attrs_kwarg(self):
da1 = DataArray([0], coords=[("x", [0])], attrs={"b": 42})
da2 = DataArray([0], coords=[("x", [1])], attrs={"b": 42, "c": 43})
expected = {}
expected["drop"] = DataArray([0, 0], coords=[("x", [0, 1])])
expected["no_conflicts"] = DataArray(
[0, 0], coords=[("x", [0, 1])], attrs={"b": 42, "c": 43}
)
expected["override"] = DataArray(
[0, 0], coords=[("x", [0, 1])], attrs={"b": 42}
)
with raises_regex(ValueError, "combine_attrs='identical'"):
actual = concat([da1, da2], dim="x", combine_attrs="identical")
with raises_regex(ValueError, "combine_attrs='no_conflicts'"):
da3 = da2.copy(deep=True)
da3.attrs["b"] = 44
actual = concat([da1, da3], dim="x", combine_attrs="no_conflicts")
for combine_attrs in expected:
actual = concat([da1, da2], dim="x", combine_attrs=combine_attrs)
assert_identical(actual, expected[combine_attrs])
@pytest.mark.parametrize("attr1", ({"a": {"meta": [10, 20, 30]}}, {"a": [1, 2, 3]}, {}))
@pytest.mark.parametrize("attr2", ({"a": [1, 2, 3]}, {}))
def test_concat_attrs_first_variable(attr1, attr2):
arrs = [
DataArray([[1], [2]], dims=["x", "y"], attrs=attr1),
DataArray([[3], [4]], dims=["x", "y"], attrs=attr2),
]
concat_attrs = concat(arrs, "y").attrs
assert concat_attrs == attr1
def test_concat_merge_single_non_dim_coord():
da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1})
da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]})
expected = DataArray(range(1, 7), dims="x", coords={"x": range(1, 7), "y": 1})
for coords in ["different", "minimal"]:
actual = concat([da1, da2], "x", coords=coords)
assert_identical(actual, expected)
with raises_regex(ValueError, "'y' is not present in all datasets."):
concat([da1, da2], dim="x", coords="all")
da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1})
da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]})
da3 = DataArray([7, 8, 9], dims="x", coords={"x": [7, 8, 9], "y": 1})
for coords in ["different", "all"]:
with raises_regex(ValueError, "'y' not present in all datasets"):
concat([da1, da2, da3], dim="x")
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.extensions import portbindings
from neutron.plugins.ml2 import driver_api as api
from neutron.tests import base
NETWORK_ID = "fake_network"
PORT_ID = "fake_port"
class FakeNetworkContext(api.NetworkContext):
def __init__(self, segments):
self._network_segments = segments
@property
def current(self):
return {'id': NETWORK_ID}
@property
def original(self):
return None
@property
def network_segments(self):
return self._network_segments
class FakePortContext(api.PortContext):
def __init__(self, agent_type, agents, segments,
vnic_type=portbindings.VNIC_NORMAL):
self._agent_type = agent_type
self._agents = agents
self._network_context = FakeNetworkContext(segments)
self._bound_vnic_type = vnic_type
self._bound_segment_id = None
self._bound_vif_type = None
self._bound_vif_details = None
@property
def current(self):
return {'id': PORT_ID,
'binding:vnic_type': self._bound_vnic_type}
@property
def original(self):
return None
@property
def network(self):
return self._network_context
@property
def bound_segment(self):
if self._bound_segment_id:
for segment in self._network_context.network_segments:
if segment[api.ID] == self._bound_segment_id:
return segment
@property
def original_bound_segment(self):
return None
@property
def bound_driver(self):
return None
@property
def original_bound_driver(self):
return None
def host_agents(self, agent_type):
if agent_type == self._agent_type:
return self._agents
else:
return []
def set_binding(self, segment_id, vif_type, vif_details):
self._bound_segment_id = segment_id
self._bound_vif_type = vif_type
self._bound_vif_details = vif_details
class AgentMechanismBaseTestCase(base.BaseTestCase):
    # The following must be overridden for the specific mechanism
    # driver being tested:
VIF_TYPE = None
VIF_DETAILS = None
AGENT_TYPE = None
AGENTS = None
AGENTS_DEAD = None
AGENTS_BAD = None
def _check_unbound(self, context):
self.assertIsNone(context._bound_segment_id)
self.assertIsNone(context._bound_vif_type)
self.assertIsNone(context._bound_vif_details)
def _check_bound(self, context, segment):
self.assertEqual(context._bound_segment_id, segment[api.ID])
self.assertEqual(context._bound_vif_type, self.VIF_TYPE)
vif_details = context._bound_vif_details
self.assertIsNotNone(vif_details)
        # NOTE(r-mibu): The following five lines exist only for backward
        # compatibility. In this class, HAS_PORT_FILTER has been replaced
        # by VIF_DETAILS, which can be set to the expected vif_details to
        # check, but not all subclasses have completed the replacement of
        # HAS_PORT_FILTER yet.
if self.VIF_DETAILS is None:
expected = getattr(self, 'CAP_PORT_FILTER', None)
port_filter = vif_details[portbindings.CAP_PORT_FILTER]
self.assertEqual(expected, port_filter)
return
self.assertEqual(self.VIF_DETAILS, vif_details)
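# A minimal sketch (hypothetical driver and agent values) of how a concrete
# mechanism driver test case is expected to fill in the attributes declared in
# AgentMechanismBaseTestCase and provide self.driver in setUp():
#
#   class FakeMechDriverTestCase(AgentMechanismBaseTestCase):
#       VIF_TYPE = portbindings.VIF_TYPE_OVS
#       VIF_DETAILS = {portbindings.CAP_PORT_FILTER: True}
#       AGENT_TYPE = 'Open vSwitch agent'
#       AGENTS = [{'alive': True, 'configurations': {}}]
#       AGENTS_DEAD = [{'alive': False, 'configurations': {}}]
#       AGENTS_BAD = [{'alive': True, 'configurations': None}]
#
#       def setUp(self):
#           super(FakeMechDriverTestCase, self).setUp()
#           self.driver = fake_mech_driver.FakeMechanismDriver()
#           self.driver.initialize()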
class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase):
UNKNOWN_TYPE_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'}]
def test_unknown_type(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.UNKNOWN_TYPE_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase):
LOCAL_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'local_segment_id',
api.NETWORK_TYPE: 'local'}]
def test_type_local(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.LOCAL_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.LOCAL_SEGMENTS[1])
def test_type_local_dead(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_DEAD,
self.LOCAL_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase):
FLAT_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'flat_segment_id',
api.NETWORK_TYPE: 'flat',
api.PHYSICAL_NETWORK: 'fake_physical_network'}]
def test_type_flat(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.FLAT_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.FLAT_SEGMENTS[1])
def test_type_flat_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.FLAT_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismVlanTestCase(AgentMechanismBaseTestCase):
VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'vlan_segment_id',
api.NETWORK_TYPE: 'vlan',
api.PHYSICAL_NETWORK: 'fake_physical_network',
api.SEGMENTATION_ID: 1234}]
def test_type_vlan(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.VLAN_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.VLAN_SEGMENTS[1])
def test_type_vlan_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.VLAN_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismGreTestCase(AgentMechanismBaseTestCase):
GRE_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'gre_segment_id',
api.NETWORK_TYPE: 'gre',
api.SEGMENTATION_ID: 1234}]
def test_type_gre(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.GRE_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.GRE_SEGMENTS[1])
def test_type_gre_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.GRE_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
|
|
"""empty message
Revision ID: 13484841a0c
Revises: None
Create Date: 2016-03-09 11:54:39.334813
"""
# revision identifiers, used by Alembic.
revision = '13484841a0c'
down_revision = None
from alembic import op
import sqlalchemy as sa
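# This migration is applied with the standard Alembic CLI (the project layout
# and alembic.ini configuration are assumptions about the surrounding repo):
#
#   alembic upgrade 13484841a0c   # or: alembic upgrade head
#   alembic downgrade base        # reverts via downgrade() below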
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('category_type', sa.Integer(), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('logo', sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['category.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_category_logo'), 'category', ['logo'], unique=False)
op.create_index(op.f('ix_category_name'), 'category', ['name'], unique=False)
op.create_index(op.f('ix_category_parent_id'), 'category', ['parent_id'], unique=False)
op.create_table('currency_rate',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('base_currency', sa.Unicode(length=3), nullable=True),
sa.Column('currency', sa.Unicode(length=3), nullable=True),
sa.Column('rate', sa.Numeric(precision=13, scale=4), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_currency_rate_base_currency'), 'currency_rate', ['base_currency'], unique=False)
op.create_index(op.f('ix_currency_rate_currency'), 'currency_rate', ['currency'], unique=False)
op.create_table('group',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('tag',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_tag_name'), 'tag', ['name'], unique=False)
op.create_table('group_category',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('category_type', sa.Integer(), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('logo', sa.String(length=255), nullable=True),
sa.Column('group_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
sa.ForeignKeyConstraint(['parent_id'], ['group_category.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_group_category_group_id'), 'group_category', ['group_id'], unique=False)
op.create_index(op.f('ix_group_category_logo'), 'group_category', ['logo'], unique=False)
op.create_index(op.f('ix_group_category_name'), 'group_category', ['name'], unique=False)
op.create_index(op.f('ix_group_category_parent_id'), 'group_category', ['parent_id'], unique=False)
op.create_table('group_currency',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('symbol', sa.String(length=3), nullable=True),
sa.Column('rate', sa.Integer(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.Column('group_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_group_currency_date_modified'), 'group_currency', ['date_modified'], unique=False)
op.create_index(op.f('ix_group_currency_group_id'), 'group_currency', ['group_id'], unique=False)
op.create_index(op.f('ix_group_currency_name'), 'group_currency', ['name'], unique=False)
op.create_index(op.f('ix_group_currency_symbol'), 'group_currency', ['symbol'], unique=False)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=128), nullable=False),
sa.Column('first_name', sa.String(length=256), nullable=True),
sa.Column('last_name', sa.String(length=256), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=False),
sa.Column('active', sa.Boolean(), nullable=False),
sa.Column('super_user', sa.Boolean(), nullable=False),
sa.Column('group_id', sa.Integer(), nullable=True),
sa.Column('last_login', sa.DateTime(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['group_id'], ['group.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_first_name'), 'user', ['first_name'], unique=False)
op.create_index(op.f('ix_user_group_id'), 'user', ['group_id'], unique=False)
op.create_index(op.f('ix_user_last_name'), 'user', ['last_name'], unique=False)
op.create_table('account',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('currency_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['currency_id'], ['group_currency.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_account_currency_id'), 'account', ['currency_id'], unique=False)
op.create_index(op.f('ix_account_name'), 'account', ['name'], unique=False)
op.create_index(op.f('ix_account_user_id'), 'account', ['user_id'], unique=False)
op.create_table('app',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('secret', sa.String(length=255), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_app_name'), 'app', ['name'], unique=False)
op.create_index(op.f('ix_app_secret'), 'app', ['secret'], unique=False)
op.create_index(op.f('ix_app_user_id'), 'app', ['user_id'], unique=False)
op.create_table('record',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('amount', sa.Numeric(precision=13, scale=4), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('record_type', sa.Integer(), nullable=True),
sa.Column('payment_method', sa.Integer(), nullable=True),
sa.Column('date', sa.DateTime(timezone=True), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('account_id', sa.Integer(), nullable=True),
sa.Column('currency_id', sa.Integer(), nullable=True),
sa.Column('category_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['account_id'], ['account.id'], ),
sa.ForeignKeyConstraint(['category_id'], ['group_category.id'], ),
sa.ForeignKeyConstraint(['currency_id'], ['group_currency.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_record_account_id'), 'record', ['account_id'], unique=False)
op.create_index(op.f('ix_record_category_id'), 'record', ['category_id'], unique=False)
op.create_index(op.f('ix_record_currency_id'), 'record', ['currency_id'], unique=False)
op.create_index(op.f('ix_record_date'), 'record', ['date'], unique=False)
op.create_index(op.f('ix_record_description'), 'record', ['description'], unique=False)
op.create_index(op.f('ix_record_user_id'), 'record', ['user_id'], unique=False)
op.create_table('token',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('app_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('token', sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(['app_id'], ['app.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_token_app_id'), 'token', ['app_id'], unique=False)
op.create_index(op.f('ix_token_token'), 'token', ['token'], unique=False)
op.create_index(op.f('ix_token_user_id'), 'token', ['user_id'], unique=False)
op.create_table('record_tag',
sa.Column('record_id', sa.Integer(), nullable=True),
sa.Column('tag_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['record_id'], ['record.id'], ),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], )
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
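    # Drops mirror the creates above in reverse order: each table's indexes are
    # removed first, and referencing tables go before the tables they reference.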
op.drop_table('record_tag')
op.drop_index(op.f('ix_token_user_id'), table_name='token')
op.drop_index(op.f('ix_token_token'), table_name='token')
op.drop_index(op.f('ix_token_app_id'), table_name='token')
op.drop_table('token')
op.drop_index(op.f('ix_record_user_id'), table_name='record')
op.drop_index(op.f('ix_record_description'), table_name='record')
op.drop_index(op.f('ix_record_date'), table_name='record')
op.drop_index(op.f('ix_record_currency_id'), table_name='record')
op.drop_index(op.f('ix_record_category_id'), table_name='record')
op.drop_index(op.f('ix_record_account_id'), table_name='record')
op.drop_table('record')
op.drop_index(op.f('ix_app_user_id'), table_name='app')
op.drop_index(op.f('ix_app_secret'), table_name='app')
op.drop_index(op.f('ix_app_name'), table_name='app')
op.drop_table('app')
op.drop_index(op.f('ix_account_user_id'), table_name='account')
op.drop_index(op.f('ix_account_name'), table_name='account')
op.drop_index(op.f('ix_account_currency_id'), table_name='account')
op.drop_table('account')
op.drop_index(op.f('ix_user_last_name'), table_name='user')
op.drop_index(op.f('ix_user_group_id'), table_name='user')
op.drop_index(op.f('ix_user_first_name'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
op.drop_index(op.f('ix_group_currency_symbol'), table_name='group_currency')
op.drop_index(op.f('ix_group_currency_name'), table_name='group_currency')
op.drop_index(op.f('ix_group_currency_group_id'), table_name='group_currency')
op.drop_index(op.f('ix_group_currency_date_modified'), table_name='group_currency')
op.drop_table('group_currency')
op.drop_index(op.f('ix_group_category_parent_id'), table_name='group_category')
op.drop_index(op.f('ix_group_category_name'), table_name='group_category')
op.drop_index(op.f('ix_group_category_logo'), table_name='group_category')
op.drop_index(op.f('ix_group_category_group_id'), table_name='group_category')
op.drop_table('group_category')
op.drop_index(op.f('ix_tag_name'), table_name='tag')
op.drop_table('tag')
op.drop_table('group')
op.drop_index(op.f('ix_currency_rate_currency'), table_name='currency_rate')
op.drop_index(op.f('ix_currency_rate_base_currency'), table_name='currency_rate')
op.drop_table('currency_rate')
op.drop_index(op.f('ix_category_parent_id'), table_name='category')
op.drop_index(op.f('ix_category_name'), table_name='category')
op.drop_index(op.f('ix_category_logo'), table_name='category')
op.drop_table('category')
### end Alembic commands ###
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import hashlib
import json
import fixtures
from oslotest import mockpatch
import six
from stevedore import extension
from heat.common import exception
from heat.common import template_format
from heat.engine.cfn import functions as cfn_funcs
from heat.engine.cfn import template as cfn_t
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine import function
from heat.engine.hot import template as hot_t
from heat.engine import parameters
from heat.engine import rsrc_defn
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests.openstack.nova import fakes as fakes_nova
from heat.tests import utils
mapping_template = template_format.parse('''{
"AWSTemplateFormatVersion" : "2010-09-09",
"Mappings" : {
"ValidMapping" : {
"TestKey" : { "TestValue" : "wibble" }
},
"InvalidMapping" : {
"ValueList" : [ "foo", "bar" ],
"ValueString" : "baz"
},
"MapList": [ "foo", { "bar" : "baz" } ],
"MapString": "foobar"
}
}''')
empty_template = template_format.parse('''{
"HeatTemplateFormatVersion" : "2012-12-12",
}''')
parameter_template = template_format.parse('''{
"HeatTemplateFormatVersion" : "2012-12-12",
"Parameters" : {
"foo" : { "Type" : "String" },
"blarg" : { "Type" : "String", "Default": "quux" }
}
}''')
resource_template = template_format.parse('''{
"HeatTemplateFormatVersion" : "2012-12-12",
"Resources" : {
"foo" : { "Type" : "GenericResourceType" },
"blarg" : { "Type" : "GenericResourceType" }
}
}''')
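# Helper used by the parser tests below: parse a snippet against the mapping
# template and resolve any intrinsic functions it contains.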
def join(raw):
tmpl = template.Template(mapping_template)
return function.resolve(tmpl.parse(None, raw))
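# Minimal stand-in for a parent resource, used by the Fn::ResourceFacade tests.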
class DummyClass(object):
metadata = None
def metadata_get(self):
return self.metadata
def metadata_set(self, metadata):
self.metadata = metadata
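# Fixture that swaps in a fake stevedore extension manager so tests can
# register additional template classes without touching the real entry points.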
class TemplatePluginFixture(fixtures.Fixture):
def __init__(self, templates={}):
super(TemplatePluginFixture, self).__init__()
self.templates = [extension.Extension(k, None, v, None)
for (k, v) in templates.items()]
def _get_template_extension_manager(self):
return extension.ExtensionManager.make_test_instance(self.templates)
def setUp(self):
super(TemplatePluginFixture, self).setUp()
def clear_template_classes():
template._template_classes = None
clear_template_classes()
self.useFixture(mockpatch.PatchObject(
template,
'_get_template_extension_manager',
new=self._get_template_extension_manager))
self.addCleanup(clear_template_classes)
class TestTemplatePluginManager(common.HeatTestCase):
def test_template_NEW_good(self):
class NewTemplate(template.Template):
SECTIONS = (VERSION, MAPPINGS) = ('NEWTemplateFormatVersion',
'__undefined__')
RESOURCES = 'thingies'
def param_schemata(self):
pass
def get_section_name(self, section):
pass
def parameters(self, stack_identifier, user_params):
pass
def validate_resource_definitions(self, stack):
pass
def resource_definitions(self, stack):
pass
def add_resource(self, definition, name=None):
pass
def __getitem__(self, section):
return {}
def functions(self):
return {}
class NewTemplatePrint(function.Function):
def result(self):
return 'always this'
self.useFixture(TemplatePluginFixture(
{'NEWTemplateFormatVersion.2345-01-01': NewTemplate}))
t = {'NEWTemplateFormatVersion': '2345-01-01'}
tmpl = template.Template(t)
err = tmpl.validate()
self.assertIsNone(err)
class TestTemplateVersion(common.HeatTestCase):
versions = (('heat_template_version', '2013-05-23'),
('HeatTemplateFormatVersion', '2012-12-12'),
('AWSTemplateFormatVersion', '2010-09-09'))
def test_hot_version(self):
tmpl = {
'heat_template_version': '2013-05-23',
'foo': 'bar',
'parameters': {}
}
self.assertEqual(('heat_template_version', '2013-05-23'),
template.get_version(tmpl, self.versions))
def test_cfn_version(self):
tmpl = {
'AWSTemplateFormatVersion': '2010-09-09',
'foo': 'bar',
'Parameters': {}
}
self.assertEqual(('AWSTemplateFormatVersion', '2010-09-09'),
template.get_version(tmpl, self.versions))
def test_heat_cfn_version(self):
tmpl = {
'HeatTemplateFormatVersion': '2012-12-12',
'foo': 'bar',
'Parameters': {}
}
self.assertEqual(('HeatTemplateFormatVersion', '2012-12-12'),
template.get_version(tmpl, self.versions))
def test_missing_version(self):
tmpl = {
'foo': 'bar',
'Parameters': {}
}
ex = self.assertRaises(exception.InvalidTemplateVersion,
template.get_version, tmpl, self.versions)
self.assertEqual('The template version is invalid: Template version '
'was not provided', six.text_type(ex))
def test_ambiguous_version(self):
tmpl = {
'AWSTemplateFormatVersion': '2010-09-09',
'HeatTemplateFormatVersion': '2012-12-12',
'foo': 'bar',
'Parameters': {}
}
self.assertRaises(exception.InvalidTemplateVersion,
template.get_version, tmpl, self.versions)
class ParserTest(common.HeatTestCase):
def test_list(self):
raw = ['foo', 'bar', 'baz']
parsed = join(raw)
for i in six.moves.xrange(len(raw)):
self.assertEqual(raw[i], parsed[i])
self.assertIsNot(raw, parsed)
def test_dict(self):
raw = {'foo': 'bar', 'blarg': 'wibble'}
parsed = join(raw)
for k in raw:
self.assertEqual(raw[k], parsed[k])
self.assertIsNot(raw, parsed)
def test_dict_list(self):
raw = {'foo': ['bar', 'baz'], 'blarg': 'wibble'}
parsed = join(raw)
self.assertEqual(raw['blarg'], parsed['blarg'])
for i in six.moves.xrange(len(raw['foo'])):
self.assertEqual(raw['foo'][i], parsed['foo'][i])
self.assertIsNot(raw, parsed)
self.assertIsNot(raw['foo'], parsed['foo'])
def test_list_dict(self):
raw = [{'foo': 'bar', 'blarg': 'wibble'}, 'baz', 'quux']
parsed = join(raw)
for i in six.moves.xrange(1, len(raw)):
self.assertEqual(raw[i], parsed[i])
for k in raw[0]:
self.assertEqual(raw[0][k], parsed[0][k])
self.assertIsNot(raw, parsed)
self.assertIsNot(raw[0], parsed[0])
def test_join(self):
raw = {'Fn::Join': [' ', ['foo', 'bar', 'baz']]}
self.assertEqual('foo bar baz', join(raw))
def test_join_none(self):
raw = {'Fn::Join': [' ', ['foo', None, 'baz']]}
self.assertEqual('foo baz', join(raw))
def test_join_list(self):
raw = [{'Fn::Join': [' ', ['foo', 'bar', 'baz']]}, 'blarg', 'wibble']
parsed = join(raw)
self.assertEqual('foo bar baz', parsed[0])
for i in six.moves.xrange(1, len(raw)):
self.assertEqual(raw[i], parsed[i])
self.assertIsNot(raw, parsed)
def test_join_dict_val(self):
raw = {'quux': {'Fn::Join': [' ', ['foo', 'bar', 'baz']]},
'blarg': 'wibble'}
parsed = join(raw)
self.assertEqual('foo bar baz', parsed['quux'])
self.assertEqual(raw['blarg'], parsed['blarg'])
self.assertIsNot(raw, parsed)
class TestTemplateValidate(common.HeatTestCase):
def test_template_validate_cfn_check_t_digest(self):
t = {
'AWSTemplateFormatVersion': '2010-09-09',
'Description': 'foo',
'Parameters': {},
'Mappings': {},
'Resources': {
'server': {
'Type': 'OS::Nova::Server'
}
},
'Outputs': {},
}
tmpl = template.Template(t)
self.assertIsNone(tmpl.t_digest)
tmpl.validate()
self.assertEqual(
hashlib.sha256(six.text_type(t).encode('utf-8')).hexdigest(),
tmpl.t_digest, 'invalid template digest')
def test_template_validate_cfn_good(self):
t = {
'AWSTemplateFormatVersion': '2010-09-09',
'Description': 'foo',
'Parameters': {},
'Mappings': {},
'Resources': {
'server': {
'Type': 'OS::Nova::Server'
}
},
'Outputs': {},
}
tmpl = template.Template(t)
err = tmpl.validate()
self.assertIsNone(err)
# test with alternate version key
t = {
'HeatTemplateFormatVersion': '2012-12-12',
'Description': 'foo',
'Parameters': {},
'Mappings': {},
'Resources': {
'server': {
'Type': 'OS::Nova::Server'
}
},
'Outputs': {},
}
tmpl = template.Template(t)
err = tmpl.validate()
self.assertIsNone(err)
def test_template_validate_cfn_bad_section(self):
t = {
'AWSTemplateFormatVersion': '2010-09-09',
'Description': 'foo',
'Parameteers': {},
'Mappings': {},
'Resources': {
'server': {
'Type': 'OS::Nova::Server'
}
},
'Outputs': {},
}
tmpl = template.Template(t)
err = self.assertRaises(exception.InvalidTemplateSection,
tmpl.validate)
self.assertIn('Parameteers', six.text_type(err))
def test_template_validate_cfn_empty(self):
t = template_format.parse('''
AWSTemplateFormatVersion: 2010-09-09
Parameters:
Resources:
Outputs:
''')
tmpl = template.Template(t)
err = tmpl.validate()
self.assertIsNone(err)
def test_template_validate_hot_check_t_digest(self):
t = {
'heat_template_version': '2015-04-30',
'description': 'foo',
'parameters': {},
'resources': {
'server': {
'type': 'OS::Nova::Server'
}
},
'outputs': {},
}
tmpl = template.Template(t)
self.assertIsNone(tmpl.t_digest)
tmpl.validate()
self.assertEqual(hashlib.sha256(
six.text_type(t).encode('utf-8')).hexdigest(),
tmpl.t_digest, 'invalid template digest')
def test_template_validate_hot_good(self):
t = {
'heat_template_version': '2013-05-23',
'description': 'foo',
'parameters': {},
'resources': {
'server': {
'type': 'OS::Nova::Server'
}
},
'outputs': {},
}
tmpl = template.Template(t)
err = tmpl.validate()
self.assertIsNone(err)
def test_template_validate_hot_bad_section(self):
t = {
'heat_template_version': '2013-05-23',
'description': 'foo',
'parameteers': {},
'resources': {
'server': {
'type': 'OS::Nova::Server'
}
},
'outputs': {},
}
tmpl = template.Template(t)
err = self.assertRaises(exception.InvalidTemplateSection,
tmpl.validate)
self.assertIn('parameteers', six.text_type(err))
class TemplateTest(common.HeatTestCase):
def setUp(self):
super(TemplateTest, self).setUp()
self.ctx = utils.dummy_context()
@staticmethod
def resolve(snippet, template, stack=None):
return function.resolve(template.parse(stack, snippet))
def test_defaults(self):
empty = template.Template(empty_template)
self.assertNotIn('AWSTemplateFormatVersion', empty)
self.assertEqual('No description', empty['Description'])
self.assertEqual({}, empty['Mappings'])
self.assertEqual({}, empty['Resources'])
self.assertEqual({}, empty['Outputs'])
def test_aws_version(self):
tmpl = template.Template(mapping_template)
self.assertEqual(('AWSTemplateFormatVersion', '2010-09-09'),
tmpl.version)
def test_heat_version(self):
tmpl = template.Template(resource_template)
self.assertEqual(('HeatTemplateFormatVersion', '2012-12-12'),
tmpl.version)
def test_invalid_hot_version(self):
invalid_hot_version_tmp = template_format.parse(
'''{
"heat_template_version" : "2012-12-12",
}''')
init_ex = self.assertRaises(exception.InvalidTemplateVersion,
template.Template, invalid_hot_version_tmp)
valid_versions = ['2013-05-23', '2014-10-16',
'2015-04-30', '2015-10-15', '2016-04-08']
ex_error_msg = ('The template version is invalid: '
'"heat_template_version: 2012-12-12". '
'"heat_template_version" should be one of: %s'
% ', '.join(valid_versions))
self.assertEqual(ex_error_msg, six.text_type(init_ex))
def test_invalid_version_not_in_hot_versions(self):
invalid_hot_version_tmp = template_format.parse(
'''{
"heat_template_version" : "2012-12-12",
}''')
versions = {
('heat_template_version', '2013-05-23'): hot_t.HOTemplate20130523,
('heat_template_version', '2013-06-23'): hot_t.HOTemplate20130523
}
temp_copy = copy.deepcopy(template._template_classes)
template._template_classes = versions
init_ex = self.assertRaises(exception.InvalidTemplateVersion,
template.Template, invalid_hot_version_tmp)
ex_error_msg = ('The template version is invalid: '
'"heat_template_version: 2012-12-12". '
'"heat_template_version" should be '
'one of: 2013-05-23, 2013-06-23')
self.assertEqual(ex_error_msg, six.text_type(init_ex))
template._template_classes = temp_copy
def test_invalid_aws_version(self):
invalid_aws_version_tmp = template_format.parse(
'''{
"AWSTemplateFormatVersion" : "2012-12-12",
}''')
init_ex = self.assertRaises(exception.InvalidTemplateVersion,
template.Template, invalid_aws_version_tmp)
ex_error_msg = ('The template version is invalid: '
'"AWSTemplateFormatVersion: 2012-12-12". '
'"AWSTemplateFormatVersion" should be: 2010-09-09')
self.assertEqual(ex_error_msg, six.text_type(init_ex))
def test_invalid_version_not_in_aws_versions(self):
invalid_aws_version_tmp = template_format.parse(
'''{
"AWSTemplateFormatVersion" : "2012-12-12",
}''')
versions = {
('AWSTemplateFormatVersion', '2010-09-09'): cfn_t.CfnTemplate,
('AWSTemplateFormatVersion', '2011-06-23'): cfn_t.CfnTemplate
}
temp_copy = copy.deepcopy(template._template_classes)
template._template_classes = versions
init_ex = self.assertRaises(exception.InvalidTemplateVersion,
template.Template, invalid_aws_version_tmp)
ex_error_msg = ('The template version is invalid: '
'"AWSTemplateFormatVersion: 2012-12-12". '
'"AWSTemplateFormatVersion" should be '
'one of: 2010-09-09, 2011-06-23')
self.assertEqual(ex_error_msg, six.text_type(init_ex))
template._template_classes = temp_copy
def test_invalid_heat_version(self):
invalid_heat_version_tmp = template_format.parse(
'''{
"HeatTemplateFormatVersion" : "2010-09-09",
}''')
init_ex = self.assertRaises(exception.InvalidTemplateVersion,
template.Template,
invalid_heat_version_tmp)
ex_error_msg = ('The template version is invalid: '
'"HeatTemplateFormatVersion: 2010-09-09". '
'"HeatTemplateFormatVersion" should be: 2012-12-12')
self.assertEqual(ex_error_msg, six.text_type(init_ex))
def test_invalid_version_not_in_heat_versions(self):
invalid_heat_version_tmp = template_format.parse(
'''{
"HeatTemplateFormatVersion" : "2010-09-09",
}''')
versions = {
('HeatTemplateFormatVersion', '2012-12-12'): cfn_t.CfnTemplate,
('HeatTemplateFormatVersion', '2014-12-12'): cfn_t.CfnTemplate
}
temp_copy = copy.deepcopy(template._template_classes)
template._template_classes = versions
init_ex = self.assertRaises(exception.InvalidTemplateVersion,
template.Template,
invalid_heat_version_tmp)
ex_error_msg = ('The template version is invalid: '
'"HeatTemplateFormatVersion: 2010-09-09". '
'"HeatTemplateFormatVersion" should be '
'one of: 2012-12-12, 2014-12-12')
self.assertEqual(ex_error_msg, six.text_type(init_ex))
template._template_classes = temp_copy
def test_invalid_template(self):
scanner_error = '''
1
Mappings:
ValidMapping:
TestKey: TestValue
'''
parser_error = '''
Mappings:
ValidMapping:
TestKey: {TestKey1: "Value1" TestKey2: "Value2"}
'''
self.assertRaises(ValueError, template_format.parse, scanner_error)
self.assertRaises(ValueError, template_format.parse, parser_error)
def test_invalid_section(self):
tmpl = template.Template({'HeatTemplateFormatVersion': '2012-12-12',
'Foo': ['Bar']})
self.assertNotIn('Foo', tmpl)
def test_find_in_map(self):
tmpl = template.Template(mapping_template)
stk = stack.Stack(self.ctx, 'test', tmpl)
find = {'Fn::FindInMap': ["ValidMapping", "TestKey", "TestValue"]}
self.assertEqual("wibble", self.resolve(find, tmpl, stk))
def test_find_in_invalid_map(self):
tmpl = template.Template(mapping_template)
stk = stack.Stack(self.ctx, 'test', tmpl)
finds = ({'Fn::FindInMap': ["InvalidMapping", "ValueList", "foo"]},
{'Fn::FindInMap': ["InvalidMapping", "ValueString", "baz"]},
{'Fn::FindInMap': ["MapList", "foo", "bar"]},
{'Fn::FindInMap': ["MapString", "foo", "bar"]})
for find in finds:
self.assertRaises((KeyError, TypeError), self.resolve,
find, tmpl, stk)
def test_bad_find_in_map(self):
tmpl = template.Template(mapping_template)
stk = stack.Stack(self.ctx, 'test', tmpl)
finds = ({'Fn::FindInMap': "String"},
{'Fn::FindInMap': {"Dict": "String"}},
{'Fn::FindInMap': ["ShortList", "foo"]},
{'Fn::FindInMap': ["ReallyShortList"]})
for find in finds:
self.assertRaises(KeyError, self.resolve, find, tmpl, stk)
def test_param_refs(self):
env = environment.Environment({'foo': 'bar', 'blarg': 'wibble'})
tmpl = template.Template(parameter_template, env=env)
stk = stack.Stack(self.ctx, 'test', tmpl)
p_snippet = {"Ref": "foo"}
self.assertEqual("bar", self.resolve(p_snippet, tmpl, stk))
def test_param_ref_missing(self):
env = environment.Environment({'foo': 'bar'})
tmpl = template.Template(parameter_template, env=env)
stk = stack.Stack(self.ctx, 'test', tmpl)
tmpl.env = environment.Environment({})
stk.parameters = parameters.Parameters(stk.identifier(), tmpl)
snippet = {"Ref": "foo"}
self.assertRaises(exception.UserParameterMissing,
self.resolve,
snippet, tmpl, stk)
def test_resource_refs(self):
tmpl = template.Template(resource_template)
stk = stack.Stack(self.ctx, 'test', tmpl)
self.m.StubOutWithMock(stk['foo'], 'FnGetRefId')
stk['foo'].FnGetRefId().MultipleTimes().AndReturn('bar')
self.m.ReplayAll()
r_snippet = {"Ref": "foo"}
self.assertEqual("bar", self.resolve(r_snippet, tmpl, stk))
self.m.VerifyAll()
def test_resource_refs_param(self):
tmpl = template.Template(resource_template)
stk = stack.Stack(self.ctx, 'test', tmpl)
p_snippet = {"Ref": "baz"}
parsed = tmpl.parse(stk, p_snippet)
self.assertIsInstance(parsed, cfn_funcs.ParamRef)
def test_select_from_list(self):
tmpl = template.Template(empty_template)
data = {"Fn::Select": ["1", ["foo", "bar"]]}
self.assertEqual("bar", self.resolve(data, tmpl))
def test_select_from_list_integer_index(self):
tmpl = template.Template(empty_template)
data = {"Fn::Select": [1, ["foo", "bar"]]}
self.assertEqual("bar", self.resolve(data, tmpl))
def test_select_from_list_out_of_bound(self):
tmpl = template.Template(empty_template)
data = {"Fn::Select": ["0", ["foo", "bar"]]}
self.assertEqual("foo", self.resolve(data, tmpl))
data = {"Fn::Select": ["1", ["foo", "bar"]]}
self.assertEqual("bar", self.resolve(data, tmpl))
data = {"Fn::Select": ["2", ["foo", "bar"]]}
self.assertEqual("", self.resolve(data, tmpl))
def test_select_from_dict(self):
tmpl = template.Template(empty_template)
data = {"Fn::Select": ["red", {"red": "robin", "re": "foo"}]}
self.assertEqual("robin", self.resolve(data, tmpl))
def test_select_int_from_dict(self):
tmpl = template.Template(empty_template)
data = {"Fn::Select": ["2", {"1": "bar", "2": "foo"}]}
self.assertEqual("foo", self.resolve(data, tmpl))
def test_select_from_none(self):
tmpl = template.Template(empty_template)
data = {"Fn::Select": ["red", None]}
self.assertEqual("", self.resolve(data, tmpl))
def test_select_from_dict_not_existing(self):
tmpl = template.Template(empty_template)
data = {"Fn::Select": ["green", {"red": "robin", "re": "foo"}]}
self.assertEqual("", self.resolve(data, tmpl))
def test_select_from_serialized_json_map(self):
tmpl = template.Template(empty_template)
js = json.dumps({"red": "robin", "re": "foo"})
data = {"Fn::Select": ["re", js]}
self.assertEqual("foo", self.resolve(data, tmpl))
def test_select_from_serialized_json_list(self):
tmpl = template.Template(empty_template)
js = json.dumps(["foo", "fee", "fum"])
data = {"Fn::Select": ["0", js]}
self.assertEqual("foo", self.resolve(data, tmpl))
def test_select_empty_string(self):
tmpl = template.Template(empty_template)
data = {"Fn::Select": ["0", '']}
self.assertEqual("", self.resolve(data, tmpl))
data = {"Fn::Select": ["1", '']}
self.assertEqual("", self.resolve(data, tmpl))
data = {"Fn::Select": ["one", '']}
self.assertEqual("", self.resolve(data, tmpl))
def test_join(self):
tmpl = template.Template(empty_template)
join = {"Fn::Join": [" ", ["foo", "bar"]]}
self.assertEqual("foo bar", self.resolve(join, tmpl))
def test_split_ok(self):
tmpl = template.Template(empty_template)
data = {"Fn::Split": [";", "foo; bar; achoo"]}
self.assertEqual(['foo', ' bar', ' achoo'], self.resolve(data, tmpl))
def test_split_no_delim_in_str(self):
tmpl = template.Template(empty_template)
data = {"Fn::Split": [";", "foo, bar, achoo"]}
self.assertEqual(['foo, bar, achoo'], self.resolve(data, tmpl))
def test_base64(self):
tmpl = template.Template(empty_template)
snippet = {"Fn::Base64": "foobar"}
# For now, the Base64 function just returns the original text, and
# does not convert to base64 (see issue #133)
self.assertEqual("foobar", self.resolve(snippet, tmpl))
def test_get_azs(self):
tmpl = template.Template(empty_template)
snippet = {"Fn::GetAZs": ""}
self.assertEqual(["nova"], self.resolve(snippet, tmpl))
def test_get_azs_with_stack(self):
tmpl = template.Template(empty_template)
snippet = {"Fn::GetAZs": ""}
stk = stack.Stack(self.ctx, 'test_stack',
template.Template(empty_template))
self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
fc = fakes_nova.FakeClient()
nova.NovaClientPlugin._create().AndReturn(fc)
self.m.ReplayAll()
self.assertEqual(["nova1"], self.resolve(snippet, tmpl, stk))
def test_replace_string_values(self):
tmpl = template.Template(empty_template)
snippet = {"Fn::Replace": [
{'$var1': 'foo', '%var2%': 'bar'},
'$var1 is %var2%'
]}
self.assertEqual('foo is bar', self.resolve(snippet, tmpl))
def test_replace_number_values(self):
tmpl = template.Template(empty_template)
snippet = {"Fn::Replace": [
{'$var1': 1, '%var2%': 2},
'$var1 is not %var2%'
]}
self.assertEqual('1 is not 2', self.resolve(snippet, tmpl))
snippet = {"Fn::Replace": [
{'$var1': 1.3, '%var2%': 2.5},
'$var1 is not %var2%'
]}
self.assertEqual('1.3 is not 2.5', self.resolve(snippet, tmpl))
def test_replace_none_values(self):
tmpl = template.Template(empty_template)
snippet = {"Fn::Replace": [
{'$var1': None, '${var2}': None},
'"$var1" is "${var2}"'
]}
self.assertEqual('"" is ""', self.resolve(snippet, tmpl))
def test_replace_missing_key(self):
tmpl = template.Template(empty_template)
snippet = {"Fn::Replace": [
{'$var1': 'foo', 'var2': 'bar'},
'"$var1" is "${var3}"'
]}
self.assertEqual('"foo" is "${var3}"', self.resolve(snippet, tmpl))
def test_replace_param_values(self):
env = environment.Environment({'foo': 'wibble'})
tmpl = template.Template(parameter_template, env=env)
stk = stack.Stack(self.ctx, 'test_stack', tmpl)
snippet = {"Fn::Replace": [
{'$var1': {'Ref': 'foo'}, '%var2%': {'Ref': 'blarg'}},
'$var1 is %var2%'
]}
self.assertEqual('wibble is quux', self.resolve(snippet, tmpl, stk))
def test_member_list2map_good(self):
tmpl = template.Template(empty_template)
snippet = {"Fn::MemberListToMap": [
'Name', 'Value', ['.member.0.Name=metric',
'.member.0.Value=cpu',
'.member.1.Name=size',
'.member.1.Value=56']]}
self.assertEqual({'metric': 'cpu', 'size': '56'},
self.resolve(snippet, tmpl))
def test_member_list2map_good2(self):
tmpl = template.Template(empty_template)
snippet = {"Fn::MemberListToMap": [
'Key', 'Value', ['.member.2.Key=metric',
'.member.2.Value=cpu',
'.member.5.Key=size',
'.member.5.Value=56']]}
self.assertEqual({'metric': 'cpu', 'size': '56'},
self.resolve(snippet, tmpl))
def test_resource_facade(self):
metadata_snippet = {'Fn::ResourceFacade': 'Metadata'}
deletion_policy_snippet = {'Fn::ResourceFacade': 'DeletionPolicy'}
update_policy_snippet = {'Fn::ResourceFacade': 'UpdatePolicy'}
parent_resource = DummyClass()
parent_resource.metadata_set({"foo": "bar"})
parent_resource.t = rsrc_defn.ResourceDefinition(
'parent', 'SomeType',
deletion_policy=rsrc_defn.ResourceDefinition.RETAIN,
update_policy={"blarg": "wibble"})
parent_resource.stack = stack.Stack(self.ctx, 'toplevel_stack',
template.Template(empty_template))
stk = stack.Stack(self.ctx, 'test_stack',
template.Template(empty_template),
parent_resource='parent', owner_id=45)
stk._parent_stack = dict(parent=parent_resource)
self.assertEqual({"foo": "bar"},
self.resolve(metadata_snippet, stk.t, stk))
self.assertEqual('Retain',
self.resolve(deletion_policy_snippet, stk.t, stk))
self.assertEqual({"blarg": "wibble"},
self.resolve(update_policy_snippet, stk.t, stk))
def test_resource_facade_function(self):
deletion_policy_snippet = {'Fn::ResourceFacade': 'DeletionPolicy'}
parent_resource = DummyClass()
parent_resource.metadata_set({"foo": "bar"})
parent_resource.stack = stack.Stack(self.ctx, 'toplevel_stack',
template.Template(empty_template))
del_policy = cfn_funcs.Join(parent_resource.stack,
'Fn::Join', ['eta', ['R', 'in']])
parent_resource.t = rsrc_defn.ResourceDefinition(
'parent', 'SomeType',
deletion_policy=del_policy)
stk = stack.Stack(self.ctx, 'test_stack',
template.Template(empty_template),
parent_resource='parent')
stk._parent_stack = dict(parent=parent_resource)
self.assertEqual('Retain',
self.resolve(deletion_policy_snippet, stk.t, stk))
def test_resource_facade_invalid_arg(self):
snippet = {'Fn::ResourceFacade': 'wibble'}
stk = stack.Stack(self.ctx, 'test_stack',
template.Template(empty_template))
error = self.assertRaises(ValueError,
self.resolve, snippet, stk.t, stk)
self.assertIn(list(six.iterkeys(snippet))[0], six.text_type(error))
def test_resource_facade_missing_deletion_policy(self):
snippet = {'Fn::ResourceFacade': 'DeletionPolicy'}
parent_resource = DummyClass()
parent_resource.metadata_set({"foo": "bar"})
parent_resource.t = rsrc_defn.ResourceDefinition('parent', 'SomeType')
parent_resource.stack = stack.Stack(self.ctx, 'toplevel_stack',
template.Template(empty_template))
stk = stack.Stack(self.ctx, 'test_stack',
template.Template(empty_template),
parent_resource='parent', owner_id=78)
stk._parent_stack = dict(parent=parent_resource)
self.assertEqual('Delete', self.resolve(snippet, stk.t, stk))
def test_prevent_parameters_access(self):
expected_description = "This can be accessed"
tmpl = template.Template({
'AWSTemplateFormatVersion': '2010-09-09',
'Description': expected_description,
'Parameters': {
'foo': {'Type': 'String', 'Required': True}
}
})
self.assertEqual(expected_description, tmpl['Description'])
keyError = self.assertRaises(KeyError, tmpl.__getitem__, 'Parameters')
self.assertIn("can not be accessed directly", six.text_type(keyError))
def test_parameters_section_not_iterable(self):
expected_description = "This can be accessed"
tmpl = template.Template({
'AWSTemplateFormatVersion': '2010-09-09',
'Description': expected_description,
'Parameters': {
'foo': {'Type': 'String', 'Required': True}
}
})
self.assertEqual(expected_description, tmpl['Description'])
self.assertNotIn('Parameters', six.iterkeys(tmpl))
def test_add_resource(self):
cfn_tpl = template_format.parse('''
AWSTemplateFormatVersion: 2010-09-09
Resources:
resource1:
Type: AWS::EC2::Instance
Properties:
property1: value1
Metadata:
foo: bar
DependsOn: dummy
DeletionPolicy: Retain
UpdatePolicy:
foo: bar
resource2:
Type: AWS::EC2::Instance
''')
source = template.Template(cfn_tpl)
empty = template.Template(copy.deepcopy(empty_template))
stk = stack.Stack(self.ctx, 'test_stack', source)
for defn in six.itervalues(source.resource_definitions(stk)):
empty.add_resource(defn)
self.assertEqual(cfn_tpl['Resources'], empty.t['Resources'])
def test_create_empty_template_default_version(self):
empty_template = template.Template.create_empty_template()
self.assertEqual(hot_t.HOTemplate20150430, empty_template.__class__)
self.assertEqual({}, empty_template['parameter_groups'])
self.assertEqual({}, empty_template['resources'])
self.assertEqual({}, empty_template['outputs'])
def test_create_empty_template_returns_correct_version(self):
t = template_format.parse('''
AWSTemplateFormatVersion: 2010-09-09
Parameters:
Resources:
Outputs:
''')
aws_tmpl = template.Template(t)
empty_template = template.Template.create_empty_template(
version=aws_tmpl.version)
self.assertEqual(aws_tmpl.__class__, empty_template.__class__)
self.assertEqual({}, empty_template['Mappings'])
self.assertEqual({}, empty_template['Resources'])
self.assertEqual({}, empty_template['Outputs'])
t = template_format.parse('''
HeatTemplateFormatVersion: 2012-12-12
Parameters:
Resources:
Outputs:
''')
heat_tmpl = template.Template(t)
empty_template = template.Template.create_empty_template(
version=heat_tmpl.version)
self.assertEqual(heat_tmpl.__class__, empty_template.__class__)
self.assertEqual({}, empty_template['Mappings'])
self.assertEqual({}, empty_template['Resources'])
self.assertEqual({}, empty_template['Outputs'])
t = template_format.parse('''
heat_template_version: 2015-04-30
parameter_groups:
resources:
outputs:
''')
hot_tmpl = template.Template(t)
empty_template = template.Template.create_empty_template(
version=hot_tmpl.version)
self.assertEqual(hot_tmpl.__class__, empty_template.__class__)
self.assertEqual({}, empty_template['parameter_groups'])
self.assertEqual({}, empty_template['resources'])
self.assertEqual({}, empty_template['outputs'])
class TemplateFnErrorTest(common.HeatTestCase):
scenarios = [
('select_from_list_not_int',
dict(expect=TypeError,
snippet={"Fn::Select": ["one", ["foo", "bar"]]})),
('select_from_dict_not_str',
dict(expect=TypeError,
snippet={"Fn::Select": [1, {"red": "robin", "re": "foo"}]})),
('select_from_serialized_json_wrong',
dict(expect=ValueError,
snippet={"Fn::Select": ["not", "no json"]})),
('select_wrong_num_args_1',
dict(expect=ValueError,
snippet={"Fn::Select": []})),
('select_wrong_num_args_2',
dict(expect=ValueError,
snippet={"Fn::Select": ["4"]})),
('select_wrong_num_args_3',
dict(expect=ValueError,
snippet={"Fn::Select": ["foo", {"foo": "bar"}, ""]})),
('select_wrong_num_args_4',
dict(expect=TypeError,
snippet={'Fn::Select': [['f'], {'f': 'food'}]})),
('split_no_delim',
dict(expect=ValueError,
snippet={"Fn::Split": ["foo, bar, achoo"]})),
('split_no_list',
dict(expect=TypeError,
snippet={"Fn::Split": "foo, bar, achoo"})),
('base64_list',
dict(expect=TypeError,
snippet={"Fn::Base64": ["foobar"]})),
('base64_dict',
dict(expect=TypeError,
snippet={"Fn::Base64": {"foo": "bar"}})),
('replace_list_value',
dict(expect=TypeError,
snippet={"Fn::Replace": [
{'$var1': 'foo', '%var2%': ['bar']},
'$var1 is %var2%']})),
('replace_list_mapping',
dict(expect=TypeError,
snippet={"Fn::Replace": [
['var1', 'foo', 'var2', 'bar'],
'$var1 is ${var2}']})),
('replace_dict',
dict(expect=TypeError,
snippet={"Fn::Replace": {}})),
('replace_missing_template',
dict(expect=ValueError,
snippet={"Fn::Replace": [['var1', 'foo', 'var2', 'bar']]})),
('replace_none_template',
dict(expect=TypeError,
snippet={"Fn::Replace": [['var2', 'bar'], None]})),
('replace_list_string',
dict(expect=TypeError,
snippet={"Fn::Replace": [
{'var1': 'foo', 'var2': 'bar'},
['$var1 is ${var2}']]})),
('join_string',
dict(expect=TypeError,
snippet={"Fn::Join": [" ", "foo"]})),
('join_dict',
dict(expect=TypeError,
snippet={"Fn::Join": [" ", {"foo": "bar"}]})),
('join_wrong_num_args_1',
dict(expect=ValueError,
snippet={"Fn::Join": []})),
('join_wrong_num_args_2',
dict(expect=ValueError,
snippet={"Fn::Join": [" "]})),
('join_wrong_num_args_3',
dict(expect=ValueError,
snippet={"Fn::Join": [" ", {"foo": "bar"}, ""]})),
('join_string_nodelim',
dict(expect=TypeError,
snippet={"Fn::Join": "o"})),
('join_string_nodelim_1',
dict(expect=TypeError,
snippet={"Fn::Join": "oh"})),
('join_string_nodelim_2',
dict(expect=TypeError,
snippet={"Fn::Join": "ohh"})),
('join_dict_nodelim1',
dict(expect=TypeError,
snippet={"Fn::Join": {"foo": "bar"}})),
('join_dict_nodelim2',
dict(expect=TypeError,
snippet={"Fn::Join": {"foo": "bar", "blarg": "wibble"}})),
('join_dict_nodelim3',
dict(expect=TypeError,
snippet={"Fn::Join": {"foo": "bar", "blarg": "wibble",
"baz": "quux"}})),
('member_list2map_no_key_or_val',
dict(expect=TypeError,
snippet={"Fn::MemberListToMap": [
'Key', ['.member.2.Key=metric',
'.member.2.Value=cpu',
'.member.5.Key=size',
'.member.5.Value=56']]})),
('member_list2map_no_list',
dict(expect=TypeError,
snippet={"Fn::MemberListToMap": [
'Key', '.member.2.Key=metric']})),
('member_list2map_not_string',
dict(expect=TypeError,
snippet={"Fn::MemberListToMap": [
'Name', ['Value'], ['.member.0.Name=metric',
'.member.0.Value=cpu',
'.member.1.Name=size',
'.member.1.Value=56']]})),
]
def test_bad_input(self):
tmpl = template.Template(empty_template)
def resolve(s):
return TemplateTest.resolve(s, tmpl)
error = self.assertRaises(self.expect,
resolve,
self.snippet)
self.assertIn(list(six.iterkeys(self.snippet))[0],
six.text_type(error))
class ResolveDataTest(common.HeatTestCase):
def setUp(self):
super(ResolveDataTest, self).setUp()
self.username = 'parser_stack_test_user'
self.ctx = utils.dummy_context()
self.stack = stack.Stack(self.ctx, 'resolve_test_stack',
template.Template(empty_template))
def resolve(self, snippet):
return function.resolve(self.stack.t.parse(self.stack, snippet))
def test_join_split(self):
# join
snippet = {'Fn::Join': [';', ['one', 'two', 'three']]}
self.assertEqual('one;two;three', self.resolve(snippet))
# join then split
snippet = {'Fn::Split': [';', snippet]}
self.assertEqual(['one', 'two', 'three'], self.resolve(snippet))
def test_split_join_split_join(self):
# each snippet in this test encapsulates
# the snippet from the previous step, leading
# to increasingly nested function calls
# split
snippet = {'Fn::Split': [',', 'one,two,three']}
self.assertEqual(['one', 'two', 'three'], self.resolve(snippet))
# split then join
snippet = {'Fn::Join': [';', snippet]}
self.assertEqual('one;two;three', self.resolve(snippet))
# split then join then split
snippet = {'Fn::Split': [';', snippet]}
self.assertEqual(['one', 'two', 'three'], self.resolve(snippet))
# split then join then split then join
snippet = {'Fn::Join': ['-', snippet]}
self.assertEqual('one-two-three', self.resolve(snippet))
def test_join_recursive(self):
raw = {'Fn::Join': ['\n', [{'Fn::Join':
[' ', ['foo', 'bar']]}, 'baz']]}
self.assertEqual('foo bar\nbaz', self.resolve(raw))
def test_join_not_string(self):
snippet = {'Fn::Join': ['\n', [{'Fn::Join':
[' ', ['foo', 45]]}, 'baz']]}
error = self.assertRaises(TypeError,
self.resolve, snippet)
self.assertIn('45', six.text_type(error))
def test_base64_replace(self):
raw = {'Fn::Base64': {'Fn::Replace': [
{'foo': 'bar'}, 'Meet at the foo']}}
self.assertEqual('Meet at the bar', self.resolve(raw))
def test_replace_base64(self):
raw = {'Fn::Replace': [{'foo': 'bar'}, {
'Fn::Base64': 'Meet at the foo'}]}
self.assertEqual('Meet at the bar', self.resolve(raw))
def test_nested_selects(self):
data = {
'a': ['one', 'two', 'three'],
'b': ['een', 'twee', {'d': 'D', 'e': 'E'}]
}
raw = {'Fn::Select': ['a', data]}
self.assertEqual(data['a'], self.resolve(raw))
raw = {'Fn::Select': ['b', data]}
self.assertEqual(data['b'], self.resolve(raw))
raw = {
'Fn::Select': ['1', {
'Fn::Select': ['b', data]
}]
}
self.assertEqual('twee', self.resolve(raw))
raw = {
'Fn::Select': ['e', {
'Fn::Select': ['2', {
'Fn::Select': ['b', data]
}]
}]
}
self.assertEqual('E', self.resolve(raw))
def test_member_list_select(self):
snippet = {'Fn::Select': ['metric', {"Fn::MemberListToMap": [
'Name', 'Value', ['.member.0.Name=metric',
'.member.0.Value=cpu',
'.member.1.Name=size',
'.member.1.Value=56']]}]}
self.assertEqual('cpu', self.resolve(snippet))
|
|
"""
Test various aspects of the detector classes
"""
import numpy
import pytest
import serpentTools
from serpentTools import detectors
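# Synthetic detector data: 25 tally bins laid out on a 5x5 mesh, with matching
# tally and error arrays, shared by the detector tests below.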
@pytest.fixture(scope="module")
def meshedBinData():
bins = numpy.ones((25, 12), order="f")
bins[:, -1] = range(25)
bins[:, -2] = range(25)
bins[:, -3] = numpy.tile(range(1, 6), 5)
bins[:, -4] = numpy.repeat(range(1, 6), 5)
tallies = numpy.arange(25).reshape(5, 5)
errors = tallies.copy()
return bins, tallies, errors
def testDetectorProperties(meshedBinData):
bins, tallies, errors = meshedBinData
detector = detectors.Detector(
"test", bins=bins, tallies=tallies, errors=errors)
# Modify the tally data
detector.tallies = detector.tallies * 2
assert (detector.tallies == tallies * 2).all()
detector.errors = detector.errors * 2
assert (detector.errors == errors * 2).all()
energies = numpy.arange(bins.shape[0] * 3).reshape(bins.shape[0], 3)
energyDet = detectors.Detector(
"energy", bins=bins, grids={"E": energies})
assert (energyDet.energy == energies).all()
# Test setting indexes
detector.indexes = tuple(range(len(tallies.shape)))
with pytest.raises(ValueError, match="indexes"):
detector.indexes = detector.indexes[:1]
@pytest.mark.parametrize("how", ["bins", "grids", "bare", "init"])
def testCartesianDetector(meshedBinData, how):
xgrid = numpy.empty((5, 3))
xgrid[:, 0] = range(5)
xgrid[:, 1] = xgrid[:, 0] + 1
xgrid[:, 2] = xgrid[:, 0] + 0.5
ygrid = xgrid.copy()
zgrid = numpy.array([[-1, 1, 0]])
bins, tallies, errors = meshedBinData
grids = {"X": xgrid.copy(), "Y": ygrid.copy(), "Z": zgrid.copy()}
if how == "bare":
detector = detectors.CartesianDetector("xy_bare")
detector.bins = bins
detector.tallies = tallies
detector.errors = errors
detector.x = grids["X"]
detector.y = grids["Y"]
detector.z = grids["Z"]
elif how == "grids":
detector = detectors.CartesianDetector(
"xy_full", bins=bins, tallies=tallies, errors=errors,
grids=grids)
elif how == "init":
detector = detectors.CartesianDetector(
"xy_full", bins=bins, tallies=tallies, errors=errors,
x=grids["X"], y=grids["Y"], z=grids["Z"])
elif how == "bins":
detector = detectors.CartesianDetector.fromTallyBins(
"xyBins", bins=bins, grids=grids)
# Modify the tally data
detector.tallies = tallies * 2
assert (detector.tallies == tallies * 2).all()
detector.errors = errors * 2
assert (detector.errors == errors * 2).all()
assert (detector.x == xgrid).all()
assert (detector.y == ygrid).all()
assert (detector.z == zgrid).all()
# Emulate scaling by some conversion factor
detector.x *= 100
assert (detector.x == xgrid * 100).all()
detector.y *= 100
assert (detector.y == ygrid * 100).all()
detector.z *= 100
assert (detector.z == zgrid * 100).all()
# Test failure modes
for gridk in ["x", "y", "z"]:
msg = ".*shape of {} grid".format(gridk)
with pytest.raises(ValueError, match=msg):
setattr(detector, gridk, [1, 2, 3])
# Test setting indexes
detector.indexes = tuple(range(len(tallies.shape)))
with pytest.raises(ValueError, match="indexes"):
detector.indexes = detector.indexes[:1]
@pytest.mark.parametrize("how", ["grids", "bins", "bare", "init"])
def testHexagonalDetector(meshedBinData, how):
centers = numpy.array([
[-3.000000E+00, -1.732051E+00],
[-2.500000E+00, -8.660254E-01],
[-2.000000E+00, 0.000000E+00],
[-1.500000E+00, 8.660254E-01],
[-1.000000E+00, 1.732051E+00],
[-2.000000E+00, -1.732051E+00],
[-1.500000E+00, -8.660254E-01],
[-1.000000E+00, 0.000000E+00],
[-5.000000E-01, 8.660254E-01],
[0.000000E+00, 1.732051E+00],
[-1.000000E+00, -1.732051E+00],
[-5.000000E-01, -8.660254E-01],
[0.000000E+00, 0.000000E+00],
[5.000000E-01, 8.660254E-01],
[1.000000E+00, 1.732051E+00],
[0.000000E+00, -1.732051E+00],
[5.000000E-01, -8.660254E-01],
[1.000000E+00, 0.000000E+00],
[1.500000E+00, 8.660254E-01],
[2.000000E+00, 1.732051E+00],
[1.000000E+00, -1.732051E+00],
[1.500000E+00, -8.660254E-01],
[2.000000E+00, 0.000000E+00],
[2.500000E+00, 8.660254E-01],
[3.000000E+00, 1.732051E+00],
])
z = numpy.array([[0, 0, 0]])
bins, tallies, errors = meshedBinData
pitch = 1.0
hexType = 2
if how == "init":
detector = detectors.HexagonalDetector(
"hexInit", bins=bins, tallies=tallies, errors=errors,
z=z, centers=centers, pitch=pitch, hexType=hexType)
elif how == "grids":
detector = detectors.HexagonalDetector(
"hexGrids", bins=bins, tallies=tallies, errors=errors,
grids={"Z": z, "COORD": centers})
elif how == "bins":
detector = detectors.HexagonalDetector.fromTallyBins(
"hexBins", bins, grids={"Z": z, "COORD": centers})
elif how == "bare":
detector = detectors.HexagonalDetector("hexBins")
detector.bins = bins
detector.tallies = tallies
detector.errors = errors
detector.z = z
detector.centers = centers
if how != "init":
detector.pitch = pitch
detector.hexType = hexType
assert (detector.bins == bins).all()
assert (detector.tallies == tallies).all()
assert (detector.errors == errors).all()
assert (detector.z == z).all()
assert (detector.centers == centers).all()
assert detector.pitch == pitch
assert detector.hexType == hexType
detector.tallies = detector.tallies * 2
detector.errors = detector.errors * 2
detector.z = detector.z * 2
detector.centers = detector.centers * 2
detector.pitch = detector.pitch * 2
assert (detector.tallies == tallies * 2).all()
assert (detector.errors == errors * 2).all()
assert (detector.z == z * 2).all()
assert (detector.centers == centers * 2).all()
assert detector.pitch == pitch * 2
# Test failure modes
with pytest.raises(ValueError, match="Hex type"):
detector.hexType = -1
with pytest.raises(ValueError, match="Pitch must be positive"):
detector.pitch = 0
with pytest.raises(ValueError, match="Pitch must be positive"):
detector.pitch = -1
with pytest.raises(TypeError, match="Cannot set pitch"):
detector.pitch = [1, 2]
with pytest.raises(ValueError, match="Expected centers"):
detector.centers = detector.centers[:5]
with pytest.raises(ValueError, match="Expected shape of z"):
detector.z = [[-1, 0, -0.5], [0, 1, 0.5]]
@pytest.fixture(scope="module")
def binsWithScores():
bins = numpy.ones((2, 13))
bins[0, -3] = 1.5
return bins
def testNoScores(binsWithScores):
with pytest.raises(ValueError, match=".*scores"):
detectors.Detector("scored", bins=binsWithScores)
with pytest.raises(ValueError, match=".*scores"):
detectors.Detector("scored").bins = binsWithScores
with pytest.raises(ValueError, match=".*scores"):
detectors.Detector.fromTallyBins("scored", bins=binsWithScores)
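# Detector file with similarly named detectors ("spectrum", "spectrumA",
# "spectrumB") whose grid arrays belong only to spectrumA.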
@pytest.fixture
def similarDetectorFile(tmp_path):
detFile = tmp_path / "similar_det0.m"
detFile.absolute()
with detFile.open("w") as stream:
stream.write("""DETspectrum = [
1 1 1 1 1 1 1 1 1 1 8.19312E+17 0.05187
];
DETspectrumA = [
1 1 1 1 1 1 1 1 1 1 8.19312E+17 0.05187
];
DETspectrumAE = [
0.00000E-11 4.13994E-07 2.07002E-07
];
DETspectrumACOORD = [
0.00000E-11 4.13994E-07
];
DETspectrumB = [
1 1 1 1 1 1 1 1 1 1 8.19312E+17 0.05187
];""")
yield str(detFile)
detFile.unlink()
def test_similarDetectors(similarDetectorFile):
reader = serpentTools.read(similarDetectorFile)
assert set(reader.detectors) == {"spectrum", "spectrumA", "spectrumB"}
assert isinstance(reader["spectrumA"], detectors.HexagonalDetector)
|
|
from __future__ import absolute_import, print_function
import random
import numpy as np
from .. import types
from .templates import (ConcreteTemplate, AbstractTemplate, AttributeTemplate,
CallableTemplate, Registry, signature, bound_function,
make_callable_template)
# Ensure list is typed as a collection as well
from . import collections
registry = Registry()
builtin = registry.register
builtin_global = registry.register_global
builtin_attr = registry.register_attr
class ListBuiltin(AbstractTemplate):
key = list
def generic(self, args, kws):
assert not kws
if args:
iterable, = args
if isinstance(iterable, types.IterableType):
dtype = iterable.iterator_type.yield_type
return signature(types.List(dtype), iterable)
builtin_global(list, types.Function(ListBuiltin))
class SortedBuiltin(CallableTemplate):
key = sorted
def generic(self):
def typer(iterable, reverse=None):
if not isinstance(iterable, types.IterableType):
return
if (reverse is not None and
not isinstance(reverse, types.Boolean)):
return
return types.List(iterable.iterator_type.yield_type)
return typer
builtin_global(sorted, types.Function(SortedBuiltin))
@builtin_attr
class ListAttribute(AttributeTemplate):
key = types.List
# NOTE: some of these should be Sequence / MutableSequence methods
@bound_function("list.append")
def resolve_append(self, list, args, kws):
item, = args
assert not kws
unified = self.context.unify_pairs(list.dtype, item)
sig = signature(types.none, unified)
sig.recvr = types.List(unified)
return sig
@bound_function("list.clear")
def resolve_clear(self, list, args, kws):
assert not args
assert not kws
return signature(types.none)
@bound_function("list.copy")
def resolve_copy(self, list, args, kws):
assert not args
assert not kws
return signature(list)
@bound_function("list.count")
def resolve_count(self, list, args, kws):
item, = args
assert not kws
return signature(types.intp, list.dtype)
@bound_function("list.extend")
def resolve_extend(self, list, args, kws):
iterable, = args
assert not kws
if not isinstance(iterable, types.IterableType):
return
dtype = iterable.iterator_type.yield_type
unified = self.context.unify_pairs(list.dtype, dtype)
sig = signature(types.none, iterable)
sig.recvr = types.List(unified)
return sig
@bound_function("list.index")
def resolve_index(self, list, args, kws):
assert not kws
if len(args) == 1:
return signature(types.intp, list.dtype)
elif len(args) == 2:
if isinstance(args[1], types.Integer):
return signature(types.intp, list.dtype, types.intp)
elif len(args) == 3:
if (isinstance(args[1], types.Integer)
and isinstance(args[2], types.Integer)):
return signature(types.intp, list.dtype, types.intp, types.intp)
@bound_function("list.insert")
def resolve_insert(self, list, args, kws):
idx, item = args
assert not kws
if isinstance(idx, types.Integer):
unified = self.context.unify_pairs(list.dtype, item)
sig = signature(types.none, types.intp, unified)
sig.recvr = types.List(unified)
return sig
@bound_function("list.pop")
def resolve_pop(self, list, args, kws):
assert not kws
if not args:
return signature(list.dtype)
else:
idx, = args
if isinstance(idx, types.Integer):
return signature(list.dtype, types.intp)
@bound_function("list.remove")
def resolve_remove(self, list, args, kws):
assert not kws
if len(args) == 1:
return signature(types.none, list.dtype)
@bound_function("list.reverse")
def resolve_reverse(self, list, args, kws):
assert not args
assert not kws
return signature(types.none)
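    # list.sort accepts an optional boolean "reverse" argument and always
    # returns None; it is typed via an on-the-fly callable template bound to
    # the receiving list type.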
def resolve_sort(self, list):
def typer(reverse=None):
if (reverse is not None and
not isinstance(reverse, types.Boolean)):
return
return types.none
return types.BoundFunction(make_callable_template(key="list.sort",
typer=typer,
recvr=list),
list)
@builtin
class AddList(AbstractTemplate):
key = "+"
def generic(self, args, kws):
if len(args) == 2:
a, b = args
if isinstance(a, types.List) and isinstance(b, types.List):
unified = self.context.unify_pairs(a, b)
return signature(unified, a, b)
@builtin
class InplaceAddList(AbstractTemplate):
key = "+="
def generic(self, args, kws):
if len(args) == 2:
a, b = args
if isinstance(a, types.List) and isinstance(b, types.List):
if self.context.can_convert(b.dtype, a.dtype):
return signature(a, a, b)
@builtin
class MulList(AbstractTemplate):
key = "*"
def generic(self, args, kws):
a, b = args
if isinstance(a, types.List) and isinstance(b, types.Integer):
return signature(a, a, types.intp)
@builtin
class InplaceMulList(MulList):
key = "*="
class ListCompare(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
if isinstance(lhs, types.List) and isinstance(rhs, types.List):
# Check element-wise comparability
res = self.context.resolve_function_type(self.key,
(lhs.dtype, rhs.dtype), {})
if res is not None:
return signature(types.boolean, lhs, rhs)
@builtin
class ListEq(ListCompare):
key = '=='
@builtin
class ListNe(ListCompare):
key = '!='
@builtin
class ListLt(ListCompare):
key = '<'
@builtin
class ListLe(ListCompare):
key = '<='
@builtin
class ListGt(ListCompare):
key = '>'
@builtin
class ListGe(ListCompare):
key = '>='
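# Usage sketch (illustrative only, not exercised by this module): with the
# templates above registered, reflected lists support these operations in
# nopython mode. Assumes numba is installed; the helper below is never called
# at import time.
def _example_list_typing():
    from numba import njit

    @njit
    def demo(n):
        out = [0]
        for i in range(n):
            out.append(i)       # typed by ListAttribute.resolve_append
        out.extend([n, n + 1])  # typed by ListAttribute.resolve_extend
        return sorted(out)      # typed by SortedBuiltin
    return demo(3)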
|
|
from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import tempfile
import zipfile
from distutils.util import change_root
from distutils import sysconfig
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.distlib.markers import interpret as markers_interpret
from pip._vendor.six.moves import configparser
import pip.wheel
from pip.compat import native_str, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
)
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path,
call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir,
get_installed_version
)
from pip.utils.logging import indent_log
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel
from pip._vendor.packaging.version import Version
logger = logging.getLogger(__name__)
def _strip_extras(path):
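    # Split a name such as "pkg[extra1,extra2]" into ("pkg", "[extra1,extra2]");
    # when no extras suffix is present, return (path, None).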
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
link=None, as_egg=False, update=True, editable_options=None,
pycompile=True, markers=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
self.extras = ()
if isinstance(req, six.string_types):
req = pkg_resources.Requirement.parse(req)
self.extras = req.extras
self.req = req
self.comes_from = comes_from
self.constraint = constraint
self.source_dir = source_dir
self.editable = editable
if editable_options is None:
editable_options = {}
self.editable_options = editable_options
self._wheel_cache = wheel_cache
self.link = link
self.as_egg = as_egg
self.markers = markers
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
        # This holds the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
# Temporary build location
self._temp_build_dir = None
        # Used to store the global directory where the _temp_build_dir should
        # have been created. See the _correct_build_location method.
self._ideal_build_dir = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
self.use_user_site = False
self.target_dir = None
self.options = options if options else {}
self.pycompile = pycompile
# Set to True after successful preparation of this requirement
self.prepared = False
self.isolated = isolated
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None,
isolated=False, options=None, wheel_cache=None,
constraint=False):
from pip.index import Link
name, url, extras_override, editable_options = parse_editable(
editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir,
editable=True,
link=Link(url),
constraint=constraint,
editable_options=editable_options,
isolated=isolated,
options=options if options else {},
wheel_cache=wheel_cache)
if extras_override is not None:
res.extras = extras_override
return res
@classmethod
def from_line(
cls, name, comes_from=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
from pip.index import Link
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers = name.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
extras = None
if is_url(name):
link = Link(name)
else:
p, extras = _strip_extras(path)
if (os.path.isdir(p) and
(os.path.sep in name or name.startswith('.'))):
if not is_installable_dir(p):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' "
"not found." % name
)
link = Link(path_to_url(p))
elif is_archive_file(p):
if not os.path.isfile(p):
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
link = Link(path_to_url(p))
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', link.url):
link = Link(
path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel on this platform." %
wheel.filename
)
req = "%s==%s" % (wheel.name, wheel.version)
else:
                # Set the req to the egg fragment. When it's not there, this
                # will become an 'unnamed' requirement.
req = link.egg_fragment
# a requirement specifier
else:
req = name
options = options if options else {}
res = cls(req, comes_from, link=link, markers=markers,
isolated=isolated, options=options,
wheel_cache=wheel_cache, constraint=constraint)
if extras:
res.extras = pkg_resources.Requirement.parse('__placeholder__' +
extras).extras
return res
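# Illustrative inputs (not taken from pip's documentation; the names, paths and
# URL below are hypothetical placeholders) showing the kinds of strings
# from_line() is meant to accept:
#
#   InstallRequirement.from_line('requests>=2.0')           # requirement specifier
#   InstallRequirement.from_line('./some/local/project')    # directory containing setup.py
#   InstallRequirement.from_line('pkg-1.0.tar.gz')          # local archive file
#   InstallRequirement.from_line('https://example.com/pkg-1.0-py2.py3-none-any.whl')  # URL to a wheel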
def __str__(self):
if self.req:
s = str(self.req)
if self.link:
s += ' from %s' % self.link.url
else:
s = self.link.url if self.link else None
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def __repr__(self):
return '<%s object: %s editable=%r>' % (
self.__class__.__name__, str(self), self.editable)
def populate_link(self, finder, upgrade):
"""Ensure that if a link can be found for this, that it is found.
Note that self.link may still be None - if Upgrade is False and the
requirement is already installed.
"""
if self.link is None:
self.link = finder.find_requirement(self, upgrade)
@property
def link(self):
return self._link
@link.setter
def link(self, link):
# Lookup a cached wheel, if possible.
if self._wheel_cache is None:
self._link = link
else:
self._link = self._wheel_cache.cached_wheel(link, self.name)
if self._link != link:
logger.debug('Using cached wheel link: %s', self._link)
@property
def specifier(self):
return self.req.specifier
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
# for requirement via a path to a directory: the name of the
# package is not available yet so we create a temp directory
# Once run_egg_info has run, we'll be able
# to fix it via _correct_build_location
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
logger.debug('Creating directory %s', build_dir)
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def _correct_build_location(self):
"""Move self._temp_build_dir to self._ideal_build_dir/self.req.name
For some requirements (e.g. a path to a directory), the name of the
package is not available until we run egg_info, so the build_location
will return a temporary directory and store the _ideal_build_dir.
This is only called by self.egg_info_path to fix the temporary build
directory.
"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
assert self._ideal_build_dir
old_location = self._temp_build_dir
self._temp_build_dir = None
new_location = self.build_location(self._ideal_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self._ideal_build_dir = None
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(self.req.project_name)
@property
def setup_py(self):
assert self.source_dir, "No source dir for %s" % self
try:
import setuptools # noqa
except ImportError:
# Setuptools is not available
raise InstallationError(
"setuptools must be installed to install from a source "
"distribution"
)
setup_file = 'setup.py'
if self.editable_options and 'subdirectory' in self.editable_options:
setup_py = os.path.join(self.source_dir,
self.editable_options['subdirectory'],
setup_file)
else:
setup_py = os.path.join(self.source_dir, setup_file)
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
def run_egg_info(self):
assert self.source_dir
if self.name:
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
self.setup_py, self.name,
)
else:
logger.debug(
'Running setup.py (path:%s) egg_info for package from %s',
self.setup_py, self.link,
)
with indent_log():
script = self._run_setup_py
script = script.replace('__SETUP_PY__', repr(self.setup_py))
script = script.replace('__PKG_NAME__', repr(self.name))
base_cmd = [sys.executable, '-c', script]
if self.isolated:
base_cmd += ["--no-user-cfg"]
egg_info_cmd = base_cmd + ['egg_info']
# We can't put the .egg-info files at the root, because then the
# source code will be mistaken for an installed egg, causing
# problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
ensure_dir(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=cwd,
show_stdout=False,
command_level=logging.DEBUG,
command_desc='python setup.py egg_info')
if not self.req:
if isinstance(
pkg_resources.parse_version(self.pkg_info()["Version"]),
Version):
op = "=="
else:
op = "==="
self.req = pkg_resources.Requirement.parse(
"".join([
self.pkg_info()["Name"],
op,
self.pkg_info()["Version"],
]))
self._correct_build_location()
# FIXME: This is a lame hack, entirely for PasteScript which has
# a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
import tokenize
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(
getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'),
__file__,
'exec'
))
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.exists(
os.path.join(root, dir, 'bin', 'python')
) or
os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError(
'No files/directories in %s (from %s)' % (base, filename)
)
assert filenames, \
"No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep) +
(os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warning(
'No PKG-INFO file found in %s',
display_path(self.egg_info_path('PKG-INFO')),
)
p.feed(data or '')
return p.close()
_requirements_section_re = re.compile(r'\[(.*?)\]')
@property
def installed_version(self):
return get_installed_version(self.name)
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if version not in self.req:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == 'file':
# Static paths don't get updated
return
assert '+' in self.link.url, "bad url: %r" % self.link.url
if not self.update:
return
vc_type, url = self.link.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.link.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.link, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError(
"Cannot uninstall requirement %s, not installed" % (self.name,)
)
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
develop_egg_link = egg_link_path(dist)
develop_egg_link_egg_info = '{0}.egg-info'.format(
pkg_resources.to_filename(dist.project_name))
egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
# Special case for distutils installed package
distutils_egg_info = getattr(dist._provider, 'path', None)
# The order of these uninstall cases matters: when two versions of the
# same package are installed, pip needs to uninstall the currently
# detected version
if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
not dist.egg_info.endswith(develop_egg_link_egg_info)):
# if dist.egg_info.endswith(develop_egg_link_egg_info), we
# are in fact in the develop_egg_link case
paths_to_remove.add(dist.egg_info)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(dist.egg_info, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif distutils_egg_info:
raise UninstallationError(
"Detected a distutils installed project ({0!r}) which we "
"cannot uninstall. The metadata provided by distutils does "
"not contain a list of files which have been installed, so "
"pip does not know which files to uninstall.".format(self.name)
)
elif dist.location.endswith('.egg'):
# package installed by easy_install
# We cannot match on dist.egg_name because it can slightly vary
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
paths_to_remove.add(dist.location)
easy_install_egg = os.path.split(dist.location)[1]
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, self.name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
for path in pip.wheel.uninstallation_paths(dist):
paths_to_remove.add(path)
else:
logger.debug(
'Not sure how to uninstall: %s - Check: %s',
dist, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = configparser.SafeConfigParser()
config.readfp(
FakeFile(dist.get_metadata_lines('entry_points.txt'))
)
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, name))
if WINDOWS:
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe.manifest'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '-script.py'
)
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error(
"Can't rollback %s, nothing uninstalled.", self.project_name,
)
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
else:
logger.error(
"Can't commit %s, nothing uninstalled.", self.project_name,
)
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(archive_path), ('i', 'w', 'b'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
if create_archive:
zip = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True
)
dir = os.path.normcase(os.path.abspath(self.source_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
def match_markers(self):
if self.markers is not None:
return markers_interpret(self.markers)
else:
return True
def install(self, install_options, global_options=[], root=None):
if self.editable:
self.install_editable(install_options, global_options)
return
if self.is_wheel:
version = pip.wheel.wheel_version(self.source_dir)
pip.wheel.check_compatibility(version, self.name)
self.move_wheel_files(self.source_dir, root=root)
self.install_succeeded = True
return
# Extend the list of global and install options passed on to
# the setup.py call with the ones from the requirements file.
# Options specified in requirements file override those
# specified on the command line, since the last option given
# to setup.py is the one that is used.
global_options += self.options.get('global_options', [])
install_options += self.options.get('install_options', [])
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = [sys.executable]
install_args.append('-c')
install_args.append(
"import setuptools, tokenize;__file__=%r;"
"exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
".replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
)
install_args += list(global_options) + \
['install', '--record', record_filename]
if not self.as_egg:
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if self.pycompile:
install_args += ["--compile"]
else:
install_args += ["--no-compile"]
if running_under_virtualenv():
py_ver_str = 'python' + sysconfig.get_python_version()
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
py_ver_str, self.name)]
logger.info('Running setup.py install for %s', self.name)
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.source_dir,
show_stdout=False,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
if self.as_egg:
# there's no --always-unzip option we can pass to the install
# command, so we're unable to save installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
os.path.relpath(
prepend_root(filename), egg_info_dir)
)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
rmtree(temp_location)
def ensure_has_source_dir(self, parent_dir):
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.build_location(parent_dir)
return self.source_dir
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.source_dir and os.path.exists(
os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
logger.debug('Removing source in %s', self.source_dir)
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
logger.info('Running setup.py develop for %s', self.name)
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
with indent_log():
# FIXME: should we do --install-headers here too?
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
[
sys.executable,
'-c',
"import setuptools, tokenize; __file__=%r; exec(compile("
"getattr(tokenize, 'open', open)(__file__).read().replace"
"('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
] +
list(global_options) +
['develop', '--no-deps'] +
list(install_options),
cwd=cwd,
show_stdout=False)
self.install_succeeded = True
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately.
"""
if self.req is None:
return False
try:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.project_name
)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv() and
dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.link and self.link.is_wheel
def move_wheel_files(self, wheeldir, root=None):
move_wheel_files(
self.name, self.req, wheeldir,
user=self.use_user_site,
home=self.target_dir,
root=root,
pycompile=self.pycompile,
isolated=self.isolated,
)
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('').rstrip('/')
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata)
def _strip_postfix(req):
"""
Strip req postfix (-dev, -0.2, etc.)
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
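# A couple of illustrative cases (derived from the regular expression above):
#
#   _strip_postfix('Foo-dev')  -> 'Foo'
#   _strip_postfix('Foo-0.2')  -> 'Foo'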
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
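# Illustrative cases (derived from the code above; the URLs are hypothetical)
# showing how the requirement name is guessed from the URL layout:
#
#   _build_req_from_url('svn+http://example.com/Foo/tags/1.0')  -> 'Foo'
#   _build_req_from_url('svn+http://example.com/Foo/trunk')     -> 'Foo'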
def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
def parse_editable(editable_req, default_vcs=None):
"""Parses an editable requirement into:
- a requirement name
- a URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
from pip.index import Link
url = editable_req
extras = None
# If a file path is specified with extras, strip off the extras.
m = re.match(r'^(.+)(\[[^\]]+\])$', url)
if m:
url_no_extras = m.group(1)
extras = m.group(2)
else:
url_no_extras = url
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not found." %
url_no_extras
)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
package_name = Link(url_no_extras).egg_fragment
if extras:
return (
package_name,
url_no_extras,
pkg_resources.Requirement.parse(
'__placeholder__' + extras
).extras,
{},
)
else:
return package_name, url_no_extras, None, {}
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
break
if '+' not in url:
if default_vcs:
url = default_vcs + '+' + url
else:
raise InstallationError(
'%s should either be a path to a local project or a VCS url '
'beginning with svn+, git+, hg+, or bzr+' %
editable_req
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
try:
options = _build_editable_options(editable_req)
except Exception as exc:
raise InstallationError(
'--editable=%s error in editable options:%s' % (editable_req, exc)
)
if not options or 'egg' not in options:
req = _build_req_from_url(editable_req)
if not req:
raise InstallationError(
'--editable=%s is not the right format; it must have '
'#egg=Package' % editable_req
)
else:
req = options['egg']
package = _strip_postfix(req)
return package, url, None, options
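# Sketch (derived from the code above; the URL is a hypothetical placeholder)
# of the (name, url, extras, editable_options) tuple parse_editable() returns
# for a typical VCS editable requirement:
#
#   parse_editable('git+https://example.com/repo.git#egg=Example&subdirectory=sub')
#   -> ('Example',
#       'git+https://example.com/repo.git#egg=Example&subdirectory=sub',
#       None,
#       {'egg': 'Example', 'subdirectory': 'sub'})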
|
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
import numpy
import datetime
import _idl_histogram
def histogram(data, binsize=None, maxv=None, minv=None, nbins=None, omax=None,
omin=None, reverse_indices=None, locations=None, input_arr=None,
nan=False):
"""
Replicates the histogram function available within IDL
(Interactive Data Language, EXELISvis).
:param data:
A 1-Dimensional array to calculate the histogram for.
:param binsize:
(Optional) The binsize (Default is 1) to be used for creating the
histogram.
:param maxv:
(Optional) The maximum value to be used in creating the histogram.
If not specified the array will be searched for max.
:param minv:
(Optional) The minimum value to be used in creating the histogram.
If not specified the array will be searched for min.
:param nbins:
(Optional) The number of bins to be used for creating the histogram.
If set, binsize is calculated as ((maxv - minv) / (nbins - 1)), and the
max value will be adjusted to (nbins * binsize + minv).
:param omax:
(Optional) A string name used to refer to the dictionary key
that will contain the maximum value used in generating the histogram.
:param omin:
(Optional) A string name used to refer to the dictionary key
that will contain the minimum value used in generating the histogram.
:param reverse_indices:
(Optional) A string name used to refer to the
dictionary key that will contain the reverse indices of the histogram.
:param locations:
(Optional) A string name used to refer to the dictionary
key that will contain the starting locations of each bin.
:param input_arr:
(Optional) Used to specify an input array that will be added to the
result of the histogram. Useful for tiling mechanisms that only handle
portions of an array at a time. The input array must be 1-Dimensional
and contain at least as many elements as are required to construct
the histogram.
:param nan:
If set to True (Default is False) then nan values will be
accounted for and treated as missing data.
:return:
A dictionary containing the histogram and other optional components.
The dictionary key name for the histogram is 'histogram'.
Example:
>>> h = histogram(data, minv=0, omin='omin', omax='omax', reverse_indices='ri', locations='loc')
>>> hist = h['histogram']
>>> ri = h['ri']
>>> loc = h['loc']
>>> data_at_ith_bin_indices = data[ri[ri[i]:ri[i+1]]]
:author:
Josh Sixsmith; josh.sixsmith@gmail.com; joshua.sixsmith@ga.gov.au
:history:
* 04/02/2013: Created
* 05/04/2013: Added nan keyword
* 05/06/2013: Now checks for max value of 256 and datatype of 'uint8'
* 12/06/2013: Added input_arr keyword
:copyright:
Copyright (c) 2014, Josh Sixsmith
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
def hist_int(data, n, minv, maxv, binsize, nbins, max_bin, ri):
# increase the size by one. When specifying a min and max, it shouldn't
# be included in the histogram. Stuff not to be included gets dumped
# into the 1st position then removed prior to returning to the user.
nbins_ = nbins + 1
hist = numpy.zeros(int(nbins_), dtype='uint32')
_idl_histogram.idl_histogram.histogram_int(data, hist, n, nbins_, minv,
maxv, max_bin, binsize)
if ri:
return hist
else:
return hist[1:]
def hist_long(data, n, minv, maxv, binsize, nbins, max_bin, ri):
# increase the size by one. When specifying a min and max, it shouldn't
# be included in the histogram. Stuff not to be included gets dumped
# into the 1st position then removed prior to returning to the user.
nbins_ = nbins + 1
hist = numpy.zeros(int(nbins_), dtype='uint32')
_idl_histogram.idl_histogram.histogram_long(data, hist, n, nbins_,
minv, maxv, max_bin,
binsize)
if ri:
return hist
else:
return hist[1:]
def hist_dlong(data, n, minv, maxv, binsize, nbins, max_bin, ri):
# increase the size by one. When specifying a min and max, it shouldn't
# be included in the histogram. Stuff not to be included gets dumped
# into the 1st position then removed prior to returning to the user.
nbins_ = nbins + 1
hist = numpy.zeros(int(nbins_), dtype='uint32')
_idl_histogram.idl_histogram.histogram_dlong(data, hist, n, nbins_,
minv, maxv, max_bin,
binsize)
if ri:
return hist
else:
return hist[1:]
def hist_float(data, n, minv, maxv, binsize, nbins, max_bin, ri):
# increase the size by one. When specifying a min and max, it shouldn't
# be included in the histogram. Stuff not to be included gets dumped
# into the 1st position then removed prior to returning to the user.
nbins_ = nbins + 1
hist = numpy.zeros(int(nbins_), dtype='uint32')
_idl_histogram.idl_histogram.histogram_float(data, hist, n, nbins_,
minv, maxv, max_bin,
binsize)
if ri:
return hist
else:
return hist[1:]
def hist_dfloat(data, n, minv, maxv, binsize, nbins, max_bin, ri):
# increase the size by one. When specifying a min and max, it shouldn't
# be included in the histogram. Stuff not to be included gets dumped
# into the 1st position then removed prior to returning to the user.
nbins_ = nbins + 1
hist = numpy.zeros(int(nbins_), dtype='uint32')
_idl_histogram.idl_histogram.histogram_dfloat(data, hist, n, nbins_,
minv, maxv, max_bin,
binsize)
if ri:
return hist
else:
return hist[1:]
def ri_int(data, hist, nbins, n, ri_sz, minv, maxv, max_bin, binsize):
# increase the size by one. When specifying a min and max, it shouldn't
# be included in the reverse indices. Stuff not to be included gets
# dumped into the 1st position then removed prior to returning to the
# user.
nbins_ = nbins + 1
ri = numpy.zeros(int(ri_sz), dtype='uint32')
_idl_histogram.idl_histogram.reverse_indices_int(data, hist, ri,
nbins_, n, ri_sz,
minv, maxv, max_bin,
binsize)
return (hist[1:], ri[1:])
def ri_long(data, hist, nbins, n, ri_sz, minv, maxv, max_bin, binsize):
# increase the size by one. When specifying a min and max, it shouldn't
# be included in the reverse indices. Stuff not to be included gets
# dumped into the 1st position then removed prior to returning to the
# user.
nbins_ = nbins + 1
ri = numpy.zeros(int(ri_sz), dtype='uint32')
_idl_histogram.idl_histogram.reverse_indices_long(data, hist, ri,
nbins_, n, ri_sz,
minv, maxv, max_bin,
binsize)
return (hist[1:], ri[1:])
def ri_dlong(data, hist, nbins, n, ri_sz, minv, maxv, max_bin, binsize):
# increase the size by one. When specifying a min and max, it shouldn't
# be included in the reverse indices. Stuff not to be included gets
# dumped into the 1st position then removed prior to returning to the
# user.
nbins_ = nbins + 1
ri = numpy.zeros(int(ri_sz), dtype='uint32')
_idl_histogram.idl_histogram.reverse_indices_dlong(data, hist, ri,
nbins_, n, ri_sz,
minv, maxv, max_bin,
binsize)
return (hist[1:], ri[1:])
def ri_float(data, hist, nbins, n, ri_sz, minv, maxv, max_bin, binsize):
# increase the size by one. When specifying a min and max, it shouldn't
# be included in the reverse indices. Stuff not to be included gets
# dumped into the 1st position then removed prior to returning to the
# user.
nbins_ = nbins + 1
ri = numpy.zeros(int(ri_sz), dtype='uint32')
_idl_histogram.idl_histogram.reverse_indices_float(data, hist, ri,
nbins_, n, ri_sz,
minv, maxv, max_bin,
binsize)
return (hist[1:], ri[1:])
def ri_dfloat(data, hist, nbins, n, ri_sz, minv, maxv, max_bin, binsize):
# increase the size by one. When specifying a min and max, it shouldn't
# be included in the reverse indices. Stuff not to be included gets
# dumped into the 1st position then removed prior to returning to the
# user.
nbins_ = nbins + 1
ri = numpy.zeros(int(ri_sz), dtype='uint32')
_idl_histogram.idl_histogram.reverse_indices_dfloat(data, hist, ri,
nbins_, n, ri_sz,
minv, maxv,
max_bin, binsize)
return (hist[1:], ri[1:])
def datatype(val):
instr = str(val)
return {'int8': '1',
'uint8': '1',
'int16': '2',
'uint16': '12',
'int32': '3',
'uint32': '13',
'int64': '13',
'uint64': '15',
'int': '13',
'float32': '4',
'float64': '5'}.get(instr, 'Error')
def data_convert(val, b):
instr = str(val)
return {'int8': numpy.int8(b),
'uint8': numpy.uint8(b),
'int16': numpy.int16(b),
'uint16': numpy.uint16(b),
'int32': numpy.int32(b),
'uint32': numpy.uint32(b),
'int64': numpy.int64(b),
'uint64': numpy.uint64(b),
'int': numpy.int64(b),
'float32': numpy.float32(b),
'float64': numpy.float64(b)}.get(instr, 'Error')
dtype = datatype(data.dtype.name)
if (dtype == 'Error'):
msg = ("Error. Incompatable Data Type. Compatable Data Types Include: "
"int8, uint8, int16, uint16, int32, uint32, int64, uint64, "
"float32, float64")
raise TypeError(msg)
if len(data.shape) != 1:
data = data.ravel()
if ((maxv is not None) & (binsize is not None) & (nbins is not None)):
msg = ("Error. Conflicting Keywords. maxv cannot be set when both "
"binsize and nbins are set.")
raise Exception(msg)
if ((input_arr is not None) & (reverse_indices is not None)):
msg = ("Error. Conflicting Keywords. Both input_arr and "
"reverse_indices cannot be set at the same time.")
raise Exception(msg)
if (maxv is None):
if nan:
maxv = numpy.nanmax(data)
else:
maxv = numpy.max(data)
if (minv is None):
if nan:
minv = numpy.nanmin(data)
else:
minv = numpy.min(data)
minv = data_convert(data.dtype.name, minv)
maxv = data_convert(data.dtype.name, maxv)
if (binsize is None) & (nbins is None):
binsize = 1
nbins = (maxv - minv) + 1
elif (binsize is None):
# floor division is desirable for ints but not for floats
# py2 would do true divide if the datatype was float
if 'int' in data.dtype.name:
binsize = (maxv - minv) // (nbins - 1)
else:
binsize = (maxv - minv) / (nbins - 1)
maxv = nbins * binsize + minv
elif (binsize is not None) & (nbins is None):
nbins = numpy.floor((maxv - minv) / binsize) + 1
else:
maxv = nbins * binsize + minv
binsize = data_convert(data.dtype.name, binsize)
minv = data_convert(data.dtype.name, minv)
# If nbins is set to 256 and the array datatype is uint8, then the max
# value will be adjusted to 256, however due to datatype conversions, the
# max value of 256 will change to 0
# This fix conforms with IDL.
if ((maxv == 256) & (data.dtype.name == 'uint8')):
maxv = 255
maxv = data_convert(data.dtype.name, maxv)
# We probably also need to pass a max bin value into the Fortran code:
# the max bin value is non-inclusive, but we also need to check that the
# data values are <= the max value,
# e.g. if maxv = 1.0 but max_bin = 1.08, then a value such as 1.04
# should not be included
max_bin = nbins * binsize + minv
if (binsize == 0):
raise ValueError("Error. Binsize = 0, histogram can't be computed.")
# Probably unnecessary to include the maxv and max_bin equality warning
# if (maxv == max_bin):
#     msg = ("!!!!!Warning!!!!! \n"
#            "maxv is equal to the last bin's right edge, "
#            "maximum value will not be included in the histogram.")
#     print(msg)
if (input_arr is not None):
# Check that input_arr is 1-Dimensional
if (len(input_arr.shape) != 1):
print("input_arr will be flattened to 1D.")
input_arr = input_arr.ravel()
# Check that input is at least nbins in length
if (input_arr.shape[0] < nbins):
print('Number of elements of input_arr: ', input_arr.shape[0])
print('minimum number of elements required: ', nbins)
msg = "Error. Input array does not have enough elements."
raise ValueError(msg)
n = numpy.size(data)
# Some unsigned data types will be promoted as Fortran doesn't handle
# unsigned data types.
get_hist = {'int8': hist_int,
'uint8': hist_int,
'int16': hist_int,
'uint16': hist_long,
'int32': hist_long,
'uint32': hist_dlong,
'int64': hist_dlong,
'uint64': hist_dlong,
'int': hist_dlong,
'float32': hist_float,
'float64': hist_dfloat}
ri = False
if (reverse_indices is not None):
ri = True
hist = get_hist[data.dtype.name](data, n, minv, maxv, binsize, nbins,
max_bin, ri)
cum_sum = numpy.sum(hist[1:])
ri_sz = nbins + cum_sum + 1 + 1
get_ri = {'int8': ri_int,
'uint8': ri_int,
'int16': ri_int,
'uint16': ri_long,
'int32': ri_long,
'uint32': ri_dlong,
'int64': ri_dlong,
'uint64': ri_dlong,
'int': ri_dlong,
'float32': ri_float,
'float64': ri_dfloat}
hri = get_ri[data.dtype.name](data, hist, nbins, n, ri_sz, minv, maxv,
max_bin, binsize)
results = {'histogram': hri[0]}
results[reverse_indices] = hri[1]
else:
hist = get_hist[data.dtype.name](data, n, minv, maxv, binsize, nbins,
max_bin, ri)
if (input_arr is not None):
# Now to add the input array to the histogram.
# The result will take the shape of the larger of the two arrays.
if (input_arr.shape[0] == hist.shape[0]):
hist += input_arr
results = {'histogram': hist}
else:
temp = numpy.zeros(input_arr.shape, dtype='uint32')
temp[0:hist.shape[0]] = hist
temp += input_arr
results = {'histogram': temp}
else:
results = {'histogram': hist}
if (omax is not None):
results[omax] = maxv
if (omin is not None):
results[omin] = minv
if (locations is not None):
loc = numpy.zeros(int(nbins), dtype=data.dtype.name)
for i in numpy.arange(int(nbins)):
loc[i] = minv + i * binsize
results[locations] = loc
return results
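# Minimal usage sketch (illustrative; assumes the compiled _idl_histogram
# extension is importable and that 'data' is a 1-D numpy array):
#
#   import numpy
#   data = numpy.random.randint(0, 10, 1000).astype('int32')
#   h = histogram(data, minv=0, maxv=9, binsize=1,
#                 omin='omin', omax='omax', reverse_indices='ri')
#   hist = h['histogram']
#   ri = h['ri']
#   # indices into 'data' of the values that fell into bin i:
#   #   data[ri[ri[i]:ri[i + 1]]]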
|
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos "install" plugin
#
# Author: Luis Parravicini
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"run" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import os
import cocos
import BaseHTTPServer
import webbrowser
import threading
class CCPluginRun(cocos.CCPlugin):
"""
Compiles a project and runs it on the target
"""
@staticmethod
def depends_on():
return ('deploy',)
@staticmethod
def plugin_name():
return "run"
@staticmethod
def brief_description():
return cocos.MultiLanguage.get_string('RUN_BRIEF')
def _add_custom_options(self, parser):
parser.add_argument("-m", "--mode", dest="mode", default='debug',
help=cocos.MultiLanguage.get_string('RUN_ARG_MODE'))
group = parser.add_argument_group(cocos.MultiLanguage.get_string('RUN_ARG_GROUP_WEB'))
group.add_argument("-b", "--browser", dest="browser",
help=cocos.MultiLanguage.get_string('RUN_ARG_BROWSER'))
group.add_argument("--port", dest="port", metavar="SERVER_PORT", nargs='?',
help=cocos.MultiLanguage.get_string('RUN_ARG_PORT'))
group.add_argument("--host", dest="host", metavar="SERVER_HOST", nargs='?', default='127.0.0.1',
help=cocos.MultiLanguage.get_string('RUN_ARG_HOST'))
def _check_custom_options(self, args):
self._port = args.port
self._mode = args.mode
self._host = args.host
self._browser = args.browser
def get_ios_sim_name(self):
# get the version of xcodebuild
ver = cocos.get_xcode_version()
if ver.startswith("5"):
ret = "ios-sim-xcode5"
else:
ret = "ios-sim-xcode6"
return ret
def run_ios_sim(self, dependencies):
if not self._platforms.is_ios_active():
return
deploy_dep = dependencies['deploy']
if deploy_dep._use_sdk == 'iphoneos':
cocos.Logging.warning(cocos.MultiLanguage.get_string('RUN_WARNING_IOS_FOR_DEVICE_FMT') %
os.path.dirname(deploy_dep._iosapp_path))
else:
if getattr(sys, 'frozen', None):
cur_dir = os.path.realpath(os.path.dirname(sys.executable))
else:
cur_dir = os.path.realpath(os.path.dirname(__file__))
iossim_exe_path = os.path.join(cur_dir, 'bin', self.get_ios_sim_name())
launch_sim = "%s launch \"%s\" &" % (iossim_exe_path, deploy_dep._iosapp_path)
self._run_cmd(launch_sim)
def run_mac(self, dependencies):
if not self._platforms.is_mac_active():
return
deploy_dep = dependencies['deploy']
launch_macapp = '\"%s/Contents/MacOS/%s\"' % (deploy_dep._macapp_path, deploy_dep.target_name)
self._run_cmd(launch_macapp)
def run_android_device(self, dependencies):
if not self._platforms.is_android_active():
return
sdk_root = cocos.check_environment_variable('ANDROID_SDK_ROOT')
adb_path = cocos.CMDRunner.convert_path_to_cmd(os.path.join(sdk_root, 'platform-tools', 'adb'))
deploy_dep = dependencies['deploy']
startapp = "%s shell am start -n \"%s/%s\"" % (adb_path, deploy_dep.package, deploy_dep.activity)
self._run_cmd(startapp)
def open_webbrowser(self, url):
if self._browser is None:
threading.Event().wait(1)
webbrowser.open_new(url)
else:
if cocos.os_is_mac():
url_cmd = "open -a \"%s\" \"%s\"" % (self._browser, url)
else:
url_cmd = "\"%s\" %s" % (self._browser, url)
self._run_cmd(url_cmd)
def run_web(self, dependencies):
if not self._platforms.is_web_active():
return
from SimpleHTTPServer import SimpleHTTPRequestHandler
HandlerClass = SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"
HandlerClass.protocol_version = Protocol
host = self._host
if self._port is None:
port = 8000
port_max_add = 2000
else:
port = int(self._port)
port_max_add = 0
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
i = 0
httpd = None
while (i <= port_max_add):
port += i
i += 1
server_address = (host, port)
try:
cocos.Logging.info(cocos.MultiLanguage.get_string('RUN_INFO_HOST_PORT_FMT') %
(host, port))
httpd = ServerClass(server_address, HandlerClass)
except Exception as e:
httpd = None
cocos.Logging.warning(cocos.MultiLanguage.get_string('RUN_WARNING_SERVER_FAILED_FMT') %
(host, port, e))
if httpd is not None:
break
if httpd is None:
raise cocos.CCPluginError(cocos.MultiLanguage.get_string('RUN_ERROR_START_SERVER_FAILED'))
from threading import Thread
sub_url = deploy_dep.sub_url
url = 'http://%s:%s%s' % (host, port, sub_url)
thread = Thread(target = self.open_webbrowser, args = (url,))
thread.start()
sa = httpd.socket.getsockname()
with cocos.pushd(run_root):
cocos.Logging.info(cocos.MultiLanguage.get_string('RUN_INFO_SERVING_FMT') % (sa[0], sa[1]))
httpd.serve_forever()
def run_win32(self, dependencies):
if not self._platforms.is_win32_active():
return
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
exe = deploy_dep.project_name
with cocos.pushd(run_root):
self._run_cmd(os.path.join(run_root, exe))
def run_wp8(self, dependencies):
if not self._platforms.is_wp8_active():
return
deploy_dep = dependencies['deploy']
xap_path = deploy_dep.xap_path
deploy_tool = deploy_dep.deploy_tool
cmd = '"%s" /installlaunch "%s" /targetDevice:xd' % (deploy_tool, xap_path)
self._run_cmd(cmd)
def run_linux(self, dependencies):
if not self._platforms.is_linux_active():
return
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
exe = deploy_dep.project_name
with cocos.pushd(run_root):
self._run_cmd(os.path.join(run_root, exe))
def run(self, argv, dependencies):
self.parse_args(argv)
cocos.Logging.info(cocos.MultiLanguage.get_string('RUN_INFO_START_APP'))
self.run_android_device(dependencies)
self.run_ios_sim(dependencies)
self.run_mac(dependencies)
self.run_web(dependencies)
self.run_win32(dependencies)
self.run_linux(dependencies)
self.run_wp8(dependencies)
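# Example invocation (illustrative; the options shown come from
# _add_custom_options() above, while the target platform is selected by the
# cocos base command-line parser rather than by this plugin):
#
#   cocos run -m debug --port 8000 --host 127.0.0.1 -b firefox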
|
|
"""
Author: Jon Ander Gomez Adrian (jon@dsic.upv.es, http://personales.upv.es/jon)
Version: 2.0
Date: September 2016
Universitat Politecnica de Valencia
Technical University of Valencia TU.VLC
"""
import sys
import numpy
from machine_learning import GMM
from .Constants import Constants
from .State import State
from .Transitions import Transitions
class HMM:
"""
An HMM is characterized by the following attributes:
S: the set of states
states must be able to compute b(s,o_k), the probability of emitting
the symbol 'o_k' by the state 's'
in the continuous case we would say: the probability of generating
the observation or vector 'o_k' by the state 's'
P: the set of initial probabilities of states
P[i] is the probability that a sequence of observations starts in state 'i'
A: the matrix of transition probabilities from one state to another
row 'i' contains the transition probabilities from state 'i' to every other state
An HMM can be discrete, semi-continuous or continuous.
Depending on the modality, the computation of b(s,o_k) will be carried out in
different ways.
"""
def __init__(self, identifier = None, modality = 'Discrete', dict_1 = None, dict_2 = None, dict_3 = None):
"""
This constructor admits the creation of an HMM in three ways:
1) from existing states and transitions (parameters in dict_1)
this case is when each HMM represents a model in an application as ASR or HTR,
where the topology is left-to-right with two phantom states, the initial one and the final one
then 'P' is not needed, it is implicit in the first row of the transition matrix A
2) from a given input file (parameters in dict_2); in this case the first line is needed because it is assumed
it was already read by the caller method that provides the input file;
transitions and states are also needed, but in this case as dictionaries, see the load() method
3) from scratch, in this case num_states is mandatory (parameters in dict_3)
"""
#
if modality not in State.valid_modalities:
raise Exception('Fatal error: no valid modality: ' + modality)
#
self.identifier = identifier
self.S = None
self.A = None
self.P = None
self.modality = modality
self.left_to_right = True
self.num_symbols = None
#
if dict_1 is not None:
#
self.transitions = dict_1['transitions']
self.states = dict_1['states']
if 'left_to_right' in dict_1 :
self.left_to_right = dict_1['left_to_right']
if 'num_symbols' in dict_1 :
self.num_symbols = dict_1['num_symbols']
#
elif dict_2 is not None:
#
if 'left_to_right' in dict_2 : self.left_to_right = dict_2['left_to_right']
#
self.load(dict_2['input_file'], dict_2['first_line'], dict_2['d_transitions'], dict_2['d_states'])
#
elif dict_3 is not None:
#
num_states = dict_3['num_states']
self.sample_dim = 1
num_mixtures = 0
if 'left_to_right' in dict_3 :
self.left_to_right = dict_3['left_to_right']
if 'sample_dim' in dict_3 :
self.sample_dim = dict_3['sample_dim']
if 'num_mixtures' in dict_3 :
num_mixtures = dict_3['num_mixtures']
if self.modality == 'Discrete':
self.num_symbols = dict_3['num_symbols']
elif self.modality == 'Semi-continuous' :
classifier = dict_3['classifier']
self.num_symbols = len(classifier)
#
# if self.num_symbols is None the State will be created as continuous by using a GMM.
#
self.A = Transitions(identifier = ('T_%s' % identifier), num_states = num_states, dict_args = dict_3)
#
self.S = [None] * num_states
if self.left_to_right:
for s in range(1, len(self.S) - 1):
self.S[s] = State(identifier = ('S_%s_%d' % (identifier, s)), modality = self.modality, num_symbols = self.num_symbols, sample_dim = self.sample_dim, num_mixtures = num_mixtures, input_file = None, first_line = None)
else:
for s in range(len(self.S)):
self.S[s] = State(identifier = ('S_%s_%d' % (identifier, s)), modality = self.modality, num_symbols = self.num_symbols, sample_dim = self.sample_dim, num_mixtures = num_mixtures, input_file = None, first_line = None)
#
if (not self.left_to_right) and 'pi' in dict_3 :
self.P = dict_3['pi']
else:
self.P = numpy.ones( len(self.S) ) / len(self.S)
if self.A.force_to_one_terminal_state:
self.P = numpy.ones(len(self.S)) / (len(self.S) - 1)
self.P[-1] = 0.0
#
#
if self.identifier is None : raise Exception("Cannot create an HMM without a valid identifier!")
if self.A is None : raise Exception("Cannot create an HMM without transitions!")
if self.S is None : raise Exception("Cannot create an HMM without states!")
if self.P is not None :
self.log_P = numpy.log(self.P + Constants.k_zero_prob)
self.P_accumulator = numpy.zeros(self.P.shape)
else:
self.P_accumulator = None
# --------------------------------------------------------------------------------------------------------------------------------------------
def __len__(self): return len(self.S)
def __str__(self): return self.identifier
def load(self, f, line, d_transitions, d_states):
"""
"""
num_states = 0
if line is None: line = f.readline()
parts = line.split()
while parts[0] != '~h' :
if parts[0] == '~t' :
self.A = Transitions(input_file = f, first_line = line, dict_args = dict(left_to_right = self.left_to_right))
d_transitions[str(self.A)] = self.A
elif parts[0] == '~s' :
state = State(identifier = parts[1].replace('"', ''), modality = 'Discrete', sample_dim = 1, num_mixtures = 0, input_file = f, first_line = line)
d_states[str(state)] = state
#
line = f.readline()
parts = line.split()
#
self.identifier = parts[1].replace('"', '')
line = f.readline()
while line:
parts = line.split()
if parts[0] == "<ENDHMM>":
break
elif parts[0] == "<NUMSTATES>":
num_states=int(parts[1])
self.S = [None] * num_states
elif parts[0] == "<STATE>":
i = int(parts[1]) - 1
line = f.readline()
parts = line.split()
if parts[0] != '~s' : raise Exception('ERROR reading %s' % line)
self.S[i] = d_states[parts[1].replace('"', '')]
elif parts[0] == "~t":
self.A = d_transitions[parts[1].replace('"', '')]
#
line = f.readline()
self.P = numpy.array([s.prior for s in self.S])
# --------------------------------------------------------------------------------------------------------------------------------------------
def initialize_gmm_from_random_samples(self, samples):
for s in self.S:
s.gmm.initialize_from(samples)
# --------------------------------------------------------------------------------------------------------------------------------------------
def initialize_gmm_from_kmeans(self, samples):
from machine_learning import KMeans
kmeans = KMeans(n_clusters = len(self.S), init = 'Katsavounidis', verbosity = 1)
print("HMM.initializegmm_from_kmeans() begins the fit", flush = True)
kmeans.fit(numpy.vstack(samples))
print("HMM.initializegmm_from_kmeans() ends the fit", flush = True)
for k in range(len(self.S)):
self.S[k].gmm.initialize_from_centroids(kmeans.cluster_centers_[k])
# --------------------------------------------------------------------------------------------------------------------------------------------
def save(self, f, save_states_and_transitions = False):
"""
This method assumes that the details of transitions and states have been saved
before and only the identifiers of each state and transition matrix should be
stored here in the definition of the HMM
"""
if save_states_and_transitions:
self.A.save(f)
for state in self.S:
state.save(f)
if self.P is not None:
for i in range(len(self.S)):
self.S[i].prior = self.P[i]
f.write('~h "%s"\n' % self.identifier)
f.write('<BEGINHMM>\n')
f.write('<NUMSTATES> %d\n' % len(self.S))
if self.left_to_right:
_range_ = range(1, len(self.S) - 1)
else:
_range_ = range(len(self.S))
for i in _range_:
s = self.S[i]
f.write('<STATE> %d\n' % (i + 1))
f.write('~s "%s"\n' % str(s))
f.write('~t "%s"\n' % str(self.A))
f.write('<ENDHMM>\n')
# --------------------------------------------------------------------------------------------------------------------------------------------
def get_state(self, i): return self.S[i]
def get_states(self): return self.S[1 : -1] if self.left_to_right else self.S[:]
@staticmethod
def log_add(log_probs):
_max_ = log_probs.max()
_sum_ = numpy.log(numpy.exp(log_probs - _max_).sum()) + _max_
return _sum_
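# log_add() implements the standard log-sum-exp trick:
#   log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m)),  with m = max_i x_i
# Subtracting the maximum before exponentiating keeps exp() from underflowing
# when the log-probabilities are very negative.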
# --------------------------------------------------------------------------------------------------------------------------------------------
def forward(self, O):
"""
Version for training a single HMM; not valid for the concatenation of several HMMs as used in ASR or HTR
'O' can be an array of symbols (indexes corresponding to symbols) or an array of real-valued vectors
"""
alpha = numpy.zeros([len(O), len(self)])
B = numpy.zeros([len(self), len(O)])
for j in range(len(self.S)):
B[j, 0] = self.S[j].b(O[0])
alpha[0, j] = self.log_P[j] + B[j, 0]
for t in range(1, alpha.shape[0]):
for j in range(alpha.shape[1]):
#
B[j, t] = self.S[j].b(O[t])
#
# alpha[t, j] = sum_i(alpha[t - 1, i] * A[i, j]) * B[j, t]
#
alpha[t, j] = HMM.log_add(alpha[t - 1, :] + self.A.log_transitions[:, j]) + B[j, t]
return alpha, B, HMM.log_add(alpha[-1, :])
# --------------------------------------------------------------------------------------------------------------------------------------------
def backward(self, O, B, final_probs = None, terminal_nodes = None):
"""
Version for training a single HMM; not valid for the concatenation of several HMMs as used in ASR or HTR
"""
beta = numpy.zeros([len(O), len(self)])
if final_probs is not None:
beta[-1, :] = numpy.log(final_probs + Constants.k_zero_prob)
elif terminal_nodes is not None:
beta[-1, :] = Constants.k_log_zero
for s in terminal_nodes:
beta[-1, s] = 0.0
else:
beta[-1, :] = 0.0 # log(1.0)
t = beta.shape[0] - 2
while t >= 0:
for i in range(beta.shape[1]):
#
# beta[t, i] = sum_j(A[i, j] * B[j, t + 1] * beta[t + 1, j])
#
beta[t, i] = HMM.log_add(self.A.log_transitions[i, :] + B[:, t + 1] + beta[t + 1, :])
t -= 1
return beta
# --------------------------------------------------------------------------------------------------------------------------------------------
def forward_backward(self, O):
"""
Version for training a single HMM; not valid for the concatenation of several HMMs as used in ASR or HTR
"""
alpha, B, _Z_ = self.forward(O)
beta = self.backward(O, B)
gamma = alpha + beta # This must be a sum because what is stored in 'alpha' and 'beta' are logarithms of probabilities
# Gamma must be normalized by means of P(O|lambda) : _Z_ = log P(O|lambda)
gamma = gamma - _Z_
return alpha, beta, gamma, B, _Z_
# --------------------------------------------------------------------------------------------------------------------------------------------
def baum_welch_reset(self):
self.A.reset_accumulators()
for s in self.S: s.reset_accumulators()
self.P_accumulator = None
if self.P is not None:
self.P_accumulator = numpy.zeros(self.P.shape)
# --------------------------------------------------------------------------------------------------------------------------------------------
def baum_welch_update(self):
self.A.update_transitions()
for s in self.S: s.normalize()
if self.P_accumulator is not None:
if self.A.force_to_one_terminal_state: self.P_accumulator[-1] = 0.0
self.P = self.P_accumulator / self.P_accumulator.sum()
self.log_P = numpy.log(self.P + Constants.k_zero_prob)
for i in range(len(self.S)):
self.S[i].prior = self.P[i]
# --------------------------------------------------------------------------------------------------------------------------------------------
def baum_welch_from_a_list(self, list_of_observations, do_reset = True, verbose = True):
if do_reset: self.baum_welch_reset()
if verbose: sys.stderr.write('Training samples %6d: \n' % len(list_of_observations))
counter = 0
for O in list_of_observations:
self.baum_welch(O)
counter += 1
if verbose:
sys.stderr.write('\r %22d' % counter)
# print(" ".join("{}".format(x) for x in O))
self.baum_welch_update()
if verbose:
sys.stderr.write('\n\n')
# --------------------------------------------------------------------------------------------------------------------------------------------
def baum_welch(self, O):
"""
        Version for training a single HMM; not valid for concatenations of several HMMs as used in ASR or HTR.
"""
alpha, beta, gamma, B, _Z_ = self.forward_backward(O)
#
"""
        gamma_tr = numpy.zeros([len(O) - 1, len(self), len(self)])
for t in range(gamma_tr.shape[0]):
for i in range(gamma_tr.shape[1]):
for j in range(gamma_tr.shape[1]):
gamma_tr[t, i, j] = alpha[t, i] + self.A.get_log_prob(i, j) + B[j, t + 1] + beta[t + 1,j] - gamma[t, i]
#
for i in range(gamma_tr.shape[1]):
for j in range(gamma_tr.shape[1]):
_weight_ = HMM.log_add(gamma_tr[:, i, j])
temp_hmm.A.accumulate_transition(i, j, numpy.exp(_weight_)) # This line is candidate to be modified for accumulating logarithms
#
"""
# UPDATE OF THE STATE-TRANSITION PROBABILITIES
if len(O) > 1:
for i in range(len(self)):
_log_den_ = HMM.log_add(gamma[ : -1, i]) # sum(t = 1..T-1, gamma[t, i])
for j in range(len(self)):
gamma_tr = numpy.zeros(len(O) - 1)
for t in range(gamma_tr.shape[0]):
gamma_tr[t] = alpha[t, i] + self.A.get_log_prob(i, j) + B[j, t + 1] + beta[t + 1, j] - _Z_
_weight_ = numpy.exp(HMM.log_add(gamma_tr[:]) - _log_den_)
                    self.A.accumulate_transition(i, j, value = _weight_) # This line is a candidate to be modified to accumulate logarithms
#
        # UPDATE OF THE STATE STARTING (PRIOR) PROBABILITIES
if self.P_accumulator is not None:
self.P_accumulator[:] += gamma[0, :]
        # UPDATE OF THE OUTPUT (EMISSION) PROBABILITIES
if self.modality in ['Discrete']:
#
for i in range(gamma.shape[1]):
#
_log_den_ = HMM.log_add(gamma[:, i]) # sum(t = 1..T, gamma[t, i])
_den_ = numpy.exp(_log_den_)
#
for k in numpy.unique(O): # range(self.num_symbols)
_log_num_ = HMM.log_add(gamma[O == k, i])
_weight_ = numpy.exp(_log_num_ - _log_den_)
                    self.S[i].accumulate_sample(k, _weight_, numpy.exp(_log_num_), _den_) # This line is a candidate to be modified to accumulate logarithms
#
elif self.modality in ['Continuous']:
#
for j in range(len(self)):
#
                _log_denominator_ = HMM.log_add(gamma[:, j]) # sum(t = 1..T, gamma[t, j])
_denominator_ = numpy.exp(_log_denominator_)
#
_log_densities_ = numpy.zeros([len(O), self.S[j].gmm.n_components])
for t in range(len(O)):
_log_densities_[t, :] = self.S[j].gmm.log_densities(O[t]) # log(c_j_k * g_j_k(O_t))
#
log_xi = numpy.zeros(len(O)) # A one-dimensional vector for computing _xi_t_j_k_ for fixed 'j' and 'k'
for k in range(_log_densities_.shape[1]):
log_xi[0] = self.log_P[j] + _log_densities_[0, k] + beta[0, j] # _xi_0_j_k_
#
for t in range(1, len(O)):
_temp_ = numpy.zeros(len(self))
for i in range(len(self)): # For all the states in the HMM
_temp_[i] = alpha[t - 1, i] + self.A.get_log_prob(i, j) + _log_densities_[t, k] + beta[t, j]
log_xi[t] = HMM.log_add(_temp_) # _xi_t_j_k_ for all t > 0
#
log_xi -= _Z_ # Dividing by P(O|lambda)
#
_xi_t_j_k_ = numpy.exp(log_xi)
#
# In the following lines the code of Baum-Welch directly modifies the accumulators
# of the GMM of each state 'j'
#
                    self.S[j].gmm_accumulator.acc_posteriors[k] += _xi_t_j_k_.sum() # This value is correct because it is used as the denominator for updating mean vectors and covariance matrices
self.S[j].gmm_accumulator.acc_sample_counter[k] += _denominator_ / self.S[j].gmm_accumulator.n_components
#
for t in range(len(O)):
self.S[j].gmm_accumulator.mu[k] += _xi_t_j_k_[t] * O[t]
if self.S[j].gmm_accumulator.covar_type in GMM.covar_diagonal_types:
self.S[j].gmm_accumulator.sigma[k] += _xi_t_j_k_[t] * (O[t] * O[t]) # numpy.diagonal(O[t] * O[t])
else:
self.S[j].gmm_accumulator.sigma[k] += _xi_t_j_k_[t] * numpy.outer(O[t], O[t])
else:
raise Exception('Modality ' + self.modality + ' is not valid or not implemented yet!')
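        #
        # Summary of the re-estimation quantities computed above ('xi_tr' / 'xi_mix' are just
        # names used in this comment; the code keeps everything as log-probabilities until
        # the final exp()):
        #   gamma[t, i]     = P(q_t = i | O, lambda)                     state occupancy
        #   xi_tr[t, i, j]  = P(q_t = i, q_{t+1} = j | O, lambda)        accumulated into A
        #   xi_mix[t, j, k] = P(q_t = j, component k | O, lambda)        accumulated into the GMM of state j
        # The accumulators are turned into new parameters only in baum_welch_update().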
# --------------------------------------------------------------------------------------------------------------------------------------------
def viterbi(self, O):
"""
        Version for testing a single HMM; not valid for concatenations of several HMMs as used in ASR or HTR.
        'O' can be an array of symbols (indices into the symbol alphabet) or an array of real-valued feature vectors.
"""
predecessor = numpy.ones([len(O), len(self)], dtype = int) * -1
delta = numpy.zeros([len(O), len(self)])
for j in range(len(self.S)):
delta[0, j] = self.log_P[j] + self.S[j].b(O[0])
for t in range(1, delta.shape[0]):
for j in range(delta.shape[1]):
#
_temp_ = delta[t - 1, :] + self.A.log_transitions[:, j]
#
_from_ = numpy.argmax(_temp_)
predecessor[t, j] = _from_
                delta[t, j] = _temp_[_from_] + self.S[j].b(O[t]) # include the transition log-probability, not just delta[t - 1, _from_]
#
#
if self.A.force_to_one_terminal_state:
_best_ = len(delta[-1]) - 1 # According to Transitions.py the terminal state is the last one
else:
_best_ = numpy.argmax(delta[-1, :])
        seq = numpy.full(len(O), -1, dtype = int)
        t = len(O) - 1
        i = _best_
        while t > 0:
            seq[t] = i
            i = predecessor[t, i]
            t = t - 1
        seq[0] = i # the state reached at t = 0, otherwise it would stay at -1
#
return delta[-1, _best_], seq
# --------------------------------------------------------------------------------------------------------------------------------------------
def split_gmm(self, force = False):
for s in self.S: s.split_gmm(force = force)
# --------------------------------------------------------------------------------------------------------------------------------------------
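# Illustrative sketch (standalone, not used by the class above): the log-sum-exp trick
# that 'HMM.log_add' is assumed to implement. Forward, backward and Baum-Welch above
# work with log-probabilities, so every sum of probabilities becomes a log-sum-exp:
#     log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m)),   with m = max_i x_i
# Subtracting the maximum keeps the exponentials in a numerically safe range, e.g.
# log_add([log(0.2), log(0.3)]) ~= log(0.5).
def _log_add_sketch(x):
    x = numpy.asarray(x, dtype = float)
    m = x.max()
    if numpy.isinf(m): # every term is log(0)
        return m
    return m + numpy.log(numpy.exp(x - m).sum())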
|
|
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
from . import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of15']
class port_mod_prop(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = port_mod_prop.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = port_mod_prop()
obj.type = reader.read("!H")[0]
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("port_mod_prop {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class ethernet(port_mod_prop):
type = 0
def __init__(self, advertise=None):
if advertise != None:
self.advertise = advertise
else:
self.advertise = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append(struct.pack("!L", self.advertise))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = ethernet()
_type = reader.read("!H")[0]
assert(_type == 0)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.advertise = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.advertise != other.advertise: return False
return True
def pretty_print(self, q):
q.text("ethernet {")
with q.group():
with q.indent(2):
q.breakable()
q.text("advertise = ");
value_name_map = {1: 'OFPPF_10MB_HD', 2: 'OFPPF_10MB_FD', 4: 'OFPPF_100MB_HD', 8: 'OFPPF_100MB_FD', 16: 'OFPPF_1GB_HD', 32: 'OFPPF_1GB_FD', 64: 'OFPPF_10GB_FD', 128: 'OFPPF_40GB_FD', 256: 'OFPPF_100GB_FD', 512: 'OFPPF_1TB_FD', 1024: 'OFPPF_OTHER', 2048: 'OFPPF_COPPER', 4096: 'OFPPF_FIBER', 8192: 'OFPPF_AUTONEG', 16384: 'OFPPF_PAUSE', 32768: 'OFPPF_PAUSE_ASYM', 2147483648: 'OFPPF_BSN_BREAKOUT_CAPABLE'}
q.text(util.pretty_flags(self.advertise, value_name_map.values()))
q.breakable()
q.text('}')
port_mod_prop.subtypes[0] = ethernet
class experimenter(port_mod_prop):
subtypes = {}
type = 65535
def __init__(self, experimenter=None, exp_type=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if exp_type != None:
self.exp_type = exp_type
else:
self.exp_type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.exp_type))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.experimenter = reader.read("!L")[0]
obj.exp_type = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
if self.exp_type != other.exp_type: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.text("exp_type = ");
q.text("%#x" % self.exp_type)
q.breakable()
q.text('}')
port_mod_prop.subtypes[65535] = experimenter
class optical(port_mod_prop):
type = 1
def __init__(self, configure=None, freq_ldma=None, fl_offset=None, grid_span=None, tx_pwr=None):
if configure != None:
self.configure = configure
else:
self.configure = 0
if freq_ldma != None:
self.freq_ldma = freq_ldma
else:
self.freq_ldma = 0
if fl_offset != None:
self.fl_offset = fl_offset
else:
self.fl_offset = 0
if grid_span != None:
self.grid_span = grid_span
else:
self.grid_span = 0
if tx_pwr != None:
self.tx_pwr = tx_pwr
else:
self.tx_pwr = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for length at index 1
packed.append(struct.pack("!L", self.configure))
packed.append(struct.pack("!L", self.freq_ldma))
packed.append(struct.pack("!L", self.fl_offset))
packed.append(struct.pack("!L", self.grid_span))
packed.append(struct.pack("!L", self.tx_pwr))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = optical()
_type = reader.read("!H")[0]
assert(_type == 1)
_length = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_length, 4)
obj.configure = reader.read("!L")[0]
obj.freq_ldma = reader.read("!L")[0]
obj.fl_offset = reader.read("!L")[0]
obj.grid_span = reader.read("!L")[0]
obj.tx_pwr = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.configure != other.configure: return False
if self.freq_ldma != other.freq_ldma: return False
if self.fl_offset != other.fl_offset: return False
if self.grid_span != other.grid_span: return False
if self.tx_pwr != other.tx_pwr: return False
return True
def pretty_print(self, q):
q.text("optical {")
with q.group():
with q.indent(2):
q.breakable()
q.text("configure = ");
q.text("%#x" % self.configure)
q.text(","); q.breakable()
q.text("freq_ldma = ");
q.text("%#x" % self.freq_ldma)
q.text(","); q.breakable()
q.text("fl_offset = ");
q.text("%#x" % self.fl_offset)
q.text(","); q.breakable()
q.text("grid_span = ");
q.text("%#x" % self.grid_span)
q.text(","); q.breakable()
q.text("tx_pwr = ");
q.text("%#x" % self.tx_pwr)
q.breakable()
q.text('}')
port_mod_prop.subtypes[1] = optical
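# Illustrative sketch (not part of the generated module): packing an 'ethernet'
# property produces a TLV whose length field at bytes 2..3 covers the whole
# property: 2 bytes type + 2 bytes length + 4 bytes advertise = 8 bytes.
# Assumes Python 2 string buffers, as used by the generated code above.
if __name__ == "__main__":
    prop = ethernet(advertise=0x1 | 0x1000) # OFPPF_10MB_HD | OFPPF_FIBER
    buf = prop.pack()
    assert len(buf) == 8
    assert struct.unpack("!H", buf[2:4])[0] == len(buf) # length field covers the full TLV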
|
|
from __future__ import unicode_literals
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.template.response import TemplateResponse
from django.utils.log import getLogger
from django.utils.decorators import classonlymethod
logger = getLogger('django.request')
class ContextMixin(object):
"""
A default context mixin that passes the keyword arguments received by
get_context_data as the template context.
"""
def get_context_data(self, **kwargs):
return kwargs
class View(object):
"""
Intentionally simple parent class for all views. Only implements
dispatch-by-method and simple sanity checking.
"""
http_method_names = ['get', 'post', 'put', 'delete', 'head', 'options', 'trace']
def __init__(self, **kwargs):
"""
Constructor. Called in the URLconf; can contain helpful extra
keyword arguments, and other things.
"""
# Go through keyword arguments, and either save their values to our
# instance, or raise an error.
for key, value in kwargs.iteritems():
setattr(self, key, value)
@classonlymethod
def as_view(cls, **initkwargs):
"""
Main entry point for a request-response process.
"""
# sanitize keyword arguments
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r" % (
cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
self.request = request
self.args = args
self.kwargs = kwargs
return handler(request, *args, **kwargs)
def http_method_not_allowed(self, request, *args, **kwargs):
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': self.request
}
)
return http.HttpResponseNotAllowed(self._allowed_methods())
def options(self, request, *args, **kwargs):
"""
Handles responding to requests for the OPTIONS HTTP verb.
"""
response = http.HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = 0
return response
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
class TemplateResponseMixin(object):
"""
A mixin that can be used to render a template.
"""
template_name = None
response_class = TemplateResponse
def render_to_response(self, context, **response_kwargs):
"""
Returns a response, using the `response_class` for this
view, with a template rendered with the given context.
If any keyword arguments are provided, they will be
passed to the constructor of the response class.
"""
return self.response_class(
request = self.request,
template = self.get_template_names(),
context = context,
**response_kwargs
)
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
if self.template_name is None:
raise ImproperlyConfigured(
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'")
else:
return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, View):
"""
A view that renders a template. This view is different from all the others
insofar as it also passes ``kwargs`` as ``params`` to the template context.
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(params=kwargs)
return self.render_to_response(context)
class RedirectView(View):
"""
A view that provides a redirect on any GET request.
"""
permanent = True
url = None
query_string = False
def get_redirect_url(self, **kwargs):
"""
        Return the URL to redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method.
"""
if self.url:
url = self.url % kwargs
args = self.request.META.get('QUERY_STRING', '')
if args and self.query_string:
url = "%s?%s" % (url, args)
return url
else:
return None
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(**kwargs)
if url:
if self.permanent:
return http.HttpResponsePermanentRedirect(url)
else:
return http.HttpResponseRedirect(url)
else:
logger.warning('Gone: %s', self.request.path,
extra={
'status_code': 410,
'request': self.request
})
return http.HttpResponseGone()
def head(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def options(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
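# Illustrative sketch (not part of this module): as_view() only accepts keyword
# arguments that already exist as class attributes, and refuses HTTP method names.
# '_Probe' is a hypothetical subclass used purely for this demonstration.
if __name__ == '__main__':
    class _Probe(View):
        greeting = None
        def get(self, request, *args, **kwargs):
            return http.HttpResponse(self.greeting)
    view = _Probe.as_view(greeting='hello') # allowed: 'greeting' is a class attribute
    try:
        _Probe.as_view(missing='boom') # rejected: unknown keyword
    except TypeError as exc:
        print(exc)
    try:
        _Probe.as_view(get='nope') # rejected: HTTP method names cannot be passed here
    except TypeError as exc:
        print(exc)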
|
|
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
    ...     raise TypeError("%r is not JSON serializable" % (obj,))
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
# Django modification: try to use the system version first, provided it's
# either a later version or has the C speedups in place. Otherwise, fall
# back to our local copy.
__version__ = '2.0.7'
use_system_version = False
try:
# The system-installed version has priority providing it is either not an
# earlier version or it contains the C speedups.
import simplejson
if (simplejson.__version__.split('.') >= __version__.split('.') or
hasattr(simplejson, '_speedups')):
from simplejson import *
use_system_version = True
# Make sure we copy over the version. See #17071
__version__ = simplejson.__version__
except ImportError:
pass
if not use_system_version:
try:
from json import * # Python 2.6 preferred over local copy.
# There is a "json" package around that is not Python's "json", so we
# check for something that is only in the namespace of the version we
# want.
JSONDecoder
use_system_version = True
# Make sure we copy over the version. See #17071
from json import __version__ as json_version
__version__ = json_version
except (ImportError, NameError):
pass
# If all else fails, we have a bundled version that can be used.
if not use_system_version:
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from django.utils.simplejson.decoder import JSONDecoder
from django.utils.simplejson.encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
    If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
    in strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
    strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
    ``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
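# Illustrative sketch (not part of this module): with all-default arguments
# dumps()/loads() take the cached encoder/decoder fast path documented above,
# while any non-default argument (e.g. custom separators) builds a fresh one.
if __name__ == '__main__':
    data = {'a': 1, 'b': [True, None, 'text']}
    s = dumps(data) # cached default encoder
    assert loads(s) == data # cached default decoder
    compact = dumps(data, separators=(',', ':')) # non-default arguments -> fresh JSONEncoder
    assert loads(compact) == data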
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CSR sparse matrix tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import sparse
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
CPU = "/device:CPU:0"
GPU = "/device:GPU:0"
def dense_to_csr_sparse_matrix(dense):
dense_t = ops.convert_to_tensor(dense)
locs = array_ops.stop_gradient(array_ops.where(math_ops.abs(dense_t) > 0))
return sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(dense_t, locs)
def _swap(a, i, j):
a[i], a[j] = a[j], a[i]
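# Illustrative helper (not used by the tests below): the SciPy counterpart of the
# dense -> CSR conversion exercised throughout this file. For a 2D dense array it
# yields the same (row_ptrs, col_inds, values) triple that
# csr_sparse_matrix_components() returns for a single batch.
def _scipy_csr_components(dense):
  csr = sparse.csr_matrix(dense)
  return csr.indptr, csr.indices, csr.data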
class CSRSparseMatrixOpsTest(test.TestCase):
@classmethod
def setUpClass(cls): # pylint: disable=g-missing-super-call
cls._gpu_available = test_util.is_gpu_available()
# TODO(ebrevdo): This will work once we find a way to get rendezvous
# working for CSRSparseMatrix and can remove the HostMemory
# annotations for the other ops.
@test_util.run_in_graph_and_eager_modes
def DISABLEDtestFromProto(self):
if not self._gpu_available:
return
a_indices = np.array([[0, 0], [2, 3]])
a_values = np.asarray([1.0, 5.0], dtype=np.float32)
a_dense_shape = np.asarray([5, 6], dtype=np.int64)
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_csr_mat = a_sparse_mat.tocsr()
a_col_inds = a_csr_mat.indices
a_row_ptrs = a_csr_mat.indptr
# Format of SparseMatrix:
# type_name == "tensorflow::CSRSparseMatrix"
# metadata == b (validated)
# tensors == [dense_shape, row_ptrs, col_indices, values]
dense_shape_proto = tensor_util.make_tensor_proto(a_dense_shape)
row_ptrs_proto = tensor_util.make_tensor_proto(a_row_ptrs)
col_inds_proto = tensor_util.make_tensor_proto(a_col_inds)
values_proto = tensor_util.make_tensor_proto(a_values)
variant_tensor_data = tensor_pb2.VariantTensorDataProto(
type_name="tensorflow::CSRSparseMatrix",
metadata=np.asarray(True).tobytes(),
tensors=[
dense_shape_proto, row_ptrs_proto, col_inds_proto, values_proto
])
tensor_proto = tensor_pb2.TensorProto(
dtype=dtypes.variant.as_datatype_enum,
tensor_shape=tensor_shape.TensorShape([]).as_proto())
tensor_proto.variant_val.extend([variant_tensor_data])
a_sm = constant_op.constant(tensor_proto)
a_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
a_sm, type=dtypes.float32)
self.evaluate(a_rt)
@test_util.run_in_graph_and_eager_modes
def testSparseTensorConversion(self):
a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
a_values = [1.0, 5.0, -1.0, -2.0]
a_dense_shape = [5, 6]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_csr_mat = a_sparse_mat.tocsr()
# Convert 2D SparseTensor to CSR Matrix
a_st = sparse_tensor.SparseTensor(a_indices, a_values, a_dense_shape)
a_st = math_ops.cast(a_st, dtypes.float32)
a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
a_st.indices, a_st.values, a_st.dense_shape)
# Get row indices and columns for batch 0.
a_sm_row_ptrs, a_sm_col_inds, a_sm_values = (
sparse_csr_matrix_ops.csr_sparse_matrix_components(
a_sm, 0, type=a_st.dtype))
a_sm_row_ptrs_values, a_sm_col_inds_values, a_sm_values_values = (
self.evaluate((a_sm_row_ptrs, a_sm_col_inds, a_sm_values)))
self.assertAllEqual(a_csr_mat.indices, a_sm_col_inds_values)
self.assertAllEqual(a_csr_mat.indptr, a_sm_row_ptrs_values)
self.assertAllClose(a_values, a_sm_values_values)
# Convert CSR Matrix to 2D SparseTensor
a_st_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
a_sm, type=a_st.dtype)
a_st_rt_value = self.evaluate(a_st_rt)
self.assertAllEqual(a_indices, a_st_rt_value.indices)
self.assertAllClose(a_values, a_st_rt_value.values)
self.assertAllEqual(a_dense_shape, a_st_rt_value.dense_shape)
# TODO(b/139491352): Add handle_data propagation to array_ops.identity.
@test_util.run_deprecated_v1
def testCSRSparseMatrixResourceVariable(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
a_mats = sparsify(np.random.randn(*dense_shape)).astype(np.float32)
a_sm = dense_to_csr_sparse_matrix(a_mats)
with ops.device("/gpu:0"):
v = variable_scope.get_variable("sm", initializer=a_sm, use_resource=True)
v_id = array_ops.identity(v)
self.assertEqual(
sparse_csr_matrix_ops.dense_shape_and_type(v_id).shape, a_mats.shape)
a_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
v, type=dtypes.float32)
v_reassign = state_ops.assign(v, v_id).op
with self.assertRaisesOpError("Error while reading resource variable sm"):
self.evaluate(a_rt)
self.evaluate(v.initializer)
a_rt_value = self.evaluate(a_rt)
self.assertAllClose(a_mats, a_rt_value)
self.evaluate(v_reassign)
a_rt_reassigned_value = self.evaluate(a_rt)
self.assertAllClose(a_mats, a_rt_reassigned_value)
@test_util.run_in_graph_and_eager_modes
def testBatchSparseTensorConversion(self):
a_indices = np.array([[0, 0, 0], [0, 2, 3], [2, 0, 1]])
a_values = [1.0, 5.0, 6.0]
a_dense_shape = [3, 5, 6]
a_sparse_mats = [
sparse.coo_matrix(([1.0, 5.0], ([0, 2], [0, 3])),
shape=a_dense_shape[1:]),
sparse.coo_matrix(([], ([], [])), shape=a_dense_shape[1:]),
sparse.coo_matrix(([6.0], ([0], [1])), shape=a_dense_shape[1:])
]
a_csr_mats = [m.tocsr() for m in a_sparse_mats]
# Convert 3D SparseTensor to CSR Matrix
a_st = sparse_tensor.SparseTensor(a_indices, a_values, a_dense_shape)
a_st = math_ops.cast(a_st, dtypes.float32)
a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
a_st.indices, a_st.values, a_st.dense_shape)
# Get row indices and columns for batches.
a_sm_components = [
sparse_csr_matrix_ops.csr_sparse_matrix_components(
a_sm, i, type=a_st.dtype) for i in range(3)
]
a_sm_values = self.evaluate(a_sm_components)
for i, (a_sm_val, a_csr_mat) in enumerate(zip(a_sm_values, a_csr_mats)):
tf_logging.info("Comparing batch %d" % i)
self.assertAllEqual(a_csr_mat.indptr, a_sm_val.row_ptrs)
self.assertAllEqual(a_csr_mat.indices, a_sm_val.col_inds)
self.assertAllClose(a_csr_mat.data, a_sm_val.values)
# Convert CSR batched Matrix to 3D SparseTensor
a_st_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
a_sm, type=a_st.dtype)
a_st_rt_value = self.evaluate(a_st_rt)
self.assertAllEqual(a_indices, a_st_rt_value.indices)
self.assertAllClose(a_values, a_st_rt_value.values)
self.assertAllEqual(a_dense_shape, a_st_rt_value.dense_shape)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseTensorConversion(self):
# Test two sets of conversions to check behavior of the ops in a
# concurrent environment (parallel executions of the ST -> SM ops).
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
mats = [
sparsify(np.random.randn(*dense_shape)).astype(np.float32)
for _ in range(2)
]
csr_mats = [list(map(sparse.csr_matrix, mat)) for mat in mats]
mats_t = [ops.convert_to_tensor(mat) for mat in mats]
mats_locs = [array_ops.where(mat_t > 0) for mat_t in mats_t]
sparse_tensors = list()
for mat_t, mat_loc in zip(mats_t, mats_locs):
sparse_tensors.append(
sparse_tensor.SparseTensor(mat_loc,
array_ops.gather_nd(mat_t,
mat_loc), dense_shape))
sparse_matrices = [
sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
st.indices, st.values, st.dense_shape) for st in sparse_tensors
]
sm_nnz = [
sparse_csr_matrix_ops.sparse_matrix_nnz(sm) for sm in sparse_matrices
]
# Get row indices and columns for batches.
sm_components = list()
for sm in sparse_matrices:
sm_components.append([
sparse_csr_matrix_ops.csr_sparse_matrix_components(
sm, i, type=dtypes.float32) for i in range(dense_shape[0])
])
sm_nnz_values, sm_values = self.evaluate((sm_nnz, sm_components))
for i, (sm_values_i, csr_mats_i) in enumerate(zip(sm_values, csr_mats)):
for b, (sm_val, csr_mat) in enumerate(zip(sm_values_i, csr_mats_i)):
tf_logging.info("Comparing matrix %d batch %d" % (i, b))
self.assertEqual(csr_mat.nnz, sm_nnz_values[i][b])
self.assertAllEqual(csr_mat.indptr, sm_val.row_ptrs)
self.assertAllEqual(csr_mat.indices, sm_val.col_inds)
self.assertAllClose(csr_mat.data, sm_val.values)
# Convert CSR batched Matrix to 3D SparseTensor
st_rt = [
sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
sm, type=dtypes.float32) for sm in sparse_matrices
]
st_values, st_rt_values = self.evaluate((sparse_tensors, st_rt))
for (st_value, st_rt_value) in zip(st_values, st_rt_values):
self.assertAllEqual(st_value.indices, st_rt_value.indices)
self.assertAllClose(st_value.values, st_rt_value.values)
self.assertAllEqual(dense_shape, st_rt_value.dense_shape)
@test_util.run_in_graph_and_eager_modes
def testDenseConversion(self):
a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
a_values = np.array([1.0, 5.0, -1.0, -2.0]).astype(np.float32)
a_dense_shape = [5, 6]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_csr_mat = a_sparse_mat.tocsr()
a_dense = a_sparse_mat.todense()
# Convert 2D SparseTensor to CSR Matrix
a_sm = dense_to_csr_sparse_matrix(a_dense)
# Get row indices and columns for batch 0.
a_sm_row_ptrs, a_sm_col_inds, a_sm_values = (
sparse_csr_matrix_ops.csr_sparse_matrix_components(
a_sm, 0, type=dtypes.float32))
a_sm_row_ptrs_values, a_sm_col_inds_values, a_sm_values_values = (
self.evaluate((a_sm_row_ptrs, a_sm_col_inds, a_sm_values)))
self.assertAllEqual(a_csr_mat.indices, a_sm_col_inds_values)
self.assertAllEqual(a_csr_mat.indptr, a_sm_row_ptrs_values)
self.assertAllClose(a_values, a_sm_values_values)
# Convert CSR Matrix to 2D dense matrix
a_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
a_sm, dtypes.float32)
a_rt_value = self.evaluate(a_rt)
self.assertAllEqual(a_dense, a_rt_value)
@test_util.run_in_graph_and_eager_modes
def testBatchDenseConversion(self):
a_dense_shape = [4, 5, 6]
a_sparse_mats = [
sparse.coo_matrix(([1.0, 5.0], ([0, 2], [0, 3])),
shape=a_dense_shape[1:]),
sparse.coo_matrix(([], ([], [])), shape=a_dense_shape[1:]),
sparse.coo_matrix(([6.0], ([0], [1])), shape=a_dense_shape[1:]),
sparse.coo_matrix(([], ([], [])), shape=a_dense_shape[1:]),
]
a_csr_mats = [m.tocsr() for m in a_sparse_mats]
a_dense = np.asarray([m.todense() for m in a_sparse_mats], dtype=np.float32)
# Convert 3D SparseTensor to CSR Matrix
a_sm = dense_to_csr_sparse_matrix(a_dense)
# Get row indices and columns for batches.
a_sm_components = [
sparse_csr_matrix_ops.csr_sparse_matrix_components(
a_sm, i, type=dtypes.float32) for i in range(3)
]
a_sm_values = self.evaluate(a_sm_components)
for i, (a_sm_val, a_csr_mat) in enumerate(zip(a_sm_values, a_csr_mats)):
tf_logging.info("Comparing batch %d" % i)
self.assertAllEqual(a_csr_mat.indptr, a_sm_val.row_ptrs)
self.assertAllEqual(a_csr_mat.indices, a_sm_val.col_inds)
self.assertAllClose(a_csr_mat.data, a_sm_val.values)
# Convert CSR batched Matrix to 3D SparseTensor
a_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
a_sm, type=dtypes.float32)
a_rt_value = self.evaluate(a_rt)
self.assertAllEqual(a_dense, a_rt_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchDenseConversion(self):
# Test two sets of conversions to check behavior of the ops in a
# concurrent environment (parallel executions of the ST -> SM
# ops).
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
mats = [
sparsify(np.random.randn(*dense_shape)).astype(np.float32)
for _ in range(2)
]
csr_mats = [[sparse.csr_matrix(m) for m in mat] for mat in mats]
mats_t = [ops.convert_to_tensor(mat) for mat in mats]
mats_locs = [array_ops.where(mat_t > 0) for mat_t in mats_t]
sparse_matrices = [
sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(mat, mat_loc)
for (mat, mat_loc) in zip(mats_t, mats_locs)
]
sm_nnz = [
sparse_csr_matrix_ops.sparse_matrix_nnz(sm) for sm in sparse_matrices
]
# Get row indices and columns for batches.
sm_components = []
for sm in sparse_matrices:
sm_components.append([
sparse_csr_matrix_ops.csr_sparse_matrix_components(
sm, i, type=dtypes.float32) for i in range(dense_shape[0])
])
sm_nnz_values, sm_values = self.evaluate((sm_nnz, sm_components))
for i, (sm_values_i, csr_mats_i) in enumerate(zip(sm_values, csr_mats)):
for b, (sm_val, csr_mat) in enumerate(zip(sm_values_i, csr_mats_i)):
tf_logging.info("Comparing matrix %d batch %d" % (i, b))
self.assertEqual(csr_mat.nnz, sm_nnz_values[i][b])
self.assertAllEqual(csr_mat.indptr, sm_val.row_ptrs)
self.assertAllEqual(csr_mat.indices, sm_val.col_inds)
self.assertAllClose(csr_mat.data, sm_val.values)
# Convert CSR batched Matrix to 3D dense tensor
sm_rt = [
sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
sm, type=dtypes.float32) for sm in sparse_matrices
]
sm_rt_values = self.evaluate(sm_rt)
for (mat, sm_rt_value) in zip(mats, sm_rt_values):
self.assertAllEqual(mat, sm_rt_value)
@test_util.run_in_graph_and_eager_modes
def testSparseMatrixAdd(self):
if not self._gpu_available:
return
a_indices = np.array([[0, 0], [2, 3]])
a_values = np.array([1.0, 5.0]).astype(np.float32)
a_dense_shape = [5, 6]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_dense = a_sparse_mat.todense()
b_indices = np.array([[1, 0], [1, 4], [2, 3], [4, 1]])
b_values = np.array([1.0, 0.5, -5.0, 2.0]).astype(np.float32)
b_dense_shape = [5, 6]
b_sparse_mat = sparse.coo_matrix((b_values,
(b_indices[:, 0], b_indices[:, 1])),
shape=b_dense_shape)
b_dense = b_sparse_mat.todense()
for (alpha, beta) in [(1.0, 1.0), (1.0, -1.0), (0.25, 0.5)]:
a_sum_b_sparse_mat = alpha * a_sparse_mat + beta * b_sparse_mat
# Convert 2D SparseTensor to CSR Matrix
a_sm = dense_to_csr_sparse_matrix(a_dense)
b_sm = dense_to_csr_sparse_matrix(b_dense)
alpha = np.float32(alpha)
beta = np.float32(beta)
c_sm = sparse_csr_matrix_ops.sparse_matrix_add(
a_sm, b_sm, alpha=alpha, beta=beta)
c_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, dtypes.float32)
c_dense_value = self.evaluate(c_dense)
self.assertAllClose(a_sum_b_sparse_mat.todense(), c_dense_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixAdd(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
a_mats = sparsify(np.random.randn(*dense_shape)).astype(np.float32)
b_mats = sparsify(np.random.randn(*dense_shape)).astype(np.float32)
for (alpha, beta) in [(1.0, 1.0), (1.0, -1.0), (0.25, 0.5)]:
tf_logging.info("testLargeBatchSparseMatrixAdd, comparing "
"alpha, beta (%d, %d)" % (alpha, beta))
a_sm = dense_to_csr_sparse_matrix(a_mats)
b_sm = dense_to_csr_sparse_matrix(b_mats)
alpha = np.float32(alpha)
beta = np.float32(beta)
c_sm = sparse_csr_matrix_ops.sparse_matrix_add(
a_sm, b_sm, alpha=alpha, beta=beta)
c_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, dtypes.float32)
c_dense_value = self.evaluate(c_dense)
self.assertAllClose(c_dense_value, alpha * a_mats + beta * b_mats)
@test_util.run_in_graph_and_eager_modes
def testSparseMatrixMatMul(self):
if not self._gpu_available:
return
for shapes in [[(5, 6), (6, 1)], [(5, 6), (6, 2)]]:
a_indices = np.array([[0, 0], [2, 3]])
a_values = np.array([1.0, 5.0]).astype(np.float32)
a_dense_shape = shapes[0]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_dense = a_sparse_mat.todense()
# Will multiply sparse a (shape=shapes[0]) by dense b (shape=shapes[1]).
b = np.random.randn(*shapes[1]).astype(np.float32)
a_sm = dense_to_csr_sparse_matrix(a_dense)
c = sparse_csr_matrix_ops.sparse_matrix_mat_mul(a=a_sm, b=b)
c_value = self.evaluate(c)
expected_c_value = a_sparse_mat.dot(b)
self.assertAllClose(expected_c_value, c_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixMatMul(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
for dtype in np.float32, np.complex64:
for (transpose_a, transpose_b) in ((False, False), (False, True),
(True, False), (True, True)):
for (adjoint_a, adjoint_b) in ((False, False), (False, True),
(True, False), (True, True)):
if (transpose_a and adjoint_a) or (transpose_b and adjoint_b):
continue
for shapes in [[[53, 127, 65], [53, 65, 1]],
[[53, 127, 1], [53, 1, 65]],
[[53, 127, 65], [53, 65, 127]]]:
a_dense_shape = shapes[0]
b_dense_shape = shapes[1]
if transpose_a or adjoint_a:
_swap(a_dense_shape, -2, -1)
if transpose_b or adjoint_b:
_swap(b_dense_shape, -2, -1)
a_mats = sparsify(
(np.random.randn(*a_dense_shape) +
1.j * np.random.randn(*a_dense_shape))).astype(dtype)
b_mats = (np.random.randn(*b_dense_shape) +
1.j * np.random.randn(*b_dense_shape)).astype(dtype)
tf_logging.info(
"testLargeBatchSparseMatrixMatMul transpose_a %s transpose_b "
"%s adjoint_a %s adjoint_b %s" %
(transpose_a, transpose_b, adjoint_a, adjoint_b))
a_sm = dense_to_csr_sparse_matrix(a_mats)
c_t = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
a_sm,
b_mats,
transpose_output=False,
conjugate_output=False,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
c_dense_t = math_ops.matmul(
a_mats,
b_mats,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
self.assertAllEqual(c_dense_t.shape, c_t.shape)
c_t_value, c_dense_t_value = self.evaluate((c_t, c_dense_t))
self.assertAllClose(
c_t_value, c_dense_t_value, rtol=1e-6, atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixMatMulTransposed(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
for dtype in np.float32, np.complex64:
for (transpose_a, transpose_b) in ((False, False), (False, True),
(True, False), (True, True)):
for (adjoint_a, adjoint_b) in ((False, False), (False, True),
(True, False), (True, True)):
if (transpose_a and adjoint_a) or (transpose_b and adjoint_b):
continue
for shapes in [[[53, 127, 65], [53, 65, 1]],
[[53, 127, 1], [53, 1, 65]],
[[53, 127, 65], [53, 65, 127]]]:
a_dense_shape = shapes[0]
b_dense_shape = shapes[1]
if transpose_a or adjoint_a:
_swap(a_dense_shape, -2, -1)
if transpose_b or adjoint_b:
_swap(b_dense_shape, -2, -1)
a_mats = sparsify(
(np.random.randn(*a_dense_shape) +
1.j * np.random.randn(*a_dense_shape))).astype(dtype)
b_mats = (np.random.randn(*b_dense_shape) +
1.j * np.random.randn(*b_dense_shape)).astype(dtype)
tf_logging.info(
"testLargeBatchSparseMatrixMatMul transpose_a %s transpose_b "
"%s adjoint_a %s adjoint_b %s" %
(transpose_a, transpose_b, adjoint_a, adjoint_b))
a_sm = dense_to_csr_sparse_matrix(a_mats)
c_t = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
a_sm,
b_mats,
transpose_output=True,
conjugate_output=False,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
# Example: t(adj(a) . b) = t(b) . conj(a)
c_dense_t = math_ops.matmul(
math_ops.conj(b_mats) if adjoint_b else b_mats,
math_ops.conj(a_mats) if adjoint_a else a_mats,
transpose_a=not (transpose_b or adjoint_b),
transpose_b=not (transpose_a or adjoint_a),
adjoint_a=False,
adjoint_b=False)
self.assertAllEqual(c_t.shape, c_dense_t.shape)
c_t_value, c_dense_t_value = self.evaluate((c_t, c_dense_t))
self.assertAllClose(
c_t_value, c_dense_t_value, rtol=1e-6, atol=1e-5)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixMatMulConjugate(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
a_dense_shape = [53, 65, 127]
b_dense_shape = [53, 127, 67]
a_mats = sparsify(
(np.random.randn(*a_dense_shape) +
1.j * np.random.randn(*a_dense_shape))).astype(np.complex64)
b_mats = (np.random.randn(*b_dense_shape) +
1.j * np.random.randn(*b_dense_shape)).astype(np.complex64)
a_sm = dense_to_csr_sparse_matrix(a_mats)
c_t = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
a_sm, b_mats, conjugate_output=True)
c_dense_t = math_ops.conj(math_ops.matmul(a_mats, b_mats))
self.assertAllEqual(c_t.shape, c_dense_t.shape)
c_t_value, c_dense_t_value = self.evaluate((c_t, c_dense_t))
self.assertAllClose(c_t_value, c_dense_t_value, atol=1e-5, rtol=1e-5)
@test_util.run_in_graph_and_eager_modes
def testSparseMatrixSparseMatMul(self):
a_indices = np.array([[0, 0], [2, 3]])
a_values = np.array([1.0, 5.0]).astype(np.float32)
a_dense_shape = [5, 6]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_dense = a_sparse_mat.todense()
b_indices = np.array([[0, 0], [3, 0], [3, 1]])
b_values = np.array([2.0, 7.0, 8.0]).astype(np.float32)
b_dense_shape = [6, 7]
b_sparse_mat = sparse.coo_matrix((b_values,
(b_indices[:, 0], b_indices[:, 1])),
shape=b_dense_shape)
b_dense = b_sparse_mat.todense()
a_sm = dense_to_csr_sparse_matrix(a_dense)
b_sm = dense_to_csr_sparse_matrix(b_dense)
c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
a=a_sm, b=b_sm, type=dtypes.float32)
c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, dtypes.float32)
c_sm_dense_value = self.evaluate(c_sm_dense)
expected_c_value = a_sparse_mat.dot(b_sparse_mat).todense()
self.assertAllClose(expected_c_value, c_sm_dense_value)
@test_util.run_in_graph_and_eager_modes
def testSparseMatrixSparseMatMul_NumericZerosNotPruned(self):
    # Tests that numeric zeros appearing from the sparse-sparse matrix
    # multiplication are not pruned from the sparse structural representation.
a_indices = np.array([[0, 0], [0, 2]])
a_values = np.array([2.0, -1.0]).astype(np.float32)
a_dense_shape = [2, 3]
a_sparse_mat = sparse.coo_matrix((a_values,
(a_indices[:, 0], a_indices[:, 1])),
shape=a_dense_shape)
a_dense = a_sparse_mat.todense()
b_indices = np.array([[0, 1], [2, 1]])
b_values = np.array([3.0, 6.0]).astype(np.float32)
b_dense_shape = [3, 2]
b_sparse_mat = sparse.coo_matrix((b_values,
(b_indices[:, 0], b_indices[:, 1])),
shape=b_dense_shape)
b_dense = b_sparse_mat.todense()
# Convert to CSRSparseMatrix while removing numeric zeros from the
# structural representation.
a_sm = dense_to_csr_sparse_matrix(a_dense)
b_sm = dense_to_csr_sparse_matrix(b_dense)
# Compute the matmul.
c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
a=a_sm, b=b_sm, type=dtypes.float32)
c_nnz = sparse_csr_matrix_ops.sparse_matrix_nnz(c_sm)
c_nnz_value = self.evaluate(c_nnz)
# Expect that there is a single numeric zero at index (0, 1) if zeros are
# not pruned, since 2.0 * 3.0 + (-1.0) * 6.0 = 0.0.
self.assertAllClose(1, c_nnz_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixSparseMatMul(self):
sparsify = lambda m: m * (m > 0)
for (transpose_a, transpose_b) in ((False, False), (False, True),
(True, False), (True, True)):
for (adjoint_a, adjoint_b) in ((False, False), (False, True),
(True, False), (True, True)):
if (transpose_a and adjoint_a) or (transpose_b and adjoint_b):
continue
a_dense_shape = ([53, 127, 65]
if transpose_a or adjoint_a else [53, 65, 127])
b_dense_shape = ([53, 67, 127]
if transpose_b or adjoint_b else [53, 127, 67])
a_mats = sparsify(np.random.randn(*a_dense_shape)).astype(np.float32)
b_mats = sparsify(np.random.randn(*b_dense_shape).astype(np.float32))
a_sm = dense_to_csr_sparse_matrix(a_mats)
b_sm = dense_to_csr_sparse_matrix(b_mats)
c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
a_sm,
b_sm,
type=dtypes.float32,
transpose_a=transpose_a,
adjoint_a=adjoint_a,
transpose_b=transpose_b,
adjoint_b=adjoint_b)
c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, dtypes.float32)
c_dense_t = math_ops.matmul(
a_mats,
b_mats,
transpose_a=transpose_a,
adjoint_a=adjoint_a,
transpose_b=transpose_b,
adjoint_b=adjoint_b)
c_dense_t_value, c_sm_dense_value = self.evaluate(
(c_dense_t, c_sm_dense))
self.assertAllClose(c_sm_dense_value, c_dense_t_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchRegisteredAddN(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
matrices = [
sparsify(np.random.randn(*dense_shape)).astype(np.float32)
for _ in range(16)
]
sparse_matrices = [dense_to_csr_sparse_matrix(mat) for mat in matrices]
sparse_matrices_sum = math_ops.add_n(sparse_matrices)
sparse_matrices_sum_dense = \
sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
sparse_matrices_sum, dtypes.float32)
sparse_matrices_sum_dense_value = self.evaluate(sparse_matrices_sum_dense)
# Ensure that the dense (numpy) sum across all batches matches the result
# of add_n converted back to dense.
expected_sum = np.sum(matrices, axis=0)
self.assertAllClose(expected_sum, sparse_matrices_sum_dense_value)
@test_util.run_in_graph_and_eager_modes
def testCSRZeros(self):
if not self._gpu_available:
return
a_dense_shape = [65, 127]
b_dense_shape = [53, 127, 67]
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
# Check both rank-2 and rank-3 tensors.
a_sm = sparse_csr_matrix_ops.sparse_matrix_zeros(
a_dense_shape, type=dtype)
b_sm = sparse_csr_matrix_ops.sparse_matrix_zeros(
b_dense_shape, type=dtype)
a_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(a_sm, type=dtype)
b_rt = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(b_sm, type=dtype)
a_rt_value, b_rt_value = self.evaluate((a_rt, b_rt))
self.assertAllEqual(a_rt_value, np.zeros(a_dense_shape))
self.assertAllEqual(b_rt_value, np.zeros(b_dense_shape))
@test_util.run_in_graph_and_eager_modes
def testLargeBatchZerosLike(self):
if not self._gpu_available:
return
batch_size = 53
rows = 128
cols = 67
dense_shape = [batch_size, rows, cols]
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
sparse_matrices = sparse_csr_matrix_ops.sparse_matrix_zeros(
dense_shape, type=dtype)
zeros_like_sparse_matrices = array_ops.zeros_like(sparse_matrices)
zeros_like_components = [
sparse_csr_matrix_ops.csr_sparse_matrix_components(
zeros_like_sparse_matrices, i, type=dtype)
for i in range(batch_size)
]
zeros_like_components_values = self.evaluate(zeros_like_components)
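      # Each batch element of an all-zero CSR matrix still carries rows + 1
      # row pointers (all zero), with empty column indices and values.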
for component in zeros_like_components_values:
self.assertAllEqual(component.row_ptrs, np.zeros(rows + 1, np.int32))
self.assertAllEqual(component.col_inds, np.empty([0], np.int32))
self.assertAllEqual(component.values, np.empty([0],
dtype.as_numpy_dtype))
@test_util.run_in_graph_and_eager_modes
def testTranspose(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [127, 65]
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
mats = sparsify(
(np.random.randn(*dense_shape) +
1.j * np.random.randn(*dense_shape))).astype(dtype.as_numpy_dtype)
for conjugate in False, True:
expected = np.transpose(mats)
if conjugate:
expected = np.conj(expected)
matrices = math_ops.cast(mats, dtype)
sparse_matrices = dense_to_csr_sparse_matrix(matrices)
transpose_sparse_matrices = \
sparse_csr_matrix_ops.sparse_matrix_transpose(
sparse_matrices, conjugate=conjugate, type=dtype)
dense_transposed = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
transpose_sparse_matrices, dtype)
dense_transposed_values = self.evaluate(dense_transposed)
self.assertAllClose(expected, dense_transposed_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchTranspose(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
mats = sparsify(
(np.random.randn(*dense_shape) +
1.j * np.random.randn(*dense_shape))).astype(dtype.as_numpy_dtype)
expected = np.transpose(mats, (0, 2, 1))
for conjugate in False, True:
if conjugate:
expected = np.conj(expected)
matrices = math_ops.cast(mats, dtype)
sparse_matrices = dense_to_csr_sparse_matrix(matrices)
transpose_sparse_matrices = \
sparse_csr_matrix_ops.sparse_matrix_transpose(
sparse_matrices, conjugate=conjugate, type=dtype)
dense_transposed = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
transpose_sparse_matrices, dtype)
dense_transposed_values = self.evaluate(dense_transposed)
self.assertAllClose(expected, dense_transposed_values)
@test_util.run_in_graph_and_eager_modes
def testSoftmax(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [127, 65]
logits = sparsify(np.random.randn(*dense_shape))
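    # The sparse softmax only sees the stored (explicit) entries, so the dense
    # reference masks the implicit zeros with -inf; exp(-inf) == 0, so those
    # positions contribute nothing to the normalization.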
logits_with_ninf = np.copy(logits)
logits_with_ninf[logits == 0] = -np.inf
data_types = [dtypes.float32, dtypes.float64]
for dtype in data_types:
logits_t = math_ops.cast(logits, dtype)
logits_t_with_ninf = math_ops.cast(logits_with_ninf, dtype)
expected = nn_ops.softmax(logits_t_with_ninf)
sparse_logits_t = dense_to_csr_sparse_matrix(logits_t)
softmax_sparse_logits_t = sparse_csr_matrix_ops.sparse_matrix_softmax(
sparse_logits_t, type=dtype)
dense_softmax = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
softmax_sparse_logits_t, dtype)
dense_softmax_values, expected_values = self.evaluate(
(dense_softmax, expected))
self.assertAllClose(expected_values, dense_softmax_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSoftmax(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
logits = sparsify(np.random.randn(*dense_shape))
logits_with_ninf = np.copy(logits)
logits_with_ninf[logits == 0] = -np.inf
data_types = [dtypes.float32, dtypes.float64]
for dtype in data_types:
logits_t = math_ops.cast(logits, dtype)
logits_t_with_ninf = math_ops.cast(logits_with_ninf, dtype)
expected = nn_ops.softmax(logits_t_with_ninf)
sparse_logits_t = dense_to_csr_sparse_matrix(logits_t)
softmax_sparse_logits_t = sparse_csr_matrix_ops.sparse_matrix_softmax(
sparse_logits_t, type=dtype)
dense_softmax = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
softmax_sparse_logits_t, dtype)
dense_softmax_values, expected_values = self.evaluate(
(dense_softmax, expected))
self.assertAllClose(expected_values, dense_softmax_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSoftmaxEmpty(self):
if not self._gpu_available:
return
dense_shape = [53, 65, 127]
sparse_logits_t = sparse_csr_matrix_ops.sparse_matrix_zeros(
dense_shape, type=dtypes.float32)
softmax_sparse_logits_t = sparse_csr_matrix_ops.sparse_matrix_softmax(
sparse_logits_t, type=dtypes.float32)
dense_softmax = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
softmax_sparse_logits_t, dtypes.float32)
dense_softmax_values = self.evaluate(dense_softmax)
self.assertAllEqual(
np.zeros_like(dense_softmax_values), dense_softmax_values)
@test_util.run_in_graph_and_eager_modes
def testSoftmaxGrad(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [127, 65]
softmax = sparsify(np.random.randn(*dense_shape))
grad_softmax = sparsify(np.random.randn(*dense_shape))
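    # Reference softmax gradient: dL/dlogits = (g - sum(g * s, -1)) * s, where
    # s is the softmax output and g is the incoming gradient.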
expected = (
(grad_softmax - np.sum(grad_softmax * softmax, -1, keepdims=True)) *
softmax)
data_types = [dtypes.float32, dtypes.float64]
for dtype in data_types:
softmax_t = math_ops.cast(softmax, dtype)
grad_softmax_t = math_ops.cast(grad_softmax, dtype)
softmax_sparse = dense_to_csr_sparse_matrix(softmax_t)
grad_softmax_sparse = dense_to_csr_sparse_matrix(grad_softmax_t)
gradients_sparse = sparse_csr_matrix_ops.sparse_matrix_softmax_grad(
softmax_sparse, grad_softmax_sparse, dtype)
dense_gradients = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
gradients_sparse, dtype)
dense_gradients_values = self.evaluate((dense_gradients))
self.assertAllClose(expected, dense_gradients_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSoftmaxGrad(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
softmax = sparsify(np.random.randn(*dense_shape))
grad_softmax = sparsify(np.random.randn(*dense_shape))
expected = (
(grad_softmax - np.sum(grad_softmax * softmax, -1, keepdims=True)) *
softmax)
data_types = [dtypes.float32, dtypes.float64]
for dtype in data_types:
softmax_t = math_ops.cast(softmax, dtype)
grad_softmax_t = math_ops.cast(grad_softmax, dtype)
softmax_sparse = dense_to_csr_sparse_matrix(softmax_t)
grad_softmax_sparse = dense_to_csr_sparse_matrix(grad_softmax_t)
gradients_sparse = sparse_csr_matrix_ops.sparse_matrix_softmax_grad(
softmax_sparse, grad_softmax_sparse, dtype)
dense_gradients = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
gradients_sparse, dtype)
dense_gradients_values = self.evaluate((dense_gradients))
self.assertAllClose(expected, dense_gradients_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSoftmaxGradEmpty(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape = [53, 65, 127]
not_empty = sparsify(np.random.randn(*dense_shape)).astype(np.float32)
sparse_empty = sparse_csr_matrix_ops.sparse_matrix_zeros(
dense_shape, type=dtypes.float32)
sparse_not_empty = dense_to_csr_sparse_matrix(not_empty)
gradients_empty_softmax = sparse_csr_matrix_ops.sparse_matrix_softmax_grad(
sparse_empty, sparse_not_empty, dtypes.float32)
gradients_empty_grad_softmax = (
sparse_csr_matrix_ops.sparse_matrix_softmax_grad(
sparse_not_empty, sparse_empty, dtypes.float32))
gradients_empty_both = sparse_csr_matrix_ops.sparse_matrix_softmax_grad(
sparse_empty, sparse_empty, dtypes.float32)
ges = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
gradients_empty_softmax, dtypes.float32)
gegs = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
gradients_empty_grad_softmax, dtypes.float32)
geb = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
gradients_empty_both, dtypes.float32)
ges_v, gegs_v, geb_v = self.evaluate((ges, gegs, geb))
for v in (ges_v, gegs_v, geb_v):
self.assertAllEqual(np.zeros(dense_shape), v)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchConj(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (np.real(m) > 0)
dense_shape = [53, 65, 127]
matrices = (
sparsify(np.random.randn(*dense_shape)) +
1j * np.random.randn(*dense_shape))
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
matrices_t = matrices.astype(dtype.as_numpy_dtype)
expected = np.conj(matrices_t)
sparse_matrices = dense_to_csr_sparse_matrix(matrices_t)
conj_sparse_matrices = math_ops.conj(sparse_matrices)
dense_conj_matrices = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
conj_sparse_matrices, dtype)
conj_values = self.evaluate(dense_conj_matrices)
self.assertAllClose(expected, conj_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixMulScalar(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
a_dense_shape = [53, 65, 127]
a_mats = sparsify(np.random.randn(*a_dense_shape)).astype(np.float32)
b = np.float32(3.5)
expected = a_mats * b
a_sm = dense_to_csr_sparse_matrix(a_mats)
c_t = sparse_csr_matrix_ops.sparse_matrix_mul(a_sm, b)
c_dense_t = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_t, dtypes.float32)
c_dense_t_value = self.evaluate(c_dense_t)
self.assertAllClose(expected, c_dense_t_value)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseMatrixMulVec(self):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
a_dense_shape = [53, 65, 127]
a_mats = sparsify(np.random.randn(*a_dense_shape)).astype(np.float32)
b = np.random.randn(53, 1, 1).astype(np.float32)
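    # sparse_matrix_mul broadcasts `b` against each batch element; here `b`
    # holds one scalar per batch, with shape [batch_size, 1, 1].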
expected = a_mats * b
a_sm = dense_to_csr_sparse_matrix(a_mats)
c_t = sparse_csr_matrix_ops.sparse_matrix_mul(a_sm, b)
c_dense_t = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_t, dtypes.float32)
c_dense_t_value = self.evaluate(c_dense_t)
self.assertAllClose(expected, c_dense_t_value)
@test_util.run_in_graph_and_eager_modes
def testSparseCholesky(self):
dense_matrix = np.array([
[2, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0],
[1, 1, 7, 0, 0, 0],
[0, 0, 0, 4, 0, 0],
[0, 0, 1, 0, 5, 0],
[0, 0, 2, 0, 1, 6],
]).astype(np.complex128)
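    # Only the lower triangle is populated here; `full_dense_matrix` below
    # reconstructs the implied symmetric/Hermitian matrix for verification.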
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
with test_util.force_cpu():
if dtype.is_complex:
dense_matrix += 0.5j * np.tril(dense_matrix, -1)
sparse_matrix = dense_to_csr_sparse_matrix(
math_ops.cast(dense_matrix, dtype))
# Obtain the Sparse Cholesky factor using AMD Ordering for reducing
# fill-in.
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrices = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtype))
dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
cholesky_sparse_matrices, dtype)
# Compute L * Lh where L is the Sparse Cholesky factor.
verification = math_ops.matmul(
dense_cholesky, array_ops.transpose(dense_cholesky, conjugate=True))
# Assert that input matrix A satisfies A = L * Lh.
verification_values = self.evaluate(verification)
full_dense_matrix = (
dense_matrix +
np.conjugate(np.transpose(np.tril(dense_matrix, -1))))
self.assertAllClose(full_dense_matrix, verification_values)
@test_util.run_in_graph_and_eager_modes
def testBatchSparseCholesky(self):
dense_mat = np.array([
# A diagonal matrix.
[
[1, 0, 0, 0], #
[0, 2, 0, 0], #
[0, 0, 3, 0], #
[0, 0, 0, 4],
], #
# A tridiagonal hermitian matrix.
[
[5 + 0j, 1 + 0j, 0 + 0j, 0 + 0j], #
[1 + 0j, 4 + 0j, 1 + 2j, 0 + 0j], #
[0 + 0j, 1 - 2j, 9 + 0j, 3 - 3j], #
[0 + 0j, 0 + 0j, 3 + 3j, 7 + 0j],
], #
# A diagonal matrix with a corner element; for which
# OrderingAMD returns a non-identity permutation.
[
[1, 0, 0, 1.], #
[0, 2, 0, 0.], #
[0, 0, 3, 0.], #
[1, 0, 0, 4.],
] #
]).astype(np.complex128)
data_types = [
dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
]
for dtype in data_types:
sparse_matrix = dense_to_csr_sparse_matrix(
math_ops.cast(dense_mat, dtype))
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrix = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtype))
dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
cholesky_sparse_matrix, dtype)
# Compute L * Lh.
verification = math_ops.matmul(
dense_cholesky,
array_ops.transpose(dense_cholesky, perm=[0, 2, 1], conjugate=True))
verification_values = self.evaluate(verification)
self.assertAllClose(
dense_mat.astype(dtype.as_numpy_dtype), verification_values)
@test_util.run_in_graph_and_eager_modes
def testLargeBatchSparseCholesky(self):
sparsity = 0.1
sparsify = lambda m: m * (m > 1 - sparsity)
batch_size = 53
num_rows = 147
dense_shape = [batch_size, num_rows, num_rows]
dense_matrix = sparsify(np.random.uniform(size=dense_shape)).astype(
np.float32)
# Create a "random" SPD matrix, by choosing each entry of A between
# 0 and 1 at the specified density, and computing 0.5(A + At) + n*I.
# This ensures diagonal dominance which implies positive-definiteness.
dense_matrix = (
0.5 *
(dense_matrix + array_ops.transpose(dense_matrix, perm=[0, 2, 1])) +
num_rows * linalg_ops.eye(dense_shape[-1], batch_shape=[batch_size]))
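    # With the entries of A in [0, 1], every off-diagonal row sum is at most
    # num_rows - 1 while each diagonal entry is at least num_rows, so the
    # matrix is strictly diagonally dominant and therefore positive definite.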
# Compute the fill-in reducing permutation and use it to perform
# the Sparse Cholesky factorization.
sparse_matrix = dense_to_csr_sparse_matrix(dense_matrix)
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrix = \
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtypes.float32)
dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
cholesky_sparse_matrix, dtypes.float32)
# Compute L * Lh.
verification = math_ops.matmul(
dense_cholesky, array_ops.transpose(dense_cholesky, perm=[0, 2, 1]))
verification_values = self.evaluate(verification)
self.assertAllClose(dense_matrix, verification_values, atol=1e-5, rtol=1e-5)
@test_util.run_in_graph_and_eager_modes
def testSparseCholesky_InvalidMatrix(self):
# Verify that non-SPD matrices result in an Invalid Argument error.
invalid_matrices = [
# zero matrix.
np.array([
[0., 0., 0., 0.], #
[0., 0., 0., 0.], #
[0., 0., 0., 0.], #
[0., 0., 0., 0.] #
]),
# zero diagonal entry.
np.array([
[9., 0., 5., 0.], #
[0., 0., 0., 1.], #
[5., 0., 8., 0.], #
[0., 1., 0., 7.] #
]),
# not positive definite.
np.array([
[2., -2., 0., 0.], #
[-2., 2., 0., 0.], #
[0., 0., 3., -3.], #
[0., 0., -3., 3.] #
]),
]
with test_util.force_cpu():
for invalid_matrix in invalid_matrices:
with self.assertRaises(errors.InvalidArgumentError):
sparse_matrix = dense_to_csr_sparse_matrix(
invalid_matrix.astype(np.float32))
# Compute the fill-in reducing permutation and use it to perform
# the Sparse Cholesky factorization.
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrices = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtypes.float32))
# Convert the Cholesky factor to a dense matrix to be evaluated.
dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
cholesky_sparse_matrices, type=dtypes.float32)
self.evaluate(dense_cholesky)
@test_util.run_in_graph_and_eager_modes
def testOrderingAMD(self):
num_rows = 6
# An SPD matrix where AMD ordering can reduce fill-in for Cholesky factor.
dense_matrix = np.array([
[7, 0, 0, 0, 0, 0],
[1, 4, 0, 0, 0, 0],
[1, 1, 3, 0, 0, 0],
[0, 0, 0, 4, 0, 0],
[2, 0, 0, 0, 5, 0],
[1, 2, 2, 0, 0, 6],
]).astype(np.float32)
with test_util.force_cpu():
sparse_matrix = dense_to_csr_sparse_matrix(dense_matrix)
# Obtain the Sparse Cholesky factor with the identity permutation as the
# fill-in reducing ordering.
cholesky_without_ordering = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, math_ops.range(num_rows), type=dtypes.float32))
cholesky_without_ordering_nnz = sparse_csr_matrix_ops.sparse_matrix_nnz(
cholesky_without_ordering)
# Obtain the Sparse Cholesky factor using AMD Ordering for reducing
# fill-in.
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_with_amd = sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtypes.float32)
cholesky_with_amd_nnz = sparse_csr_matrix_ops.sparse_matrix_nnz(
cholesky_with_amd)
(ordering_amd_value, cholesky_with_amd_nnz_value,
cholesky_without_ordering_nnz_value) = self.evaluate(
[ordering_amd, cholesky_with_amd_nnz, cholesky_without_ordering_nnz])
# AMD ordering should return a valid permutation.
self.assertAllClose(np.arange(num_rows), np.sort(ordering_amd_value))
# Check that cholesky with AMD ordering has a strictly lower nonzero count
# for this matrix.
self.assertLess(cholesky_with_amd_nnz_value,
cholesky_without_ordering_nnz_value)
class CSRSparseMatrixOpsBenchmark(test.Benchmark):
def benchmark_sparse_matrix_mat_mul_gpu(self):
if not test_util.is_gpu_available():
return
sparsify = lambda m: array_ops.where(m > 2, m, array_ops.zeros_like(m))
# XW, X dense and W sparse
# X is shaped [{1, 8, 16}, 2000]
# W is shaped [2000, 4000]
for batch_size in [1, 8, 16]:
x_dense_shape = [batch_size, 2000]
w_dense_shape = [2000, 4000]
with ops.Graph().as_default(), ops.device("/gpu:0"):
x_mats = random_ops.random_normal(x_dense_shape, dtype=dtypes.float32)
w_mats = sparsify(
random_ops.random_normal(w_dense_shape, dtype=dtypes.float32))
nnz = array_ops.shape(array_ops.where(w_mats))[0]
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(w_dense_shape)
w_sm = dense_to_csr_sparse_matrix(w_mats)
with ops.name_scope("w_sm_var"):
w_sm_var = variable_scope.get_variable(
"sm", initializer=w_sm, use_resource=True)
w_sm_var_v = w_sm_var.read_value()
with ops.name_scope("w_var"):
w_var = variable_scope.get_variable(
"sm_dense", initializer=w_mats, use_resource=True)
w_var_v = w_var.read_value()
with ops.name_scope("b"):
x = variable_scope.get_variable(
"b", initializer=x_mats, use_resource=True)
x_v = x.read_value()
# X*W = (W'*X')'
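        # sparse_matrix_mat_mul takes the sparse operand first, so the dense
        # product X * W is computed as (W^T * X^T)^T using the transpose_a,
        # transpose_b and transpose_output flags below.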
xw_sparse = sparse_csr_matrix_ops.sparse_matrix_mat_mul(
w_sm_var_v,
x_v,
transpose_a=True,
transpose_b=True,
transpose_output=True)
xw_dense = math_ops.matmul(x_v, w_var_v)
with session.Session() as sess:
self.evaluate(
[w_var.initializer, w_sm_var.initializer, x.initializer])
nnz_value, ratio_value = self.evaluate((nnz, ratio))
name_template = (
"sparse_matrix_mat_mul_gpu_%s_W_2000x4000_batch_size_%d")
self.run_op_benchmark(
sess,
xw_sparse.op,
name=name_template % ("sparse", batch_size),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=50)
self.run_op_benchmark(
sess,
xw_dense.op,
name=name_template % ("dense", batch_size),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=50)
def benchmark_sparse_matrix_sparse_matmul(self):
density = 0.05
# pylint: disable=g-long-lambda
sparsify = lambda m: array_ops.where(m > 1. - density, m,
array_ops.zeros_like(m))
# pylint: enable=g-long-lambda
for batch_size in [1, 16]:
for num_threads in [1, 4, 12]:
dense_shape = [batch_size, 250, 250]
for device in [CPU, GPU]:
if device == GPU and not test_util.is_gpu_available():
continue
with ops.Graph().as_default(), ops.device(device):
x_mats = sparsify(
random_ops.random_uniform(dense_shape, dtype=dtypes.float32))
y_mats = sparsify(
random_ops.random_uniform(dense_shape, dtype=dtypes.float32))
nnz = array_ops.shape(array_ops.where(x_mats))[0] + array_ops.shape(
array_ops.where(y_mats))[0]
ratio = math_ops.cast(nnz,
dtypes.float32) / (2 * np.prod(dense_shape))
x_sm = dense_to_csr_sparse_matrix(x_mats)
y_sm = dense_to_csr_sparse_matrix(y_mats)
xy_sparse = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
x_sm, y_sm, type=dtypes.float32)
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = self.evaluate((nnz, ratio))
name_template = (
"sparse_matrix_sparse_matmul_%s_N_%d_batch_size_%d_threads_%d"
)
device_str = "cpu" if device == CPU else "gpu"
self.run_op_benchmark(
sess,
xy_sparse.op,
name=name_template %
(device_str, dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=50)
def benchmark_sparse_dense_conversion(self):
sparsity = 0.05
for batch_size in [1, 16]:
for num_threads in [1, 4, 12]:
dense_shape = [batch_size, 750, 750]
for device in [CPU, GPU]:
if device == GPU and not test_util.is_gpu_available():
continue
with ops.Graph().as_default(), ops.device(device):
mats = random_ops.random_uniform(dense_shape, dtype=dtypes.float32)
mats_locs = array_ops.where(mats > 1.0 - sparsity)
sparse_matrices = sparse_csr_matrix_ops.dense_to_csr_sparse_matrix(
mats, mats_locs)
dense_matrices = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
sparse_matrices, type=dtypes.float32)
nnz = math_ops.reduce_sum(
sparse_csr_matrix_ops.sparse_matrix_nnz(sparse_matrices))
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(dense_shape)
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = self.evaluate((nnz, ratio))
device_str = "cpu" if device == CPU else "gpu"
name_template = (
"dense_to_sparse_matrix_%s_N_%d_batch_size_%d_num_threads_%d")
self.run_op_benchmark(
sess,
sparse_matrices.op,
name=name_template %
(device_str, dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value,
},
min_iters=50)
name_template = (
"sparse_matrix_to_dense_%s_N_%d_batch_size_%d_num_threads_%d")
self.run_op_benchmark(
sess,
dense_matrices.op,
name=name_template %
(device_str, dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value,
},
min_iters=50)
def benchmark_sparse_cholesky(self):
# TODO(anudhyan): Use conversions from SparseTensor instead of to get this
# benchmark working for larger matrices. For this to work without GPU, we
# need to write CPU kernels for SparseTensor conversions.
num_rows = 500
density = 0.01
# pylint: disable=g-long-lambda
sparsify = lambda m: array_ops.where(m > 1. - density, m,
array_ops.zeros_like(m))
# pylint: enable=g-long-lambda
for batch_size in [1, 16]:
for num_threads in [1, 4, 12]:
dense_shape = [batch_size, num_rows, num_rows]
with ops.Graph().as_default(), ops.device(CPU):
# Create a "random" SPD matrix, by choosing each entry of A between
# 0 and 1 at the specified density, and computing 0.5(A + At) + n*I.
# This ensures diagonal dominance which implies positive-definiteness.
dense_matrix = sparsify(
random_ops.random_uniform(dense_shape, dtype=dtypes.float32))
spd_dense_matrix = (
0.5 *
(dense_matrix + array_ops.transpose(dense_matrix, perm=[0, 2, 1]))
+ num_rows *
linalg_ops.eye(dense_shape[-1], batch_shape=[batch_size]))
# Convert to SparseMatrix and invoke Sparse Cholesky factorization
# with AMD Ordering.
sparse_matrix = dense_to_csr_sparse_matrix(spd_dense_matrix)
ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(
sparse_matrix)
cholesky_sparse_matrix = (
sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
sparse_matrix, ordering_amd, type=dtypes.float32))
nnz = math_ops.reduce_sum(
sparse_csr_matrix_ops.sparse_matrix_nnz(sparse_matrix))
ratio = math_ops.cast(nnz, dtypes.float32) / np.prod(dense_shape)
ordering_amd_name_template = (
"sparse_matrix_ordering_amd_cpu_N_%d_batch_size_%d_threads_%d")
sparse_cholesky_name_template = (
"sparse_matrix_sparse_cholesky_cpu_N_%d_batch_size_%d_threads_%d")
with session.Session(
config=config_pb2.ConfigProto(
intra_op_parallelism_threads=num_threads)) as sess:
nnz_value, ratio_value = self.evaluate((nnz, ratio))
self.run_op_benchmark(
sess,
ordering_amd.op,
name=ordering_amd_name_template %
(dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=25)
self.run_op_benchmark(
sess,
cholesky_sparse_matrix.op,
name=sparse_cholesky_name_template %
(dense_shape[-1], batch_size, num_threads),
extras={
"percentage_nonzero": ratio_value,
"num_nonzero": nnz_value
},
min_iters=25)
if __name__ == "__main__":
test.main()
|
|
from django import forms
from django.forms.models import ModelFormMetaclass, BaseInlineFormSet
from django.utils.translation import get_language
from django.utils import six
from parler.models import TranslationDoesNotExist
from parler.utils import compat
__all__ = (
'TranslatableModelForm',
'TranslatedField',
'TranslatableModelFormMixin',
#'TranslatableModelFormMetaclass',
)
class TranslatedField(object):
"""
A wrapper for a translated form field.
This wrapper can be used to declare translated fields on the form, e.g.
.. code-block:: python
class MyForm(TranslatableModelForm):
title = TranslatedField()
slug = TranslatedField()
description = TranslatedField(form_class=forms.CharField, widget=TinyMCE)
"""
def __init__(self, **kwargs):
# The metaclass performs the magic replacement with the actual formfield.
self.kwargs = kwargs
class TranslatableModelFormMixin(object):
"""
The base methods added to :class:`TranslatableModelForm` to fetch and store translated fields.
"""
language_code = None # Set by TranslatableAdmin.get_form() on the constructed subclass.
def __init__(self, *args, **kwargs):
current_language = kwargs.pop('_current_language', None) # Used for TranslatableViewMixin
super(TranslatableModelFormMixin, self).__init__(*args, **kwargs)
# Load the initial values for the translated fields
instance = kwargs.get('instance', None)
if instance:
for meta in instance._parler_meta:
try:
# By not auto creating a model, any template code that reads the fields
# will continue to see one of the other translations.
# This also causes admin inlines to show the fallback title in __unicode__.
translation = instance._get_translated_model(meta=meta)
except TranslationDoesNotExist:
pass
else:
for field in meta.get_translated_fields():
self.initial.setdefault(field, getattr(translation, field))
# Typically already set by admin
if self.language_code is None:
if instance:
self.language_code = instance.get_current_language()
return
else:
self.language_code = current_language or get_language()
def _post_clean(self):
# Copy the translated fields into the model
# Make sure the language code is set as early as possible (so it's active during most clean() methods)
self.instance.set_current_language(self.language_code)
self.save_translated_fields()
# Perform the regular clean checks, this also updates self.instance
super(TranslatableModelFormMixin, self)._post_clean()
def save_translated_fields(self):
"""
Save all translated fields.
"""
# Assign translated fields to the model (using the TranslatedAttribute descriptor)
for field in self._get_translated_fields():
try:
value = self.cleaned_data[field]
except KeyError: # Field has a ValidationError
continue
setattr(self.instance, field, value)
def _get_translated_fields(self):
field_names = self._meta.model._parler_meta.get_all_fields()
return [f_name for f_name in field_names if f_name in self.fields]
class TranslatableModelFormMetaclass(ModelFormMetaclass):
"""
Meta class to add translated form fields to the form.
"""
def __new__(mcs, name, bases, attrs):
# Before constructing class, fetch attributes from bases list.
form_meta = _get_mro_attribute(bases, '_meta')
form_base_fields = _get_mro_attribute(bases, 'base_fields', {}) # set by previous class level.
if form_meta:
# Not declaring the base class itself, this is a subclass.
# Read the model from the 'Meta' attribute. This even works in the admin,
# as `modelform_factory()` includes a 'Meta' attribute.
# The other options can be read from the base classes.
form_new_meta = attrs.get('Meta', form_meta)
form_model = form_new_meta.model if form_new_meta else form_meta.model
# Detect all placeholders at this class level.
translated_fields = [
f_name for f_name, attr_value in six.iteritems(attrs) if isinstance(attr_value, TranslatedField)
]
# Include the translated fields as attributes, pretend that these exist on the form.
# This also works when assigning `form = TranslatableModelForm` in the admin,
# since the admin always uses modelform_factory() on the form class, and therefore triggering this metaclass.
if form_model:
for translations_model in form_model._parler_meta.get_all_models():
fields = getattr(form_new_meta, 'fields', form_meta.fields)
exclude = getattr(form_new_meta, 'exclude', form_meta.exclude) or ()
widgets = getattr(form_new_meta, 'widgets', form_meta.widgets) or ()
formfield_callback = attrs.get('formfield_callback', None)
if fields == '__all__':
fields = None
for f_name in translations_model.get_translated_fields():
# Add translated field if not already added, and respect exclude options.
if f_name in translated_fields:
# The TranslatedField placeholder can be replaced directly with actual field, so do that.
                            attrs[f_name] = _get_model_form_field(translations_model, f_name, formfield_callback=formfield_callback, **attrs[f_name].kwargs)
# The next code holds the same logic as fields_for_model()
# The f.editable check happens in _get_model_form_field()
elif f_name not in form_base_fields \
and (fields is None or f_name in fields) \
and f_name not in exclude \
                            and f_name not in attrs:
# Get declared widget kwargs
if f_name in widgets:
# Not combined with declared fields (e.g. the TranslatedField placeholder)
kwargs = {'widget': widgets[f_name]}
else:
kwargs = {}
# See if this formfield was previously defined using a TranslatedField placeholder.
placeholder = _get_mro_attribute(bases, f_name)
if placeholder and isinstance(placeholder, TranslatedField):
kwargs.update(placeholder.kwargs)
# Add the form field as attribute to the class.
formfield = _get_model_form_field(translations_model, f_name, formfield_callback=formfield_callback, **kwargs)
if formfield is not None:
attrs[f_name] = formfield
# Call the super class with updated `attrs` dict.
return super(TranslatableModelFormMetaclass, mcs).__new__(mcs, name, bases, attrs)
def _get_mro_attribute(bases, name, default=None):
for base in bases:
try:
return getattr(base, name)
except AttributeError:
continue
return default
def _get_model_form_field(model, name, formfield_callback=None, **kwargs):
"""
Utility to create the formfield from a model field.
When a field is not editable, a ``None`` will be returned.
"""
field = model._meta.get_field(name)
if not field.editable: # see fields_for_model() logic in Django.
return None
# Apply admin formfield_overrides
if formfield_callback is None:
formfield = field.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(field, **kwargs)
return formfield
class TranslatableModelForm(compat.with_metaclass(TranslatableModelFormMetaclass, TranslatableModelFormMixin, forms.ModelForm)):
"""
The model form to use for translated models.
"""
# six.with_metaclass does not handle more than 2 parent classes for django < 1.6
# but we need all of them in django 1.7 to pass check admin.E016:
# "The value of 'form' must inherit from 'BaseModelForm'"
# so we use our copied version in parler.utils.compat
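# A minimal usage sketch (illustrative; ``MyTranslatedModel`` is a hypothetical
# translatable model):
#
#   class MyModelForm(TranslatableModelForm):
#       class Meta:
#           model = MyTranslatedModel
#           fields = '__all__'
#
# The metaclass above replaces each translated model field (and any
# TranslatedField placeholder) with a regular form field, while
# TranslatableModelFormMixin loads and saves those values per language.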
class TranslatableBaseInlineFormSet(BaseInlineFormSet):
"""
The formset base for creating inlines with translatable models.
"""
language_code = None
def _construct_form(self, i, **kwargs):
form = super(TranslatableBaseInlineFormSet, self)._construct_form(i, **kwargs)
form.language_code = self.language_code # Pass the language code for new objects!
return form
def save_new(self, form, commit=True):
obj = super(TranslatableBaseInlineFormSet, self).save_new(form, commit)
return obj
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import sys
import os
import random
import time
import json
import uuid
import Queue
import multiprocessing
import traceback
#
from mock import Mock
from twisted.internet import reactor
from calvin.utilities import calvinlogger
from calvin.utilities.calvin_callback import CalvinCB, CalvinCBClass
from calvin.runtime.south.plugins.transports.calvinip import calvinip_transport
_log = calvinlogger.get_logger(__name__)
"""
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
def fin():
reactor.callFromThread(reactor.stop)
request.addfinalizer(fin)
print "hejsan"
"""
class BaseTHandler(multiprocessing.Process):
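    # Runs a Twisted reactor inside a child process. Commands arrive on the
    # inqueue as [method_name, args, kwargs] and are dispatched on the reactor
    # thread by _read_thread(); results and assertions are reported back on
    # the outqueue by _return() as [status_or_event, stack, variables].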
def __init__(self, uri, outqueue, inqueue):
multiprocessing.Process.__init__(self)
self._item = None
self._uri = uri
self._outqueue = outqueue
self._inqueue = inqueue
self._running = False
def set_ttf(self, ttf):
self._ttf = ttf
def _return(self, test=False, variables={}, stack=None):
if stack is None:
stack = traceback.format_stack(limit=15)[:-1]
else:
stack = []
self._outqueue.put([test, stack, variables])
def _stop_reactor(self):
if self._item:
# Server not stopped fail
self._return(False, {'self._item': repr(self._item)})
self._running = False
print(reactor, reactor.running)
if reactor.running:
reactor.callLater(.1, reactor.stop)
def _read_thread(self):
print("%s - Read thread started" % self._name)
while self._running:
try:
cmd = self._inqueue.get(timeout=.1)
except:
continue
func = getattr(self, cmd[0])
print("Running: %s(%s, %s)" % (func.__name__, cmd[1], cmd[2]))
reactor.callFromThread(func, *cmd[1], **cmd[2])
print("%s - Read thread died" % self._name)
def start(self, timeout=10):
self._timeout = timeout
self._running = True
multiprocessing.Process.start(self)
def _base_run(self):
reactor.callLater(self._timeout, self._stop_reactor)
reactor.callInThread(self._read_thread)
if not reactor.running:
reactor.run()
def run(self):
self._base_run()
class ServerHandler(BaseTHandler):
def __init__(self, *args, **kwargs):
self._name = "ServerHandler"
BaseTHandler.__init__(self, *args, **kwargs)
def get_callbacks(self):
return {'server_started': [CalvinCB(self._server_started)],
'server_stopped': [CalvinCB(self._server_stopped)],
'peer_disconnected': [CalvinCB(self._peer_disconnected)],
'peer_connected': [CalvinCB(self._peer_connected)]}
def _data_recieved(self, *args):
print("server_data_recieved", args)
def _peer_connected(self, transport, uri):
print("server_peer_connected", transport)
transport.callback_register('join_finished', CalvinCB(self._join_finished))
transport.callback_register('data_recieved', CalvinCB(self._data_recieved))
def _join_finished(self, transport, _id, uri, is_orginator):
print("server_join_finshed", transport, _id, uri)
self._return(transport._coder is not None and _id and uri, {'transport._coder': transport._coder , 'id': _id, 'uri': uri})
self._return('server_join_finished', {'transport': repr(transport), '_id': _id, 'uri': uri})
pass
def _peer_disconnected(self, *args):
print("server peer disconnected", args)
def _server_stopped(self, *args):
print("Server stopped", args)
self._item = None
self._outqueue.put(["server_stopped", repr(args)])
# Die here ?
self._stop_reactor()
def _stop_server(self):
print("_stop_server")
self._item.stop()
self._return(not self._item.is_listening())
def stop(self):
print("server_stop", self._item)
if self._item:
self._stop_server()
# Timeout
reactor.callLater(3, self._stop_reactor)
def _server_started(self, server, port):
print("Server started", server, port)
self._item = server
# put in queue
self._return(port > 0 and port < 65536, {'port': port})
self._return('server_started', port)
def _start_server(self):
self._ttf.listen(self._uri)
def run(self):
print("start server")
reactor.callLater(0, self._start_server)
self._base_run()
print("server finished")
    def _run_command(self, command, *args):
        command(*args)
        reactor.callLater(0, self._start_server)
    def _timeout(self, command, *args):
        self._return(["timeout", command, args])
class ClientHandler(BaseTHandler):
def __init__(self, *args, **kwargs):
self._name = "ServerHandler"
self._port = None
self._stop = False
BaseTHandler.__init__(self, *args, **kwargs)
def set_ttf(self, ttf):
self._ttf = ttf
def set_port(self, port):
print("set_port", port)
self._port = port
def get_callbacks(self):
return {'peer_disconnected': [CalvinCB(self._peer_disconnected)],
'peer_connected': [CalvinCB(self._peer_connected)]}
def _data_recieved(self, data):
print("client_data_recieved", data)
self._return('client_data_recieved', {'data': data})
def _peer_connected(self, transport, uri):
print("client_peer_connected", transport)
transport.callback_register('join_finished', CalvinCB(self._join_finished))
transport.callback_register('data_recieved', CalvinCB(self._data_recieved))
self._return('client_connected', {'transport': repr(transport), 'uri': uri})
self._item = transport
def _join_finished(self, transport, _id, uri, is_orginator):
print("client_join_finshed", transport, _id, uri)
self._return(transport._coder is not None and _id and uri, {'transport._coder': transport._coder , 'id': _id, 'uri': uri})
self._return('client_join_finished', {'transport': repr(transport), '_id': _id, 'uri': uri})
def _peer_disconnected(self, transport, uri, reason):
print("client_peer_disconnected", transport, uri, reason)
#self._return(not self._item.is_connected(), variables={'is_connected': self._item.is_connected()})
self._return('client_disconnected', {'transport': repr(transport), 'reason': reason, 'uri': uri})
# If we have stop stop everything
if self._stop:
self._item = None
self._stop_reactor()
def _stop_client(self):
print("_stop_client(disconnect)")
self._stop = True
self._item.disconnect()
def stop(self):
print("client_stop", self._item)
if self._item:
self._stop_client()
# Timeout
reactor.callLater(1, self._stop_reactor)
def run(self):
print("start client")
self._uri = "%s:%s" % (self._uri, self._port)
reactor.callLater(0, self._ttf.join, self._uri)
self._base_run()
print("client finished")
# @pytest.mark.interactive
class TestTransportServer(object):
_mmanager = multiprocessing.Manager()
def test_start_stop(self, monkeypatch):
shqs = [self._mmanager.Queue(), self._mmanager.Queue()]
sh = ServerHandler("calvinip://localhost", shqs[0], shqs[1])
ttf = calvinip_transport.CalvinTransportFactory(str(uuid.uuid4()), sh.get_callbacks())
sh.set_ttf(ttf)
sh.start()
error = None
try:
while sh.is_alive():
try:
mess = shqs[0].get(timeout=.3)
#print(mess)
except:
continue
if mess[0] == 'timeout':
print(mess[1])
raise Exception("Timeout: %s" % "\n".join(mess[1][11:]))
elif mess[0] == 'server_started':
shqs[1].put(['stop', [], {}])
elif mess[0] == 'server_stopped':
break
else:
#print mess
if not mess[0]:
for a in mess[1]:
print a,
for k,v in mess[2].items():
print "%s = %s" % (k, repr(v))
raise Exception("\n".join(mess[1][11:]))
except Exception as e:
error = e
shqs[1].put(['stop', [], {}])
sh.join(timeout=.2)
if sh.is_alive():
sh.terminate()
if error:
pytest.fail(error)
def test_callbacks(self, monkeypatch):
#self.test_start_stop(monkeypatch)
pass
def test_peer_connected(self, monkeypatch):
pass
# @pytest.mark.interactive
@pytest.mark.slow
class TestTransportClient(object):
test_nodes = 2
_mmanager = multiprocessing.Manager()
def test_connect(self, monkeypatch):
queues = []
shqs = [self._mmanager.Queue(), self._mmanager.Queue()]
chqs = [self._mmanager.Queue(), self._mmanager.Queue()]
sh = ServerHandler("calvinip://127.0.0.1", shqs[0], shqs[1])
ch = ClientHandler("calvinip://127.0.0.1", chqs[0], chqs[1])
ttfs = calvinip_transport.CalvinTransportFactory(str(uuid.uuid4()), sh.get_callbacks())
ttfc = calvinip_transport.CalvinTransportFactory(str(uuid.uuid4()), ch.get_callbacks())
sh.set_ttf(ttfs)
ch.set_ttf(ttfc)
sh.start()
#ch.start()
queues = [shqs, chqs]
cstop = sstop = False
stop = False
error = None
try:
while not stop:
for q in queues:
try:
mess = q[0].get(timeout=.1)
#print(mess[0])
except:
continue
if mess[0] == 'timeout':
print(mess[1])
# TODO: terminate
raise Exception("Timeout: %s" % "\n".join(mess[1][11:]))
elif mess[0] == 'server_stopped':
print "Hej hej"
sstop = True
stop = (sstop and cstop)
elif mess[0] == 'server_started':
ch.set_port(mess[2])
ch.start()
elif mess[0] == 'client_disconnected':
cstop = True
stop = (sstop and cstop)
elif mess[0] == 'client_join_finished':
stop = True
else:
#print mess
if not mess[0]:
for a in mess[1][11:-1]:
print a,
for k,v in mess[2].items():
print "%s = %s" % (k, repr(v))
raise Exception("\n".join(mess[1][11:]))
except Exception as e:
error = e
for tq in queues:
print(repr(tq))
tq[1].put(['stop', [], {}])
print sh.join(timeout=.5)
print ch.join(timeout=.5)
if sh.is_alive():
sh.terminate()
if ch.is_alive():
ch.terminate()
if error:
pytest.fail(error)
def test_data(self, monkeypatch):
pass
def test_callback(self, monkeypatch):
pass
|
|
from django.contrib.auth.models import User
from django.test.utils import override_settings
from funfactory.urlresolvers import reverse
from mock import patch
from nose.tools import eq_, nottest
from product_details import product_details
from pyquery import PyQuery as pq
from common import browserid_mock
from common.tests import ESTestCase, user
from groups.models import Group
from ..helpers import validate_username
from ..models import UserProfile, UsernameBlacklist
Group.objects.get_or_create(name='staff', system=True)
COUNTRIES = product_details.get_regions('en-US')
class RegistrationTest(ESTestCase):
"""Tests registration."""
# Assertion doesn't matter since we monkey patched it for testing
fake_assertion = 'mrfusionsomereallylongstring'
def test_validate_username(self):
"""Test validate_username helper."""
valid_usernames = ['giorgos', 'aakash',
'nikos', 'bat-man']
invalid_usernames = ['administrator', 'test',
'no-reply', 'noreply', 'spam']
for name in valid_usernames:
self.assertTrue(validate_username(name),
'Username: %s did not pass test' % name)
for name in invalid_usernames:
self.assertFalse(validate_username(name),
'Username: %s did not pass test' % name)
def test_mozillacom_registration(self):
"""Verify @mozilla.com users are auto-vouched and marked "staff"."""
d = dict(assertion=self.fake_assertion)
with browserid_mock.mock_browserid('mrfusion@mozilla.com'):
self.client.post(reverse('browserid_verify'), d, follow=True)
d = dict(username='ad',
email='mrfusion@mozilla.com',
full_name='Akaaaaaaash Desaaaaaaai',
optin=True)
with browserid_mock.mock_browserid('mrfusion@mozilla.com'):
r = self.client.post(reverse('register'), d, follow=True)
doc = pq(r.content)
assert r.context['user'].get_profile().is_vouched, (
"Moz.com should be auto-vouched")
assert not doc('#pending-approval'), (
'Moz.com profile page should not having pending vouch div.')
assert r.context['user'].get_profile().groups.filter(name='staff'), (
'Moz.com should belong to the "staff" group.')
def test_plus_signs(self):
email = 'mrfusion+dotcom@mozilla.com'
d = dict(assertion=self.fake_assertion)
with browserid_mock.mock_browserid(email):
self.client.post(reverse('browserid_verify'), d, follow=True)
d = dict(username='ad',
email=email,
full_name='Akaaaaaaash Desaaaaaaai',
optin=True)
with browserid_mock.mock_browserid(email):
self.client.post(reverse('register'), d, follow=True)
assert User.objects.filter(email=d['email'])
def test_username(self):
"""Test that we can submit a perfectly cromulent username.
We verify that /:username then works as well.
"""
email = 'mrfusion+dotcom@mozilla.com'
d = dict(assertion=self.fake_assertion)
with browserid_mock.mock_browserid(email):
self.client.post(reverse('browserid_verify'), d, follow=True)
d = dict(email=email,
username='mrfusion',
full_name='Akaaaaaaash Desaaaaaaai',
optin=True)
with browserid_mock.mock_browserid(email):
r = self.client.post(reverse('register'), d)
eq_(r.status_code, 302, "Problems if we didn't redirect...")
u = User.objects.filter(email=d['email'])[0]
eq_(u.username, 'mrfusion', "Username didn't get set.")
r = self.mozillian_client.get(reverse('profile', args=['mrfusion']),
follow=True)
eq_(r.status_code, 200)
eq_(r.context['profile'].user_id, u.id)
def test_username_characters(self):
"""Verify usernames can have digits/symbols, but nothing too
insane.
"""
email = 'mrfusion+dotcom@mozilla.com'
username = 'mr.fu+s_i-on@246'
d = dict(assertion=self.fake_assertion)
with browserid_mock.mock_browserid(email):
self.client.post(reverse('browserid_verify'), d, follow=True)
d = dict(email=email,
username=username,
full_name='Akaaaaaaash Desaaaaaaai',
optin=True)
with browserid_mock.mock_browserid(email):
r = self.client.post(reverse('register'), d)
eq_(r.status_code, 302, (
'Registration flow should finish with a redirect.'))
u = User.objects.get(email=d['email'])
eq_(u.username, username, 'Username should be set to "%s".' % username)
r = self.mozillian_client.get(reverse('profile', args=[username]),
follow=True)
eq_(r.status_code, 200)
eq_(r.context['profile'].user_id, u.id)
# Now test a username with even weirder characters that we don't allow.
bad_user_email = 'mrfusion+coolbeans@mozilla.com'
bad_username = 'mr.we*rd'
d = dict(assertion=self.fake_assertion)
with browserid_mock.mock_browserid(email):
self.client.post(reverse('browserid_verify'), d, follow=True)
d = dict(email=bad_user_email,
username=bad_username,
full_name='Akaaaaaaash Desaaaaaaai',
optin=True)
with browserid_mock.mock_browserid(email):
r = self.client.post(reverse('register'), d)
eq_(r.status_code, 302, (
'Registration flow should fail; username is bad.'))
assert not User.objects.filter(email=d['email']), (
"User shouldn't exist; username was bad.")
def test_bad_username(self):
"""'about' is a terrible username, as are its silly friends.
Let's make some stop words *and* analyze the routing system,
whenever someone sets their username and verify that they can't
be 'about' or 'help' or anything that is in use.
"""
email = 'mrfusion+dotcom@mozilla.com'
badnames = UsernameBlacklist.objects.all().values_list('value',
flat=True)
# BrowserID needs an assertion not to be whiney
d = dict(assertion=self.fake_assertion)
with browserid_mock.mock_browserid(email):
self.client.post(reverse('browserid_verify'), d, follow=True)
for name in badnames:
d = dict(email=email,
username=name,
full_name='Akaaaaaaash Desaaaaaaai',
optin=True)
with browserid_mock.mock_browserid(email):
r = self.client.post(reverse('register'), d)
eq_(r.status_code, 200,
'This form should fail for "%s", and say so.' % name)
assert r.context['user_form'].errors, (
"Didn't raise errors for %s" % name)
def test_nickname_changes_before_vouch(self):
"""Notify pre-vouched users of URL change from nickname
changes.
See: https://bugzilla.mozilla.org/show_bug.cgi?id=736556
"""
d = dict(assertion=self.fake_assertion)
email = 'soy@latte.net'
with browserid_mock.mock_browserid(email):
self.client.post(reverse('browserid_verify'), d, follow=True)
# Note: No username supplied.
d = dict(email=email,
full_name='Tofu Matt',
optin=True)
with browserid_mock.mock_browserid(email):
r = self.client.post(reverse('register'), d, follow=True)
assert r.context['user'].id, 'User should be created'
assert not r.context['user'].get_profile().is_vouched, (
'User should not be vouched')
d['username'] = 'foobar'
r = self.client.post(reverse('profile.edit'), d, follow=True)
assert 'You changed your username;' in r.content, (
'User should know that changing their username changes '
'their URL.')
def test_repeat_username(self):
"""Verify one cannot repeat email adresses."""
register = dict(username='repeatedun',
full_name='Akaaaaaaash Desaaaaaaai',
optin=True)
# Create first user
email1 = 'mRfUsIoN@mozilla.com'
register.update(email=email1)
d = dict(assertion=self.fake_assertion)
with browserid_mock.mock_browserid(email1):
self.client.post(reverse('browserid_verify'), d, follow=True)
with browserid_mock.mock_browserid(email1):
self.client.post(reverse('register'), register, follow=True)
self.client.logout()
# Create a different user
email2 = 'coldfusion@gmail.com'
register.update(email=email2)
with browserid_mock.mock_browserid(email2):
self.client.post(reverse('browserid_verify'), d, follow=True)
with browserid_mock.mock_browserid(email2):
r = self.client.post(reverse('register'), register, follow=True)
# Make sure we can't use the same username twice
assert r.context['user_form'].errors, "Form should throw errors."
class TestThingsForPeople(ESTestCase):
"""Verify that the wrong users don't see things."""
def test_searchbox(self):
url = reverse('home')
r = self.client.get(url)
doc = pq(r.content)
assert not doc('input[type=text]')
r = self.pending_client.get(url)
doc = pq(r.content)
assert not doc('input[type=text]')
r = self.mozillian_client.get(url)
doc = pq(r.content)
assert doc('input[type=text]')
def test_invitelink(self):
url = reverse('home')
r = self.client.get(url)
doc = pq(r.content)
assert not doc('a#invite')
r = self.pending_client.get(url)
doc = pq(r.content)
assert not doc('a#invite'), "Unvouched can't invite."
r = self.mozillian_client.get(url)
doc = pq(r.content)
assert doc('a#invite')
def test_register_redirects_for_authenticated_users(self):
"""Ensure only anonymous users can register an account."""
r = self.client.get(reverse('home'))
self.assertTrue(200 == r.status_code,
'Anonymous users can access the homepage to '
'begin registration flow')
r = self.mozillian_client.get(reverse('register'))
eq_(302, r.status_code,
'Authenticated users are redirected from registration.')
def test_vouchlink(self):
"""No vouch link when PENDING looks at PENDING."""
url = reverse('profile', args=['pending'])
r = self.mozillian_client.get(url)
doc = pq(r.content)
assert doc('#vouch-form button')
r = self.pending_client.get(url)
doc = pq(r.content)
errmsg = 'Self vouching... silliness.'
assert not doc('#vouch-form button'), errmsg
assert 'Vouch for me' not in r.content, errmsg
@patch('users.admin.index_all_profiles')
def test_es_index_admin_view(self, mock_obj):
"""Test that admin:user_index_profiles work fires a re-index."""
self.mozillian.is_superuser = True
self.mozillian.is_staff = True
self.mozillian.save()
url = reverse('admin:users_index_profiles')
self.client.login(email=self.mozillian.email)
self.client.get(url)
mock_obj.assert_any_call()
class VouchTest(ESTestCase):
    # TODO: Marked as nottest until we decide the policy on the search
    # page. Then fix accordingly.
@nottest
def test_vouch_method(self):
"""Test UserProfile.vouch()
Assert that a previously unvouched user shows up as unvouched
in the database.
Assert that when vouched they are listed as vouched.
"""
vouchee = self.mozillian.get_profile()
profile = self.pending.get_profile()
assert not profile.is_vouched, 'User should not yet be vouched.'
r = self.mozillian_client.get(reverse('search'),
{'q': self.pending.email})
assert 'Non-Vouched' in r.content, (
'User should not appear as a Mozillian in search.')
profile.vouch(vouchee)
profile = UserProfile.objects.get(pk=profile.pk)
assert profile.is_vouched, 'User should be marked as vouched.'
r = self.mozillian_client.get(reverse('profile', args=['pending']))
eq_(r.status_code, 200)
doc = pq(r.content)
assert 'Mozillian Profile' in r.content, (
'User should appear as having a vouched profile.')
assert not 'Pending Profile' in r.content, (
'User should not appear as having a pending profile.')
assert not doc('#pending-approval'), (
'Pending profile div should not be in DOM.')
# Make sure the user appears vouched in search results
r = self.mozillian_client.get(reverse('search'),
{'q': self.pending.email})
assert 'Mozillian' in r.content, (
'User should appear as a Mozillian in search.')
class TestUser(ESTestCase):
"""Test User functionality."""
def test_userprofile(self):
u = user()
UserProfile.objects.all().delete()
# Somehow the User lacks a UserProfile
# Note that u.get_profile() caches in memory.
self.assertRaises(UserProfile.DoesNotExist,
lambda: u.userprofile)
# Sign in
with browserid_mock.mock_browserid(u.email):
d = dict(assertion='qwer')
self.client.post(reverse('browserid_verify'), d, follow=True)
# Good to go
assert u.get_profile()
def test_blank_ircname(self):
username = 'thisisatest'
email = 'test@example.com'
register = dict(username=username,
full_name='David Teststhings',
optin=True)
d = {'assertion': 'rarrr'}
with browserid_mock.mock_browserid(email):
self.client.post(reverse('browserid_verify'), d, follow=True)
self.client.post(reverse('register'), register, follow=True)
u = User.objects.filter(email=email)[0]
p = u.get_profile()
p.ircname = ''
eq_(p.ircname, '', 'We need to allow IRCname to be blank')
class TestMigrateRegistration(ESTestCase):
"""Test funky behavior of flee ldap."""
email = 'robot1337@domain.com'
def test_login(self):
"""Given an invite_url go to it and redeem an invite."""
# Lettuce make sure we have a clean slate
info = dict(full_name='Akaaaaaaash Desaaaaaaai', optin=True)
self.client.logout()
u = User.objects.create(username='robot1337', email=self.email)
p = u.get_profile()
p.full_name = info['full_name']
u.save()
p.save()
# BrowserID needs an assertion not to be whiney
d = dict(assertion='tofu')
with browserid_mock.mock_browserid(self.email):
r = self.client.post(reverse('browserid_verify'),
d, follow=True)
eq_(r.status_code, 200)
# Now let's register
with browserid_mock.mock_browserid(self.email):
r = self.client.post(reverse('register'), info, follow=True)
eq_(r.status_code, 200)
@override_settings(AUTO_VOUCH_DOMAINS=['mozilla.com'])
class AutoVouchTests(ESTestCase):
def test_only_autovouch_in_staff(self):
"""Restrict the staff group to emails in AUTO_VOUCH_DOMAINS."""
staff = Group.objects.get_or_create(name='staff', system=True)[0]
staff_user = user(email='abcd@mozilla.com')
staff_profile = staff_user.get_profile()
staff_profile.save()
assert staff in staff_profile.groups.all(), (
'Auto-vouched email in staff group by default.')
staff_profile.groups.remove(staff)
staff_profile.save()
assert staff in staff_profile.groups.all(), (
'Auto-vouched email cannot be removed from staff group.')
community_user = user()
community_profile = community_user.get_profile()
community_profile.save()
assert staff not in community_profile.groups.all(), (
'Non-auto-vouched email not automatically in staff group.')
community_profile.groups.add(staff)
community_profile.save()
assert staff not in community_profile.groups.all(), (
'Non-auto-vouched email cannot be added to staff group.')
def test_autovouch_email(self):
"""Users with emails in AUTO_VOUCH_DOMAINS should be vouched."""
auto_user = user(email='abcd@mozilla.com')
auto_profile = auto_user.get_profile()
auto_profile.save()
assert auto_profile.is_vouched, 'Profile should be vouched.'
assert auto_profile.vouched_by is None, (
'Profile should not have a voucher.')
non_auto_user = user()
non_auto_profile = non_auto_user.get_profile()
non_auto_profile.save()
assert not non_auto_profile.is_vouched, (
'Profile should not be vouched.')
@override_settings(
AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.ModelBackend'])
class UsernameRedirectionMiddlewareTests(ESTestCase):
# Assertion doesn't matter since we monkey patched it for testing
def test_username_redirection_middleware(self):
"""Test the username redirection middleware."""
auto_user = user(username='lalala')
self.client.login(username=auto_user.username, password='testpass')
response = self.client.get('/%s' % auto_user.username, follow=True)
self.assertTemplateUsed(response, 'phonebook/profile.html')
response = self.client.get('/%s' % 'invaliduser', follow=True)
self.assertTemplateUsed(response, '404.html')
class SearchTests(ESTestCase):
def setUp(self):
self.data = {'country': 'us',
'region': 'California',
'city': 'Mountain View',
'ircname': 'hax0r',
'bio': 'I love ice cream. I code. I tweet.',
'website': 'http://www.example.com',
'full_name': 'Nikos Koukos'}
self.auto_user = user()
self.up = self.auto_user.userprofile
for key, value in self.data.iteritems():
setattr(self.up, key, value)
self.up.save()
def test_search_generic(self):
for key, value in self.data.iteritems():
if key == 'country':
value = COUNTRIES[value]
results = UserProfile.search(value)
self.assertEqual(len(results), 1)
results = UserProfile.search(self.up.full_name)
self.assertEqual(len(results), 1)
results = UserProfile.search('mountain')
self.assertEqual(len(results), 0)
results = UserProfile.search(self.up.full_name[:2])
self.assertEqual(len(results), 1)
results = UserProfile.search(
self.up.bio.split(' ')[3])
self.assertEqual(len(results), 1)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""for_loop and pfor ops."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.parallel_for.pfor import PFor
from tensorflow.python.ops.parallel_for.pfor import PForConfig
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None):
"""Runs `loop_fn` `iters` times and stacks the outputs.
Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and
stacks corresponding outputs of the different runs.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and returns a possibly nested structure of tensor
objects. The shape of these outputs should not depend on the input.
loop_fn_dtypes: dtypes for the outputs of `loop_fn`.
iters: Number of iterations for which to run `loop_fn`.
parallel_iterations: The number of iterations that can be dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked output tensor objects with the same
nested structure as the output of `loop_fn`.
"""
flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)
is_none_list = []
def while_body(i, *ta_list):
"""Body of while loop."""
fn_output = nest.flatten(loop_fn(i))
if len(fn_output) != len(flat_loop_fn_dtypes):
raise ValueError(
"Number of expected outputs, %d, does not match the number of "
"actual outputs, %d, from loop_fn" % (len(flat_loop_fn_dtypes),
len(fn_output)))
outputs = []
del is_none_list[:]
is_none_list.extend(x is None for x in fn_output)
for out, ta in zip(fn_output, ta_list):
# TODO(agarwal): support returning Operation objects from loop_fn.
if out is not None:
# out may be a ref tensor, wrap it in identity to get a non-ref tensor.
ta = ta.write(i, array_ops.expand_dims(out, 0))
outputs.append(ta)
return tuple([i + 1] + outputs)
if parallel_iterations is not None:
extra_args = {"parallel_iterations": parallel_iterations}
else:
extra_args = {}
ta_list = control_flow_ops.while_loop(
lambda i, *ta: i < iters,
while_body,
[0] + [tensor_array_ops.TensorArray(dtype.base_dtype, iters)
for dtype in flat_loop_fn_dtypes],
**extra_args)[1:]
# TODO(rachelim): enable this for sparse tensors
output = [None if is_none else ta.concat()
for ta, is_none in zip(ta_list, is_none_list)]
assert len(output) in (0, len(flat_loop_fn_dtypes))
if not output:
# This may happen for the case where iters == 0.
return None
else:
return nest.pack_sequence_as(loop_fn_dtypes, output)
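# Illustrative usage sketch (not part of the original module; assumes
# `import tensorflow as tf` at the call site): stack the squares of 0..4.
# Each per-iteration output is a scalar, so the stacked result has shape [5].
#
#   squares = for_loop(lambda i: tf.cast(i, tf.float32) ** 2,
#                      tf.float32, iters=5)
#   # squares evaluates to [0., 1., 4., 9., 16.]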
def _flatten_first_two_dims(x):
"""Flattens the first two dimensions of x into a single dimension."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[old_shape[0] * old_shape[1]], old_shape[2:]],
axis=0)
return array_ops.reshape(x, new_shape)
PFOR_CONFIG_ARG = "pfor_config"
def _is_under_xla_context():
"""Check if we are currently inside an XLA compile context."""
g = ops.get_default_graph()
while g is not None:
control_flow_context = g._get_control_flow_context() # pylint: disable=protected-access
while control_flow_context is not None:
if control_flow_context.IsXLAContext():
return True
else:
control_flow_context = control_flow_context.outer_context
# If g is a FuncGraph, get its outer_graph.
g = getattr(g, "outer_graph", None)
return False
def pfor(loop_fn, iters, fallback_to_while_loop=True, parallel_iterations=None):
"""Equivalent to running `loop_fn` `iters` times and stacking the outputs.
`pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters`
times, with input from 0 to `iters - 1`, and stacking corresponding output of
each iteration. However the implementation does not use a `tf.while_loop`.
Instead it adds new operations to the graph that collectively compute the same
value as what running `loop_fn` in a loop would compute.
This is an experimental feature and currently has a lot of limitations:
- There should be no data dependency between the different iterations. For
example, a future iteration should not depend on a value or side-effect of
a previous iteration.
- Stateful kernels may mostly not be supported since these often imply a
data dependency or ordering of the iterations. We do support a limited set
of such stateful kernels though (like RandomFoo, Variable operations like
reads, etc).
- Conversion works only on a limited set of kernels for which a converter
has been registered.
- `loop_fn` has limited support for control flow operations. `tf.cond` in
particular is not supported.
- `loop_fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of `loop_fn` outputs should not depend on the input
to loop_fn.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and optionally a keyword argument `pfor_config` set
to a PForConfig object. It returns a possibly nested structure of Tensor
or Operation objects. Note that if setting `parallel_iterations` argument
to something other than None, `loop_fn` may be called more than once
during graph construction. So it may need to avoid mutating global state.
iters: Number of iterations for which to run `loop_fn`.
fallback_to_while_loop: If true, on failing to vectorize an operation, pfor
falls back to using a `tf.while_loop` to dispatch the iterations.
parallel_iterations: A knob to control how many iterations are vectorized
and dispatched in parallel. The default value of None corresponds to
vectorizing all the iterations. If `parallel_iterations` is smaller than
`iters`, then chunks of at most that many iterations are dispatched in
sequence. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked tensor objects with the same nested
structure as the output of `loop_fn`.
Raises:
ValueError: If parallel_iterations is not None and not an integer > 1.
"""
def f():
return _pfor_impl(loop_fn,
iters,
fallback_to_while_loop=fallback_to_while_loop,
parallel_iterations=parallel_iterations)
# Note that we wrap into a tf.function if in eager execution mode or under
# XLA compilation. The latter is so that we don't compile operations like
# tf.placeholder that are created by the loop body.
functions_run_eagerly = None
if context.executing_eagerly() or _is_under_xla_context():
functions_run_eagerly = def_function.functions_run_eagerly()
if functions_run_eagerly:
logging.warning(
"It looks like tf.function behavior was disabled, perhaps using "
"tf.config.run_functions_eagerly. Vectorization "
"primitives (e.g. tf.vectorized_map) require tf.function to work. "
"These primitives will override the disable.")
def_function.run_functions_eagerly(False)
f = def_function.function(f)
outputs = f()
if functions_run_eagerly is not None:
def_function.run_functions_eagerly(functions_run_eagerly)
return outputs
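# Illustrative usage sketch (not part of the original module; assumes
# `import tensorflow as tf` at the call site): vectorize a per-row gather.
#
#   x = tf.random.uniform([8, 3])
#   rows = pfor(lambda i: tf.gather(x, i), 8)
#   # `rows` has shape [8, 3] and matches `x` row for row.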
def _should_expand_composite(value):
return (isinstance(value, composite_tensor.CompositeTensor)
# Leave sparse tensors to be converted by `PFor._convert_sparse`.
and not isinstance(value, sparse_tensor.SparseTensor)
and not isinstance(value, indexed_slices.IndexedSlices))
# pylint: disable=protected-access
def _composite_to_tensors(value):
"""Converts a CompositeTensor into a list of stackable tensors."""
if _should_expand_composite(value):
spec = value._type_spec
if not isinstance(spec, type_spec.BatchableTypeSpec):
raise ValueError("CompositeTensor instance {} returned from "
"parallel_for or vectorized_map loop body must provide "
"a `BatchableTypeSpec` (saw: {}).".format(
value, spec))
return spec._to_tensor_list(value)
return value
# pylint: enable=protected-access
# pylint: disable=protected-access
def _composite_from_tensors(stacked_tensors,
preconverted_value,
batch_size):
"""Converts a list of stacked tensors to a batch CompositeTensor."""
if _should_expand_composite(preconverted_value):
batch_type_spec = preconverted_value._type_spec._batch(batch_size)
return batch_type_spec._from_compatible_tensor_list(stacked_tensors)
return stacked_tensors
# pylint: enable=protected-access
def _loop_fn_has_config(loop_fn):
"""Test if `loop_fn` has a `pfor_config` argument."""
if tf_inspect.isfunction(loop_fn):
argspec = tf_inspect.getargspec(loop_fn)
return PFOR_CONFIG_ARG in argspec.args
elif isinstance(loop_fn, functools.partial):
fn = loop_fn.func
argspec = tf_inspect.getargspec(fn)
return (PFOR_CONFIG_ARG in argspec.args and
PFOR_CONFIG_ARG not in loop_fn.keywords)
else:
loop_class = tf_decorator.unwrap(loop_fn)[1]
if not hasattr(loop_class, "__call__"):
raise ValueError("loop_fn object did not have a __call__ method")
argspec = tf_inspect.getargspec(loop_class.__call__)
return PFOR_CONFIG_ARG in argspec.args
def _pfor_impl(loop_fn,
iters,
fallback_to_while_loop,
parallel_iterations=None,
pfor_config=None):
"""Implementation of pfor."""
assert not context.executing_eagerly()
loop_fn_has_config = _loop_fn_has_config(loop_fn)
existing_ops = set(ops.get_default_graph().get_operations())
iters_value = tensor_util.constant_value(iters)
# Run the loop body
with ops.name_scope("loop_body"):
loop_var = array_ops.placeholder_with_default(0, shape=[])
if loop_fn_has_config:
if pfor_config is None:
pfor_config = PForConfig()
pfor_config._set_iters(iters) # pylint: disable=protected-access
loop_fn_outputs = loop_fn(loop_var, **{PFOR_CONFIG_ARG: pfor_config})
else:
assert pfor_config is None
loop_fn_outputs = loop_fn(loop_var)
loop_fn_output_tensors = nest.map_structure(_composite_to_tensors,
loop_fn_outputs)
# Convert outputs to Tensor if needed.
rewrap_as_ndarray = False
tmp_loop_fn_outputs = []
for loop_fn_output in nest.flatten(loop_fn_output_tensors):
if (loop_fn_output is not None and not isinstance(
loop_fn_output,
(ops.Operation, ops.Tensor, sparse_tensor.SparseTensor))):
if isinstance(loop_fn_output, indexed_slices.IndexedSlices):
logging.warn("Converting %s to a dense representation may make it slow."
" Alternatively, output the indices and values of the"
" IndexedSlices separately, and handle the vectorized"
" outputs directly." % loop_fn_output)
loop_fn_output = ops.convert_to_tensor(loop_fn_output)
elif isinstance(loop_fn_output, np_arrays.ndarray):
loop_fn_output = loop_fn_output.data
rewrap_as_ndarray = True
else:
loop_fn_output = ops.convert_to_tensor(loop_fn_output)
tmp_loop_fn_outputs.append(loop_fn_output)
loop_fn_output_tensors = nest.pack_sequence_as(loop_fn_output_tensors,
tmp_loop_fn_outputs)
new_ops = set(ops.get_default_graph().get_operations()) - existing_ops
iters = ops.convert_to_tensor(iters)
if parallel_iterations is not None:
if parallel_iterations < 1:
raise ValueError("parallel_iterations must be None or a positive integer")
if parallel_iterations == 1:
raise ValueError("Found parallel_iterations == 1. Use for_loop instead.")
if iters_value is not None and iters_value < parallel_iterations:
parallel_iterations = None
if parallel_iterations is None:
with ops.name_scope("pfor"):
converter = PFor(loop_var, iters, new_ops,
fallback_to_while_loop=fallback_to_while_loop,
pfor_config=pfor_config)
flattened_output_tensors = []
for loop_fn_output in nest.flatten(loop_fn_output_tensors):
output = converter.convert(loop_fn_output)
if rewrap_as_ndarray:
output = np_arrays.tensor_to_ndarray(output)
flattened_output_tensors.append(output)
else:
if pfor_config is not None and pfor_config._has_reductions(): # pylint: disable=protected-access
raise ValueError("Setting parallel_iterations currently unsupported if"
" reductions across iterations are performed.")
num_tiled_iterations = iters // parallel_iterations
num_remaining_iterations = iters % parallel_iterations
# TODO(agarwal): Avoid calling loop_fn twice. Generate the loop body inside
# a tf.function and extract the graph from there to vectorize it.
with ops.name_scope("pfor_untiled"):
converter = PFor(loop_var, num_remaining_iterations, new_ops,
fallback_to_while_loop=fallback_to_while_loop,
pfor_config=pfor_config)
remaining_output_tensors = []
flattened_output_tensors = nest.flatten(loop_fn_output_tensors)
for loop_fn_output in flattened_output_tensors:
output = converter.convert(loop_fn_output)
if rewrap_as_ndarray:
output = np_arrays.tensor_to_ndarray(output)
remaining_output_tensors.append(output)
with ops.name_scope("pfor_tiled"):
loop_fn_dtypes = [ops.convert_to_tensor(x).dtype
for x in flattened_output_tensors]
def tiled_loop_body(j):
offset = j * parallel_iterations + num_remaining_iterations
def tiled_loop_fn(i, pfor_config=None):
if loop_fn_has_config:
loop_fn_outputs = loop_fn(i + offset, pfor_config=pfor_config)
else:
loop_fn_outputs = loop_fn(i + offset)
return nest.flatten(
# Stacking across iterations requires explicit Tensors.
nest.map_structure(_composite_to_tensors, loop_fn_outputs))
return _pfor_impl(
tiled_loop_fn,
parallel_iterations,
fallback_to_while_loop=fallback_to_while_loop,
pfor_config=pfor_config)
tiled_output_tensors = for_loop(
tiled_loop_body, loop_fn_dtypes,
num_tiled_iterations, parallel_iterations=1)
tiled_output_tensors = [
_flatten_first_two_dims(y) for y in tiled_output_tensors]
with ops.name_scope("pfor"):
if iters_value is None or iters_value % parallel_iterations:
output_tensors = control_flow_ops.cond(
math_ops.equal(num_remaining_iterations, 0),
lambda: tiled_output_tensors,
lambda: [array_ops.concat([x, y], axis=0) # pylint: disable=g-long-lambda
for x, y in zip(remaining_output_tensors,
tiled_output_tensors)])
else:
output_tensors = tiled_output_tensors
flattened_output_tensors = nest.flatten(output_tensors)
for output, original_output in zip(flattened_output_tensors,
nest.flatten(loop_fn_output_tensors)):
# Restore any shape information lost from tiling.
# TODO(b/174254748): this may not be correct for stacked `variant`s.
output.set_shape(
tensor_shape.TensorShape([iters_value]).concatenate(
original_output.shape))
if rewrap_as_ndarray:
flattened_output_tensors = [
np_arrays.tensor_to_ndarray(x) for x in flattened_output_tensors]
return nest.map_structure_up_to(
loop_fn_outputs,
functools.partial(_composite_from_tensors, batch_size=iters_value),
nest.pack_sequence_as(loop_fn_output_tensors,
flattened_output_tensors),
loop_fn_outputs)
def _broadcasting_gather(x, i):
"""Wrapper for gather that implicitly broadcasts unit dimensions."""
static_first_dim = tensor_shape.dimension_value(x.shape[0])
if static_first_dim == 1:
i = 0
elif static_first_dim is None:
i = array_ops.where_v2(array_ops.shape(x)[0] > 1, i, 0)
result = array_ops.gather(x, i)
if isinstance(x, np_arrays.ndarray):
result = np_arrays.ndarray.from_tensor(result)
return result
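# For example (illustrative): if `x` has static shape [1, 3], the gather above
# always returns x[0] regardless of `i`; this is how unit batch dimensions in
# `elems` are broadcast across the batch in `vectorized_map` below.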
@tf_export("vectorized_map")
def vectorized_map(fn, elems, fallback_to_while_loop=True):
"""Parallel map on the list of tensors unpacked from `elems` on dimension 0.
This method works similarly to `tf.map_fn` but is optimized to run much faster,
possibly with a much larger memory footprint. The speedups are obtained by
vectorization (see [Auto-Vectorizing TensorFlow Graphs: Jacobians,
Auto-Batching and Beyond](https://arxiv.org/pdf/1903.04243.pdf)). The idea
behind vectorization is to semantically launch all the invocations of `fn` in
parallel and fuse corresponding operations across all these invocations. This
fusion is done statically at graph generation time and the generated code is
often similar in performance to a manually fused version.
Because `tf.vectorized_map` fully parallelizes the batch, this method will
generally be significantly faster than using `tf.map_fn`, especially in eager
mode. However this is an experimental feature and currently has a lot of
limitations:
- There should be no data dependency between the different semantic
invocations of `fn`, i.e. it should be safe to map the elements of the
inputs in any order.
- Stateful kernels may mostly not be supported since these often imply a
data dependency. We do support a limited set of such stateful kernels
though (like RandomFoo, Variable operations like reads, etc).
- `fn` has limited support for control flow operations.
- `fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of any intermediate or output tensors in the
computation of `fn` should not depend on the input to `fn`.
Examples:
```python
def outer_product(a):
return tf.tensordot(a, a, 0)
batch_size = 100
a = tf.ones((batch_size, 32, 32))
c = tf.vectorized_map(outer_product, a)
assert c.shape == (batch_size, 32, 32, 32, 32)
```
```python
# Computing per-example gradients
batch_size = 10
num_features = 32
layer = tf.keras.layers.Dense(1)
def model_fn(arg):
with tf.GradientTape() as g:
inp, label = arg
inp = tf.expand_dims(inp, 0)
label = tf.expand_dims(label, 0)
prediction = layer(inp)
loss = tf.nn.l2_loss(label - prediction)
return g.gradient(loss, (layer.kernel, layer.bias))
inputs = tf.random.uniform([batch_size, num_features])
labels = tf.random.uniform([batch_size, 1])
per_example_gradients = tf.vectorized_map(model_fn, (inputs, labels))
assert per_example_gradients[0].shape == (batch_size, num_features, 1)
assert per_example_gradients[1].shape == (batch_size, 1)
```
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same (possibly nested) structure as `elems`, and returns a possibly
nested structure of Tensors and Operations, which may be different than
the structure of `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be mapped over by `fn`. The first dimensions of all
elements must broadcast to a consistent value; equivalently, each
element tensor must have first dimension of either `B` or `1`, for some
common batch size `B >= 1`.
fallback_to_while_loop: If true, on failing to vectorize an operation,
the unsupported op is wrapped in a tf.while_loop to execute the map
iterations. Note that this fallback only happens for unsupported ops and
other parts of `fn` are still vectorized. If false, on encountering an
unsupported op, a ValueError is thrown. Note that the fallbacks can result
in slowdowns since vectorization often yields speedup of one to two orders
of magnitude.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
Although they are less common as user-visible inputs and outputs, note that
tensors of type `tf.variant` which represent tensor lists (for example from
`tf.raw_ops.TensorListFromTensor`) are vectorized by stacking the list
contents rather than the variant itself, and so the container tensor will
have a scalar shape when returned rather than the usual stacked shape. This
improves the performance of control flow gradient vectorization.
Raises:
ValueError: If vectorization fails and fallback_to_while_loop is False.
"""
def _convert_to_tensor_or_ndarray(x):
if isinstance(x, np_arrays.ndarray):
return x
return ops.convert_to_tensor(x)
elems = nest.map_structure(_convert_to_tensor_or_ndarray, elems)
def loop_fn(i):
gathered_elems = nest.map_structure(lambda x: _broadcasting_gather(x, i),
elems)
return fn(gathered_elems)
# Extract batch size from the maximum first dimension of any element.
flat_elems = nest.flatten(elems)
def _get_shape(x):
if isinstance(x, np_arrays.ndarray):
x = x.data
if x.shape.rank is None:
return None
return x.shape.as_list()[0]
static_first_dims = [_get_shape(elem) for elem in flat_elems]
if any([s is None for s in static_first_dims]):
batch_size = math_ops.reduce_max(
[array_ops.shape(elem)[0] for elem in flat_elems])
else:
batch_size = max(static_first_dims)
return pfor(loop_fn, batch_size,
fallback_to_while_loop=fallback_to_while_loop)
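# Illustrative sketch of the unit-dimension broadcasting described in the
# docstring above (not part of the original module; assumes
# `import tensorflow as tf` at the call site):
#
#   xs = tf.ones([8, 3])                # batch dimension B = 8
#   bias = tf.constant([[1., 2., 3.]])  # first dimension 1, broadcast to B
#   out = tf.vectorized_map(lambda args: args[0] + args[1], (xs, bias))
#   # out.shape == (8, 3)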
|
|
################################################################################
#
# Copyright (c) 2011-2014, Alexander Todorov <atodorov@nospam.dif.io>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
import cpan
import pypi
import pear
import pear2
import nodejs
import github
import rubygems
import packagist
import mavencentral
import os
import sys
import bugs
import utils
from django.db import models
from datetime import datetime
from django.conf import settings
from utils import URL_ADVISORIES
from django.db.models import Manager
from django.contrib.auth.models import User
from managers import SkinnyManager
try:
FQDN = settings.FQDN
except:
FQDN=""
try:
RUBYGEMS_API_KEY = settings.RUBYGEMS_API_KEY
except:
RUBYGEMS_API_KEY = False
# legacy table names
try:
APPLICATION_DB_TABLE = settings.APPLICATION_DB_TABLE
except:
APPLICATION_DB_TABLE = False
try:
PACKAGE_DB_TABLE = settings.PACKAGE_DB_TABLE
except:
PACKAGE_DB_TABLE = False
try:
PACKAGE_VERSION_DB_TABLE = settings.PACKAGE_VERSION_DB_TABLE
except:
PACKAGE_VERSION_DB_TABLE = False
try:
INSTALLED_PACKAGE_DB_TABLE = settings.INSTALLED_PACKAGE_DB_TABLE
except:
INSTALLED_PACKAGE_DB_TABLE = False
try:
ADVISORY_DB_TABLE = settings.ADVISORY_DB_TABLE
except:
ADVISORY_DB_TABLE = False
# Cloud providers
VENDOR_OPENSHIFT_EXPRESS=0
VENDOR_DOTCLOUD=1
VENDOR_HEROKU=2
VENDOR_CLOUDCONTROL=3
VENDOR_APPFOG=4
# Generic vendors
VENDOR_VIRTUALENV=1000
VENDOR_MANUAL_IMPORT=10000
# used in Django Admin and in
# application dashboard
VENDOR_TYPES = (
(VENDOR_OPENSHIFT_EXPRESS, 'OpenShift'),
(VENDOR_DOTCLOUD, 'dotCloud'),
(VENDOR_HEROKU, 'Heroku'),
(VENDOR_CLOUDCONTROL, 'cloudControl'),
(VENDOR_APPFOG, 'AppFog'),
(VENDOR_VIRTUALENV, 'virtualenv'),
(VENDOR_MANUAL_IMPORT, 'Manual import'),
)
APP_STATUS_REMOVED=-10
APP_STATUS_IMPORTING=-5
APP_STATUS_PENDING=0
# NB: Always keep working states > 0
# since the UI hard codes this
APP_STATUS_SUSPENDED=5
APP_STATUS_APPROVED=10
APP_STATUS_UPTODATE=20
APP_STATUS_NEEDSUPDATE=30
STATUS_TYPES = (
(APP_STATUS_REMOVED, 'Removed'),
(APP_STATUS_IMPORTING, 'Importing'),
(APP_STATUS_PENDING, 'Pending'),
(APP_STATUS_SUSPENDED, 'Suspended'),
(APP_STATUS_APPROVED, 'Approved'),
(APP_STATUS_UPTODATE, 'Up to date'),
(APP_STATUS_NEEDSUPDATE, 'Needs update'),
)
class Application(models.Model):
'''
Stores information about tracked applications
'''
# override default QuerySet manager
objects = SkinnyManager()
class Meta:
if APPLICATION_DB_TABLE:
db_table = APPLICATION_DB_TABLE
owner = models.ForeignKey(User, unique=False)
name = models.CharField(max_length=128)
uuid = models.CharField(max_length=64)
type = models.CharField(max_length=32)
vendor = models.IntegerField(choices=VENDOR_TYPES)
status = models.IntegerField(choices=STATUS_TYPES, db_index=True)
last_checkin = models.DateTimeField(null=True, blank=True, db_index=True)
date_approved = models.DateTimeField(null=True, blank=True, db_index=True)
date_removed = models.DateTimeField(null=True, blank=True)
url = models.URLField()
def __unicode__(self):
return unicode(self.name)
def type_img_48_url(self):
"""
Return the URL with icon for this application
Size: 48x48 px
"""
img_id = None
type_lower = self.type.lower()
if type_lower.find('python') > -1:
img_id = PYPI_PYTHON_PKG
elif type_lower.find('ruby') > -1:
img_id = RUBYGEM_RUBY_PKG
elif type_lower.find('node') > -1:
img_id = NODEJS_PKG
elif type_lower.find('java') > -1:
img_id = "java"
elif type_lower.find('perl') > -1:
img_id = PERL_CPAN_PKG
elif type_lower.find('php') > -1:
img_id = "php"
return "%si/p/t/48/%s.png" % (settings.STATIC_URL, img_id)
# NB: values are hard-coded into the client code
# Do not change, Update only
PYPI_PYTHON_PKG = 0
RUBYGEM_RUBY_PKG = 1
NODEJS_PKG = 2
JAVA_MAVEN_PKG = 300
PERL_CPAN_PKG = 400
PHP_PEAR_PKG = 500
PHP_PEAR2_PKG = 501
PHP_PACKAGIST_PKG = 600
GITHUB_TAGGED_PKG = 2000
# used for Django Admin
PACKAGE_TYPES = (
(PYPI_PYTHON_PKG, 'Python'),
(RUBYGEM_RUBY_PKG, 'Ruby'),
(NODEJS_PKG, 'Node.js'),
(JAVA_MAVEN_PKG, 'Java'),
(PERL_CPAN_PKG, 'Perl'),
(PHP_PEAR_PKG, 'PHP'),
(PHP_PEAR2_PKG, 'PHP'),
(PHP_PACKAGIST_PKG, 'PHP'),
(GITHUB_TAGGED_PKG, 'GitHub project'),
)
# Used to get the proper callbacks
PACKAGE_CALLBACKS = {
PYPI_PYTHON_PKG: {
'compare_versions' : pypi.compare_versions,
'get_url' : pypi.get_url,
'get_latest' : pypi.get_latest,
'find_date' : pypi.get_release_date,
'get_download_url' : pypi.get_download_url,
'get_latest_packages_from_rss' : pypi.get_latest_from_rss,
},
RUBYGEM_RUBY_PKG: {
'compare_versions' : rubygems.compare_versions,
'get_url' : rubygems.get_url,
'get_latest' : rubygems.get_latest,
'find_date' : rubygems.get_release_date,
'get_download_url' : rubygems.get_download_url,
# if RUBYGEMS_API_KEY is defined disable RSS imports
# Need to manually register the .../hook/rubygems/ path with RubyGems.org
# See urls.py/views.py and http://guides.rubygems.org/rubygems-org-api/#webhook_methods
'get_latest_packages_from_rss' : None if RUBYGEMS_API_KEY else rubygems.get_latest_from_rss,
},
NODEJS_PKG: {
'compare_versions' : nodejs.compare_versions,
'get_url' : nodejs.get_url,
'get_latest' : nodejs.get_latest,
'find_date' : nodejs.get_release_date,
'get_download_url' : nodejs.get_download_url,
'get_latest_packages_from_rss' : nodejs.get_latest_from_rss,
},
JAVA_MAVEN_PKG: {
'compare_versions' : None,
'get_url' : mavencentral.get_url,
'get_latest' : mavencentral.get_latest,
'find_date' : mavencentral.get_release_date,
'get_download_url' : mavencentral.get_download_url,
'get_latest_packages_from_rss' : mavencentral.get_latest_from_rss,
},
PERL_CPAN_PKG: {
'compare_versions' : None,
'get_url' : cpan.get_url,
'get_latest' : cpan.get_latest,
'find_date' : cpan.get_release_date,
'get_download_url' : cpan.get_download_url,
'get_latest_packages_from_rss' : cpan.get_latest_from_rss,
},
PHP_PEAR_PKG: {
'compare_versions' : None,
'get_url' : pear.get_url,
'get_latest' : pear.get_latest,
'find_date' : pear.get_release_date,
'get_download_url' : pear.get_download_url,
'get_latest_packages_from_rss' : pear.get_latest_from_rss,
},
PHP_PEAR2_PKG: {
'compare_versions' : None,
'get_url' : pear2.get_url,
'get_latest' : pear2.get_latest,
'find_date' : pear2.get_release_date,
'get_download_url' : pear2.get_download_url,
'get_latest_packages_from_rss' : pear2.get_latest_from_rss,
},
PHP_PACKAGIST_PKG: {
'compare_versions' : None,
'get_url' : packagist.get_url,
'get_latest' : packagist.get_latest,
'find_date' : packagist.get_release_date,
'get_download_url' : packagist.get_download_url,
'get_latest_packages_from_rss' : packagist.get_latest_from_rss,
},
GITHUB_TAGGED_PKG: {
'compare_versions' : None,
'get_url' : github.get_url,
'get_latest' : github.get_latest_from_tag,
'find_date' : github.get_release_date_from_tag,
'get_download_url' : github.get_download_url_from_tag,
'get_latest_packages_from_rss' : None,
},
}
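# Illustrative lookup sketch (not part of the original code): callers resolve a
# backend helper by package type and must handle the None entries above.
#
#   fetcher = PACKAGE_CALLBACKS[NODEJS_PKG]['get_latest_packages_from_rss']
#   if fetcher is not None:
#       ...  # invoke the backend-specific helper (see nodejs.py for its signature)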
# used for Package, PackageVersion and Advisory
STATUS_DROPPED = -10
STATUS_NEW = 0
STATUS_MODIFIED = 5
STATUS_ASSIGNED = 10
STATUS_VERIFIED = 20
STATUS_PUSH_READY = 30
STATUS_LIVE = 40
PACKAGE_STATUSES = (
(STATUS_NEW, 'NEW'), # new package submitted to DB. Not processed by a person
(STATUS_MODIFIED, 'MODIFIED'), # automatically modified, e.g. by cron
(STATUS_ASSIGNED, 'ASSIGNED'), # assigned to somebody to collect website, source url, etc.
(STATUS_VERIFIED, 'VERIFIED'), # all information has been collected and verified
)
#NB: The names are displayed on the public site
BUG_TRACKER_CHOICES = (
(bugs.BUG_TYPE_NONE, "N/A"),
(bugs.BUG_TYPE_UNKNOWN, "Unknown"),
(bugs.BUG_TYPE_GITHUB, "GitHub"),
(bugs.BUG_TYPE_BUGZILLA, "Bugzilla"),
(bugs.BUG_TYPE_BITBUCKET, "Bitbucket"),
(bugs.BUG_TYPE_LAUNCHPAD, "Launchpad"),
(bugs.BUG_TYPE_GOOGLE, "Google Code"),
(bugs.BUG_TYPE_TRAC, "Trac"),
(bugs.BUG_TYPE_ROUNDUP, "Roundup Issue Tracker"),
(bugs.BUG_TYPE_SOURCEFORGE, "SourceForge.net"),
(bugs.BUG_TYPE_LIGHTHOUSE, "Lighthouse"),
(bugs.BUG_TYPE_RT, "RT: Request Tracker"),
(bugs.BUG_TYPE_PLONE, "Plone"),
(bugs.BUG_TYPE_RT_PERL_ORG, "RT: rt.perl.org"),
(bugs.BUG_TYPE_YUI_TRACKER, "YUI Library tracker"),
(bugs.BUG_TYPE_PIVOTAL_TRACKER, "Pivotal Tracker"),
(bugs.BUG_TYPE_PEAR_PHP_NET, "pear.php.net bug tracker"),
(bugs.BUG_TYPE_RUBYFORGE, "RubyForge.org bug tracker"),
(bugs.BUG_TYPE_REDMINE, "Redmine"),
(bugs.BUG_TYPE_LOGILAB_ORG, "Logilab.org/CubicWeb"),
(bugs.BUG_TYPE_JIRA, "Jira"),
(bugs.BUG_TYPE_WINCENT, "wincent.com"),
)
#NB: The names are displayed on the public site
SCM_TYPES = (
(utils.SCM_UNKNOWN, "Unknown"),
(utils.SCM_GIT, "Git"),
(utils.SCM_MERCURIAL, "Mercurial"),
(utils.SCM_BAZAAR, "Bazaar"),
(utils.SCM_SUBVERSION, "Subversion"),
(utils.SCM_CVS, "CVS"),
(utils.SCM_METACPAN, "metacpan.org"),
(utils.SCM_TARBALL, "Tarball (.tar.gz, .tar.bz2, .tgz)"),
)
# used for directory naming and such
SCM_SHORT_NAMES = {
utils.SCM_GIT : 'git',
utils.SCM_MERCURIAL : 'hg',
utils.SCM_BAZAAR : 'bzr',
utils.SCM_SUBVERSION : 'svn',
utils.SCM_CVS : 'cvs',
utils.SCM_METACPAN : 'metacpan',
utils.SCM_TARBALL : 'tarball',
utils.SCM_APIGEN : 'api',
utils.SCM_MAGIC : 'magic',
}
# used below to construct paths to the bin directory
LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))
HGBIN = 'hg'
HG_DIFF_PLUS_STAT = LOCAL_DIR + '/bin/hg_diff_stat'
BZRBIN = 'bzr'
BZR_DIFFSTAT = LOCAL_DIR + '/bin/bzr-diffstat'
BZR_DIFF_PLUS_STAT = LOCAL_DIR + '/bin/bzr_diff_stat'
DIFF_METACPAN_BIN = LOCAL_DIR + '/diff_metacpan'
# make use of the locally installed bzr/hg if possible
if os.environ.has_key('OPENSHIFT_HOMEDIR'):
# we're on OpenShift - not running the workers there usually
HGBIN = os.environ['OPENSHIFT_HOMEDIR'] + '/python-2.6/virtenv/bin/' + HGBIN
BZRBIN = os.environ['OPENSHIFT_HOMEDIR'] + '/python-2.6/virtenv/bin/' + BZRBIN
elif hasattr(sys, 'real_prefix') and os.path.exists(sys.prefix+'/bin/'):
# we're inside a virtualenv and process was started with ~/.virtualenv/name/bin/python script.py
HGBIN = sys.prefix + '/bin/' + HGBIN
HG_DIFF_PLUS_STAT = "%s/bin/python %s" % (sys.prefix, HG_DIFF_PLUS_STAT)
BZRBIN = sys.prefix + '/bin/' + BZRBIN
# BZR_DIFFSTAT = "%s/bin/python %s" % (sys.prefix, BZR_DIFFSTAT)
BZR_DIFF_PLUS_STAT = "%s/bin/python %s" % (sys.prefix, BZR_DIFF_PLUS_STAT)
DIFF_METACPAN_BIN = "%s/bin/python %s" % (sys.prefix, DIFF_METACPAN_BIN)
elif os.environ.has_key('HOME') and os.path.exists(os.environ['HOME']+'/.virtualenvs/difio/bin/'):
# $HOME is defined - try to find a sane virtualenv
# NB: $HOME is not defined when `service celeryd start` is run
HGBIN = os.environ['HOME'] + '/.virtualenvs/difio/bin/' + HGBIN
BZRBIN = os.environ['HOME'] + '/.virtualenvs/difio/bin/' + BZRBIN
# clone/checkout commands
# arguments are <scmurl> <directory>
SCM_CLONE_CMD = {
utils.SCM_GIT : 'git clone %s %s',
utils.SCM_MERCURIAL : HGBIN + ' clone %s %s',
utils.SCM_BAZAAR : BZRBIN + ' branch --use-existing-dir %s %s',
utils.SCM_SUBVERSION : 'svn checkout --non-interactive --trust-server-cert %s %s',
utils.SCM_CVS : 'cvs %s && mv cvstmp %s', # -z6 -d:pserver:anonymous@python-ldap.cvs.sourceforge.net:/cvsroot/python-ldap co -d cvstmp -P python-ldap
utils.SCM_METACPAN : None,
utils.SCM_TARBALL : 'echo %s >/dev/null; cd %s && git init',
utils.SCM_APIGEN : 'echo %s >/dev/null; cd %s && git init',
}
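# Illustrative example (not part of the original code): the templates above are
# filled via %-formatting with (scmurl, directory), e.g.
#
#   SCM_CLONE_CMD[utils.SCM_GIT] % ('git://example.org/project.git', '/tmp/checkout')
#   # -> 'git clone git://example.org/project.git /tmp/checkout'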
# pull/update sources
SCM_PULL_CMD = {
utils.SCM_GIT : 'git pull && git fetch --tags',
utils.SCM_MERCURIAL : HGBIN + ' pull',
utils.SCM_BAZAAR : BZRBIN + ' merge',
utils.SCM_SUBVERSION : 'svn update --non-interactive --trust-server-cert',
utils.SCM_CVS : 'cvs update -dPA',
utils.SCM_METACPAN : None,
utils.SCM_TARBALL : None,
utils.SCM_APIGEN : None,
}
# diff changelog
# arguments <old-rev> <new-rev> <changelog>
SCM_DIFF_CHANGELOG_CMD = {
utils.SCM_GIT : 'git diff -M %s..%s -- %s',
utils.SCM_MERCURIAL : HGBIN + ' diff -r %s -r %s %s',
utils.SCM_BAZAAR : BZRBIN + ' diff -r %s..%s %s',
utils.SCM_SUBVERSION : 'svn diff --non-interactive --trust-server-cert -r %s:%s %s',
utils.SCM_CVS : 'cvs diff -u -r %s -r %s %s',
utils.SCM_METACPAN : DIFF_METACPAN_BIN + ' %s %s %s',
utils.SCM_APIGEN : None,
}
SCM_DIFF_CHANGELOG_CMD[utils.SCM_TARBALL] = SCM_DIFF_CHANGELOG_CMD[utils.SCM_GIT]
# diff all files
# arguments <old-rev> <new-rev>
SCM_DIFF_ALL_CMD = {
utils.SCM_GIT : 'git diff -M %s..%s',
utils.SCM_MERCURIAL : HGBIN + ' diff -r %s -r %s',
utils.SCM_BAZAAR : BZRBIN + ' diff -r %s..%s',
utils.SCM_SUBVERSION : 'svn diff --non-interactive --trust-server-cert -r %s:%s',
utils.SCM_CVS : 'cvs diff -u -r %s -r %s',
utils.SCM_METACPAN : DIFF_METACPAN_BIN + ' %s %s',
}
SCM_DIFF_ALL_CMD[utils.SCM_TARBALL] = SCM_DIFF_ALL_CMD[utils.SCM_GIT]
SCM_DIFF_ALL_CMD[utils.SCM_APIGEN] = 'git diff -M -u --stat %s..%s'
# diff stat shows full stat.
# arguments <old-rev> <new-rev>
SCM_DIFF_STAT_CMD = {
utils.SCM_GIT : 'git diff --stat -M %s..%s',
utils.SCM_MERCURIAL : HGBIN + ' diff --stat -r %s -r %s',
utils.SCM_BAZAAR : BZR_DIFF_PLUS_STAT + ' ' + BZR_DIFFSTAT + ' %s %s 1',
#todo add diff summary below
utils.SCM_SUBVERSION : None, # "svn diff -r %s:%s --summarize", # doesn't print totals at the end
utils.SCM_CVS : None,
utils.SCM_METACPAN : None, # todo: fix me
}
SCM_DIFF_STAT_CMD[utils.SCM_TARBALL] = SCM_DIFF_STAT_CMD[utils.SCM_GIT]
SCM_DIFF_STAT_CMD[utils.SCM_APIGEN] = 'git diff -M --shortstat %s..%s'
# get commit log
# arguments <old-rev> <new-rev>
SCM_LOG_CMD = {
utils.SCM_GIT : 'git log %s..%s',
utils.SCM_MERCURIAL : HGBIN + ' log -r %s:%s',
utils.SCM_BAZAAR : BZRBIN + ' log -r %s..%s',
utils.SCM_SUBVERSION : 'svn log --non-interactive --trust-server-cert -r %s:%s',
utils.SCM_CVS : 'cvs log -r %s:%s *',
utils.SCM_METACPAN : None,
utils.SCM_TARBALL : None,
utils.SCM_APIGEN : None,
}
# get commit log under particular path
# arguments <old-rev> <new-rev> <path>
SCM_LOG_PATH_CMD = {
utils.SCM_GIT : 'git log %s..%s -- %s',
utils.SCM_MERCURIAL : HGBIN + ' log -r %s:%s %s',
utils.SCM_BAZAAR : BZRBIN + ' log -r %s..%s %s',
utils.SCM_SUBVERSION : 'svn log --non-interactive --trust-server-cert -r %s:%s %s',
utils.SCM_CVS : 'cvs log -r %s:%s %s',
utils.SCM_METACPAN : None,
utils.SCM_TARBALL : None,
utils.SCM_APIGEN : None,
}
# list tags commands
SCM_LIST_TAGS_CMD = {
utils.SCM_GIT : 'git tag', # this is inaccurate: 'for t in `git tag`; do sha=`git show --format=format:"%H" $t`; echo $t,$sha | cut -f1 -d" "; done',
utils.SCM_MERCURIAL : HGBIN + ' tags',
utils.SCM_BAZAAR : BZRBIN + ' tags',
utils.SCM_SUBVERSION : None,
utils.SCM_CVS : None,
utils.SCM_METACPAN : None,
utils.SCM_TARBALL : None,
utils.SCM_APIGEN : None,
}
class Package(models.Model):
'''
Stores information about a software package
'''
# override default QuerySet manager
objects = SkinnyManager()
class Meta:
permissions = (
("package_modify_all", "Can modify all fields"),
)
if PACKAGE_DB_TABLE:
db_table = PACKAGE_DB_TABLE
name = models.CharField(max_length=128, blank=True, db_index=True)
type = models.IntegerField('Package Type', choices=PACKAGE_TYPES, null=True, blank=True, db_index=True)
website = models.URLField(null=True, blank=True)
scmurl = models.CharField('URL to check-out source', max_length=256, null=True, blank=True)
scmurl.help_text = "For example git read-only url. NB: if SCM Type is Tarball then URL is N/A"
scmtype = models.IntegerField('Type of SCM', choices=SCM_TYPES, default=utils.SCM_UNKNOWN, db_index=True)
bugurl = models.CharField('Format string for bug URLs', max_length=256, null=True, blank=True)
bugurl.help_text = 'e.g. http://bugzilla.redhat.com/%d'
bugtype = models.IntegerField('Bug tracker type', choices=BUG_TRACKER_CHOICES, default=bugs.BUG_TYPE_UNKNOWN, db_index=True)
changelog = models.CharField('Name of change log file', max_length=256, null=True, blank=True)
changelog.help_text = 'This is used to automatically generate details about an advisory'
status = models.IntegerField(choices=PACKAGE_STATUSES, default=STATUS_NEW, db_index=True)
assigned_to = models.CharField(blank=True, null=True, max_length=64)
last_checked = models.DateTimeField(null=True, blank=True, default=datetime(2000, 01, 01), db_index=True)
# this is set automatically and used when searching to display updates
# it is always set to the latest according to package specific version sorting function
latest_version = models.CharField(max_length=64, null=True, blank=True)
# used for multiple subpackages in the same repo
subpackage_path = models.CharField(max_length=256, null=True, blank=True)
# when added to DB. used internally wrt manual Package additions
added_on = models.DateTimeField(db_index=True, default=datetime.now)
def __unicode__(self):
return unicode(self.name)
def index_url(self):
"""
Return the URL in the package index.
"""
if self.type == PYPI_PYTHON_PKG:
return "https://pypi.python.org/pypi/%s" % pypi._other_name(self.name)
elif self.type == RUBYGEM_RUBY_PKG:
return "https://rubygems.org/gems/%s" % self.name
elif self.type == NODEJS_PKG:
return "https://npmjs.org/package/%s" % self.name
elif self.type == JAVA_MAVEN_PKG:
[gid, aid] = mavencentral._groupid_artifactid(self.name)
if self.latest_version:
return "http://search.maven.org/#artifactdetails|%s|%s|%s|" % (gid, aid, self.latest_version)
else:
return "http://search.maven.org/#search|ga|1|g%%3A%%22%s%%22%%20AND%%20a%%3A%%22%s%%22" % (gid, aid)
elif self.type == PERL_CPAN_PKG:
return "https://metacpan.org/release/%s" % '-'.join(cpan._other_name(self.name).split('::'))
elif self.type == PHP_PEAR_PKG:
return "http://pear.php.net/package/%s" % self.name
elif self.type == PHP_PEAR2_PKG:
return "http://pear2.php.net/%s" % self.name
elif self.type == PHP_PACKAGIST_PKG:
return "https://packagist.org/packages/%s" % self.name
elif self.type == GITHUB_TAGGED_PKG:
return "https://github.com/%s" % self.name
else:
return "UNKNOWN"
def type_img_48_url(self):
"""
Return the URL of the icon for this package type
Size: 48x48 px
"""
return "%si/p/t/48/%d.png" % (settings.STATIC_URL, self.type)
PACKAGE_VERSION_STATUSES = (
(STATUS_NEW, 'NEW'), # new version found by automated tools. Not processed by a person
(STATUS_MODIFIED, 'MODIFIED'), # automatically modified, e.g. by cron
(STATUS_ASSIGNED, 'ASSIGNED'), # assigned to somebody to inspect and collect info
(STATUS_VERIFIED, 'VERIFIED'), # all information has been collected and verified
)
class PackageVersion(models.Model):
"""
Describes different versions of the same package
"""
# override default QuerySet manager
objects = SkinnyManager()
class Meta:
permissions = (
("packageversion_modify_all", "Can modify all fields"),
)
if PACKAGE_VERSION_DB_TABLE:
db_table = PACKAGE_VERSION_DB_TABLE
package = models.ForeignKey(Package, unique=False)
version = models.CharField(max_length=64, db_index=True)
scmid = models.CharField('Branch/tag/commit/revision for this version', max_length=128, null=True, blank=True)
status = models.IntegerField(choices=PACKAGE_VERSION_STATUSES, default=STATUS_NEW, db_index=True)
assigned_to = models.CharField(blank=True, null=True, max_length=64)
released_on = models.DateTimeField(null=True, blank=True, db_index=True)
download_url = models.CharField(blank=True, null=True, max_length=200)
download_url.help_text = 'URL to package SOURCE, e.g. http://project.org/downloads/project-1.0.tar.gz'
size = models.IntegerField('Size in bytes', default=None, null=True, blank=True)
# when added to DB. used internally wrt manual PackageVersion additions
added_on = models.DateTimeField(db_index=True, default=datetime.now)
def __unicode__(self):
return unicode("%s-%s" % (self.package, self.version))
class InstalledPackage(models.Model):
"""
A package that is installed into an Application
"""
# override default QuerySet manager
objects = SkinnyManager()
class Meta:
if INSTALLED_PACKAGE_DB_TABLE:
db_table = INSTALLED_PACKAGE_DB_TABLE
# NB: no joins here
application = models.IntegerField(null=False, db_index=True, default=0)
owner = models.IntegerField(null=False, db_index=True)
package = models.IntegerField(null=False, db_index=True)
version = models.IntegerField(null=False, db_index=True, default=0)
SEVERITY_TYPES = (
(utils.SEVERITY_UNKNOWN, 'Unknown'),
(utils.SEVERITY_LOW, 'Low'),
(utils.SEVERITY_MEDIUM, 'Medium'),
(utils.SEVERITY_HIGH, 'High'),
)
ADVISORY_STATUSES = (
(STATUS_DROPPED, 'DROPPED'), # DROPPED, NO_SHIP
(STATUS_NEW, 'NEW'), # new advisory generated by automated tools. Not processed by a person
(STATUS_MODIFIED, 'MODIFIED'), # automatic collection of information has completed
(STATUS_ASSIGNED, 'ASSIGNED'), # assigned to somebody to inspect and collect additional info
(STATUS_PUSH_READY, 'PUSH_READY'), # all info collected and verified, ready to publish live
(STATUS_LIVE, 'LIVE'), # advisory has been pushed live already. the point of no return.
)
# See also:
# http://rhn.redhat.com/errata/RHBA-2011-1642.html
# https://rhn.redhat.com/errata/RHSA-2011-1323.html
class Advisory(models.Model):
"""
Represents update information between two versions of a package.
"""
# override default QuerySet manager
objects = SkinnyManager()
class Meta:
permissions = (
("advisory_modify_all", "Can modify all fields"),
("advisory_drop", "Can DROP advisories"),
)
if ADVISORY_DB_TABLE:
db_table = ADVISORY_DB_TABLE
old = models.ForeignKey(PackageVersion, unique=False, related_name='Old version')
new = models.ForeignKey(PackageVersion, unique=False, related_name='New version')
#TODO: replace this with a string similarity index based on difflib.SequenceMatcher
# and then reverse utils.which_severity() b/c 100% means no changes, 0% means totally different
# and maybe remove this field altogether and add the data to more.json
type = models.IntegerField('Change rate %', default=None, null=True, blank=True)
# type.help_text = "NB: Since 2012-04-20 this holds the change rate %"
severity = models.IntegerField(choices=SEVERITY_TYPES, default=utils.SEVERITY_UNKNOWN)
# severity.help_text = "NB: Since 2012-04-20 this is READ-ONLY and based on change rate %"
status = models.IntegerField(choices=ADVISORY_STATUSES, default=STATUS_NEW, db_index=True)
assigned_to = models.CharField(blank=True, null=True, max_length=64)
# when information in DB was generated
last_updated = models.DateTimeField(null=True, blank=True, db_index=True)
has_static_page = models.BooleanField(default=False, db_index=True)
overriden = models.BooleanField(default=False, db_index=True)
def __unicode__(self):
return unicode("%s to %s" % (self.old, self.new))
@classmethod
def get_full_path_from_string(cls, name, old, new, pk):
"""
Used internally to avoid DB hits.
"""
return '%s/%s/%s-%s/%s-%s/%d/' % (FQDN, URL_ADVISORIES, name, old, name, new, pk)
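# For example (illustrative), name='Django', old='1.4', new='1.5', pk=1234
# yields '<FQDN>/<URL_ADVISORIES>/Django-1.4/Django-1.5/1234/'.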
def get_path(self):
return '/%s/%s/%s/%d/' % (URL_ADVISORIES, self.old, self.new, self.id)
def get_full_path(self):
return FQDN + self.get_path()
def get_title(self): # FALSE NEGATIVE, used in templates
return "Changes between %s and %s" % (self.old, self.new)
def severity_img(self): # FALSE NEGATIVE used in templates
"""
Return the HTML img tag with the icon representing Severity
"""
sev_display = self.get_severity_display()
return "<img src='%si/s/%s.png' alt='%s' title='%s' />" % (settings.STATIC_URL, self.severity, sev_display, sev_display)
class ApplicationHistory(models.Model):
"""
Records package history as text.
"""
# override default QuerySet manager
objects = SkinnyManager()
application = models.ForeignKey(Application, unique=False)
when_added = models.DateTimeField(db_index=True)
packages = models.TextField(null=True, blank=True)
comments = models.CharField(max_length=256, null=True, blank=True)
def __unicode__(self):
return unicode("%s - %s" % (self.application.name, self.when_added))
class Bug(models.Model):
"""
Holds bug descriptions.
"""
# override default QuerySet manager
objects = SkinnyManager()
advisory = models.ForeignKey(Advisory, unique=False)
number = models.IntegerField(db_index=True) # for bugs dedup maybe ???
title = models.CharField(max_length=256, null=True, blank=True)
url = models.URLField()
context = models.CharField(max_length=256, null=True, blank=True)
reported_on = models.DateTimeField(db_index=True, null=True, blank=True, default=None) # indexes for future queries
closed_on = models.DateTimeField(db_index=True, null=True, blank=True, default=None)
def __unicode__(self):
return unicode("%s - %d: %s" % (self.url, self.number, self.title))
class AbstractMockProfile(models.Model):
"""
Any AUTH_PROFILE_MODULE class should inherit from this
and override the default methods. You can also override
the default objects manager!
"""
objects = SkinnyManager()
user = models.ForeignKey(User, unique=True)
def get_email_delay(self):
return 1 # notify every day
def is_subscribed(self):
return True
def get_subscription_plan_name(self):
return "Beaker"
class Meta:
abstract = True
class MockProfileManager(models.Manager):
"""
This manager creates MockProfile objects on the fly without
touching the database. It is needed by User.get_profile()
b/c we can't have an abstract base class as AUTH_PROFILE_MODULE.
"""
def using(self, *args, **kwargs):
"""
It doesn't matter which database we use. Return self
b/c everything happens in memory.
"""
return self
def get(self, *args, **kwargs):
"""
User.get_profile() calls .using().get(user_id__exact=X)
so we instrument it here to return a MockProfile() with
user_id=X parameter. Anything else will probably break!!!
"""
params = {}
for p in kwargs.keys():
params[p.split("__")[0]] = kwargs[p]
return MockProfile(params)
class MockProfile(AbstractMockProfile):
"""
In-memory (fake) profile class used by default for
the AUTH_PROFILE_MODULE setting.
NB: this class is for demonstration purposes only!
Use your own implementation when deploying Difio!
"""
objects = MockProfileManager()
class Meta:
managed = False
|
|
import math
import pygame
import time
from random import uniform, choice
from itertools import cycle
import binball_game.collision as collision
import binball_game.events as events
class Point():
def __init__(self, x, y=None):
self.x = x
self.y = y
def copy(self):
return Point(self.x, self.y)
def ints(self):
return (int(self.x), int(self.y))
def dot(self, v):
return self.x*v.x + self.y*v.y
def __add__(self, v):
return Point(self.x+v.x, self.y+v.y)
def __sub__(self, v):
return Point(self.x-v.x, self.y-v.y)
def __mul__(self, v):
try:
return Point(self.x*v.x, self.y*v.y)
except AttributeError:
return Point(self.x*v, self.y*v)
def __truediv__(self, v):
try:
return Point(self.x/v.x, self.y/v.y)
except AttributeError:
return Point(self.x/v, self.y/v)
def __neg__(self):
return Point(self.x, self.y) * Point(-1, -1)
def __str__(self):
return "Point({:.3f}, {:.3f})".format(self.x, self.y)
def __repr__(self):
return str(self)
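# Illustrative usage (not part of the original module):
#   v, w = Point(3, 4), Point(1, 2)
#   (v + w).ints()  # -> (4, 6)
#   v.dot(w)        # -> 11
#   (-v).ints()     # -> (-3, -4)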
class Segment():
"""Line segment with which ball can interact
Parameters
----------
a : tuple
Location of beginning of segment
b : tuple
Location of ending of segment
Attributes
----------
angle : float
angle of segment in radians, where a horizontal segment is 0 or pi
"""
def __init__(self, a, b, value=0, noise='seg2', color=(0,0,0)):
self.a = Point(*a)
self.b = Point(*b)
self.angle = (math.atan2(self.b.x-self.a.x, self.b.y-self.a.y) + math.pi/2) % (2*math.pi)
self.value = value
self.noise = noise
self.color = color
self.thickness = 10
def __repr__(self):
base = '{}({}\n{}\nAngle: {:.2f})\n'
return base.format(self.__class__.__name__, self.a, self.b, self.angle)
class Platforms():
""" """
def __init__(self, start_pt1, start_pt2, noise=''):
self.seg_1 = Segment(start_pt1, (start_pt1[0]+50, start_pt1[1]))
self.seg_2 = Segment(start_pt2,
(start_pt2[0]+50, start_pt2[1]),
color=(184, 199, 224))
self.distance = 600-41-200-50
range_ = range(start_pt1[0], start_pt1[0]+self.distance, 2)
self.pos_gen = cycle((*range_, *range_[::-1]))
def update(self):
new_pos = next(self.pos_gen)
self.seg_1.a.x = new_pos
self.seg_1.b.x = new_pos + 50
self.seg_2.a.x = new_pos
self.seg_2.b.x = new_pos + 50
class Particle():
""" A circular object with a velocity, size and mass """
def __init__(self, x, y, size, value=0, noise='jump', bin_gravity=0.01):
self.x = x
self.y = y
self.size = size
self.noise = noise
self.value = value
self.pos = Point(x, y)
self.color = (0, 0, 255)
self.thickness = 0
self.max_speed = 25
self._speed = 0
self.angle = math.pi/2
self.mass = 1
self.drag = 1#.998
self.elasticity = 0.82
self.original_gravity = (3/2*math.pi, 0.065)
self.bin_gravity = (3/2*math.pi, bin_gravity)
self.gravity = self.original_gravity
self.score = 0
self.collision_partner = None
def __repr__(self):
return 'Particle({})'.format(self.pos)
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, val):
"""Limit speed so ball can't pass through objects or move too fast"""
#self._speed = min(.5*self.size-1, val)
self._speed = min(self.max_speed, val)
def move(self):
self.angle, self.speed = self.addVectors(self.angle,
self.speed,
self.gravity[0],
self.gravity[1])
self.x += math.cos(self.angle) * self.speed
self.y -= math.sin(self.angle) * self.speed
self.pos = Point(self.x, self.y)
self.speed *= self.drag
def wall_bounce(self, width, height):
if self.x > width - self.size:
self.x = 2*(width - self.size) - self.x
self.angle = (math.pi - self.angle) % (2*math.pi)
self.speed *= self.elasticity
elif self.x < self.size:
self.x = 2*self.size - self.x
self.angle = (math.pi - self.angle) % (2*math.pi)
self.speed *= self.elasticity
if self.y > height - self.size:
self.y = 2*(height - self.size) - self.y
self.angle = -self.angle % (2*math.pi)
self.speed *= self.elasticity
elif self.y < self.size:
self.y = 2*self.size - self.y
self.angle = - self.angle % (2*math.pi)
self.speed *= self.elasticity
def seg_bounce(self, segment_list):
"""Check for collision with all segments. Update attributes appropriately.
Parameters
----------
segment_list : [Segment]
All segments in the model
"""
for seg in segment_list:
did_collide = collision.segment_particle(seg, self)
if did_collide:
self.collision_partner = seg
self.angle = (2*seg.angle - self.angle) % (2*math.pi)
self.speed *= self.elasticity
while collision.segment_particle(seg, self):
self.x += math.cos(self.angle)
self.y -= math.sin(self.angle)
self.pos = Point(self.x, self.y)
def particle_bounce(self, particle_list):
"""Check for collision with all particles. Update attributes appropriately.
Parameters
----------
segment_list : [Particle]
All particles in the model
"""
for particle in particle_list:
collision_occurs = collision.ball_circle(self, particle, True)
if collision_occurs:
self.collision_partner = particle
self.speed *= self.elasticity
def bounce(self, width, height, segment_list, particle_list):
self.wall_bounce(width, height)
self.seg_bounce(segment_list)
self.particle_bounce(particle_list)
def addVectors(self,angle1, length1, angle2, length2):
""" Returns the sum of two vectors """
x = math.sin(angle1) * length1 + math.sin(angle2) * length2
y = math.cos(angle1) * length1 + math.cos(angle2) * length2
angle = math.atan2(x, y) % (2*math.pi)
length = math.hypot(x, y)
return (angle, length)
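# Worked example (illustrative): combining a length-3 vector at angle 0 with a
# length-4 vector at angle pi/2 under the sin/cos convention used above gives
# x = sin(0)*3 + sin(pi/2)*4 = 4 and y = cos(0)*3 + cos(pi/2)*4 = 3, so
# addVectors(0, 3, math.pi/2, 4) returns roughly (0.927, 5.0).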
class Coin(Particle):
"""An circular object with a value """
def __init__(self, x, y, size, value, noise='coins'):
super().__init__(x, y, size, value=value, noise=noise)
self.color = (255,215,0)
self.coin_timer = 0
def pressed_bonus(self):
message = None
self.coin_timer = time.time()
class Tube(Particle):
def __init__(self, x, y, size, drop_spot, ejection_angle,
value=85, noise='suck'):
super().__init__(x, y, size, value=value, noise=noise)
self.drop_spot = drop_spot
self.ejection_angle = ejection_angle
self.color = (22, 153, 19)
class TubeManager():
"""Repsonsible for controlling and updating Tube components
Notes
-----
This departs from the style of the rest of the components.
Usually collision detection and updating is handled by the Model.
Because the tubes are 'connected', this is a good opportunity to test this style.
Parameters
----------
tube_list
"""
def __init__(self, tube_list):
self.tube_list = tube_list
def teleport_ball(self, ball, tube):
"""Eject the ball from the drop spot of a different tube
Parameters
----------
ball : Particle
Player ball
tube : Tube
Tube with which the ball originally collided
"""
other_tubes = [t for t in self.tube_list if t is not tube]
new_tube = choice(other_tubes)
ball.x, ball.y = new_tube.drop_spot
ball.angle = new_tube.ejection_angle + uniform(-.05, .05)
def update(self, ball):
"""Checks for ball collisions and updates state appropriately.
Parameters
----------
ball : Particle
Player ball
Returns
-------
did_collide : bool
True if ball interacted with one of the tubes
points : int
Value of tube doing the transporting
"""
points = 0
for tube in self.tube_list:
did_collide = collision.ball_circle(ball, tube)
if did_collide:
points = tube.value
self.teleport_ball(ball, tube)
break
return did_collide, points
class Bin():
reload_time = 3
last_pressed = 0
reloaded = True
def __init__(self, num, rekt, color, noise):
self.num = num
self.rekt = rekt
self.color = color
self.noise = noise
self.out_noise = 'flipper'
self._active = False
self.active_color = (0, 0, 0)
self.locked_color = (255, 255, 255)
self.original_color = color
@property
def active(self):
return self._active
@active.setter
def active(self, value):
self._active = value
if self.active:
self.color = self.active_color
else:
self.color = self.original_color
def pressed_event(self, ball):
"""Key press is only valid if the Bins are currently reloaded"""
message = None
if Bin.reloaded:
Bin.last_pressed = time.time()
message = self.do_key_press(ball)
else:
message = events.PressedBinEval(self.num, False)
return message
def do_key_press(self, ball):
message = events.PressedBinEval(self.num, True)
if self.rekt.collidepoint(ball.x, ball.y):
Bin.last_pressed = 0
message = events.PressedBinEval(self.num, 'collide')
ball.speed = ball.max_speed * .75
frac_of_bin = ((ball.y-self.rekt.top)/self.rekt.height)
ball.angle = (0.25 + frac_of_bin*0.5)*math.pi
ball.gravity = ball.original_gravity
ball.y = self.rekt.top - 15
self.active = False
return message
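# Worked example (illustrative): a ball caught at the top edge of the bin gets
# frac_of_bin = 0 and is launched at 0.25*pi (up and to the right); one at the
# bottom edge gets frac_of_bin = 1 and 0.75*pi (up and to the left).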
def update(self, bin_list):
"""Change the color if reload state changes"""
#TODO This can be cleaner.
#Not sure how to do this with @property since Bin.reloaded is a class attribute
old_state = Bin.reloaded
Bin.reloaded = time.time() >= Bin.reload_time + Bin.last_pressed
switched = old_state != Bin.reloaded
if switched and Bin.reloaded:
for bin_ in bin_list:
bin_.color = bin_.original_color
elif switched:
for bin_ in bin_list:
bin_.color = bin_.locked_color
class Spinner():
"""Component that spins and flashes when activated by ball.
Spinners are found in tunnels and freeze the ball while they're spinning.
Parameters
----------
rekt : pygame.Rect
Location of spinner
value : int (default=70)
Points scored if ball interacts with component
noise : str (default=spin)
Name of mp3 to play when spinning
Attributes
----------
original_color : (int)
Color of rekt when not spinning
color : (int)
Current color of rekt. Flashes when activated
spinning : bool
True if spinner has collided with ball and is currently active
spin_counter : int
Number of frames spent spinning
spin_left : int
Number of frames left to spin
"""
def __init__(self, rekt, value=75, noise='spin'):
self.rekt = rekt
self.value = value
self.noise = noise
self.original_color = (50, 100, 150)
self.color = self.original_color
self.spinning = False
self.spin_counter = 100
self.spin_left = self.spin_counter
def update(self):
if self.spinning:
self.spin_left -= 1
if self.spin_left % 10 == 0:
if self.color == self.original_color:
self.color = (150, 100, 50)
else:
self.color = self.original_color
if self.spin_left == 0:
self.spin_left = 100
self.spinning = False
class Flipper():
"""Creates left and right flippers the player controls to hit the ball
Parameters
----------
a : Point
Location of the base of flipper
b : Point
Location of the rotation end of flipper
on_angle : float
radian angle of the flipper at the top of its rotation when the user flips
side : str (default='l')
Indicates if flipper is on left or right side of board
Attributes
----------
rot : int
Makes flipper rotate clockwise (-1) or counter-clockwise (1)
len : float
Length of flipper
angle : float
Current angle of flipper.
off_angle : float
radian angle of the flipper at the bottom of its rotation when the user flips
flip_up : bool
Is True after user 'flips', until angle ~= on_angle
flip_down : bool
Is True after angle ~= on_angle, until angle ~= off_angle
thickness : int
Visual thickness of the flipper line
"""
def __init__(self, a, b, on_angle, side='l'):
self.a = a
self.b = b
self.on_angle = on_angle
self.rot = 1 if side == 'l' else -1
self.len = math.hypot(self.b.x - self.a.x, self.b.y - self.a.y)
self.angle = (math.atan2(a.x-b.x, a.y-b.y) + math.pi/2) % (2*math.pi)
self.off_angle = self.angle
self.flip_up = False
self.flip_down = False
self.thickness = 1
self.value = 0
self.noise = 'flipper'
def move(self):
"""change flipper end position while flipping"""
if self.flip_up:
self.angle += (.09 * self.rot)
elif self.flip_down:
self.angle -= (.09 * self.rot)
self.angle %= 2*math.pi
self.b.x = self.a.x + math.cos(self.angle) * self.len
self.b.y = self.a.y - math.sin(self.angle) * self.len
def test_flip_limit(self):
pass
def update(self):
"""Check flipping state and adjust angle and state accordingly"""
delta = .15
if self.flip_up:
self.move()
if self.on_angle - delta <= self.angle <= self.on_angle + delta:
self.flip_up = False
self.flip_down = True
elif self.flip_down:
self.move()
if self.off_angle - delta <= self.angle <= self.off_angle + delta:
self.flip_down = False
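# Hedged sketch of a complete flip: the input handler (outside this file,
# assumed here) sets flip_up, after which update() rotates the flipper by
# ~0.09 rad per frame up to on_angle and then back down to off_angle.
def _flipper_flip_example(flipper, max_frames=200):
    """Drive one flip cycle; return the frames needed to settle (capped)."""
    flipper.flip_up = True
    frames = 0
    while (flipper.flip_up or flipper.flip_down) and frames < max_frames:
        flipper.update()
        frames += 1
    return frames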
class CurveBall(Particle):
"""Slowly increments the balls angle while in effect field
"""
def __init__(self, x, y, size, curve=.075, value=2, noise='chimes'):
super().__init__(x, y, size, value=value, noise=noise)
self.curve = curve
self.color = (142, 19, 214)
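# Hedged sketch of the curve effect documented above. The real per-frame
# application lives in the physics/collision code (not in this file); this
# helper only illustrates the intent and assumes Particle exposes x, y and
# size under those names.
def _apply_curver_example(curver, ball):
    """Nudge the ball's angle while it sits inside the curver's field."""
    if math.hypot(ball.x - curver.x, ball.y - curver.y) <= curver.size:
        ball.angle = (ball.angle + curver.curve) % (2 * math.pi)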
def init_coin_list(width, height):
coin_list = [
# Coin(width-20, 200,9,50), #test coin
# Coin(width-20, 600,9,50) #test coin
Coin(80,810,9,25), #lt.1
Coin(112,822,9,25), #lt.4
Coin(95,777,9,25), #lt.2
Coin(110,740,9,25), #lt.3
Coin(144,835,9,25), #lt.6
Coin(125,790,9,25), #lt.5
Coin(width-41-80,810,9,25), #rt.1
Coin(width-41-112,822,9,25), #rt.4
Coin(width-41-95,777,9,25), #rt.2
Coin(width-41-110,740,9,25), #rt.3
Coin(width-41-144,835,9,25), #rt.6
Coin(width-41-125,790,9,25), #rt.5
Coin(30,20,15,100),
Coin(540,323,12,100),
#around main curver
Coin(188,500,9,25),
Coin(312,500,9,25),
Coin(250,438,9,25),
Coin(250,562,9,25),
Coin(280,552,9,25),
Coin(302,530,9,25),
Coin(280,448,9,25),
Coin(302,470,9,25),
Coin(198,470,9,25),
Coin(198,530,9,25),
Coin(220,552,9,25),
Coin(220,448,9,25),
Coin(250,500,12,100) #middle coin curver
]
for c in range(110,490,38):
coin_list.append(Coin(c,85,9,25))
return coin_list
def init_launch_runway(width, height):
return pygame.Rect(width-1-40,150,40,height-150)
def init_ball(bin_gravity):
return Particle(599-16,1000-15,15,bin_gravity=bin_gravity)
# return Particle(200, 50, 15,bin_gravity=bin_gravity) #testing platforms
def init_bin_list():
bins = [Bin(0, pygame.Rect(150,912,40,48), (255, 0, 255), 'note1'),
Bin(1, pygame.Rect(150+40,912,80,48), (0, 255, 0), 'note2'),
Bin(2, pygame.Rect(290,912,80,48), (255, 0, 0), 'note3'),
Bin(3, pygame.Rect(290+80,912,40,48), (0, 255, 255), 'note4')]
return bins
def init_spinner_list():
spin = [Spinner(pygame.Rect(482, 400, 25, 25)), #left
Spinner(pygame.Rect(5, 275, 25, 25)), #top
Spinner(pygame.Rect(88, 0, 25, 25))] #right
return spin
def init_tube_list(width):
tube_list = [Tube(17, 50, 7, (17, 20), .25*math.pi), #top left corner
Tube(width - 60, 425, 7, (width-75, 440), 1.4*math.pi), # middle right
Tube(140, 15, 7, (111, 35), 1.5*math.pi)]
return tube_list
def init_curver_list():
curver_list = [CurveBall(250, 500, 50),
CurveBall(525, 250, 25),
CurveBall(520, 200, 20),
CurveBall(490, 290, 20)]
return curver_list
def init_platforms():
return Platforms((100,100),(100,650))
def init_left_flipper():
flipper_left = Flipper(Point(150, 912),
Point(245, 960),
1.57)
return flipper_left
def init_right_flipper():
flipper_right = Flipper(Point(410, 912),
Point(315, 960),
1.57, 'r')
return flipper_right
def init_segment_list(width, height):
segment_data = [((width-1-40, height-1), (width-1-40,150)), #shooter line
((width-1, 25), (width-1-25,0),1), #top right corner
((75, 0), (0,100),10), #top left corner
((width-1-40,837), (410,912)), #right funnel
((0,837), (150,912)), #left funnel
((260, 370), (310, 390),20), #Middle
((55,820), (100,700)), #left triangle pt1
((55,820), (150,860)), #left triangle pt2
((410,860), (width-100,820)), #right triangle pt2
((width-1-141,700), (width-100,820)),#right triangle pt3
((width-1-40, 250), (width-1-150, 450)), #right tunnel top
((width-1-40, 325), (width-1-150, 550)), #right tunnel bottom
((35, 275), (100, 400)), #left tunnel top
((0, 300), (75, 440)), #left tunnel bottom
((80, 0), (78, 25)), # small top tunnel left
((120, 0), (122, 25)), # small top tunnel right
]
segment_list = [Segment(*d) for d in segment_data]
return segment_list
def init_particle_list():
particle_data = [(295, 355, 25,10), #2
(245, 285, 25,10), #1
(345, 270, 25,10), #3
(50, 520, 10,10), #1
(100, 550, 10,10), #3
(55, 585, 10,10) #2
]
particle_list = [Particle(*d) for d in particle_data]
return particle_list
def cap(width):
launch_cap = Segment((width-1-40,150),(width-1,125))
return launch_cap
def init_components(width, height, bin_gravity):
"""Set all the pieces of the game board to their proper locations
Parameters
----------
width : int
width of screen
height : int
height of screen
bin_gravity : float
gravity setting forwarded to the ball (Particle constructor)
Returns
-------
components_dict : dict
wrapper around all different types of components
"""
components_dict = {}
components_dict['launch_runway'] = init_launch_runway(width,height)
components_dict['ball'] = init_ball(bin_gravity)
components_dict['bin_list'] = init_bin_list()
components_dict['spinner_list'] = init_spinner_list()
components_dict['tube_manager'] = TubeManager(init_tube_list(width))
components_dict['curver_list'] = init_curver_list()
components_dict['coin_list'] = init_coin_list(width,height)
components_dict['platforms'] = init_platforms()
components_dict['segment_list'] = init_segment_list(width,height)
components_dict['particle_list'] = init_particle_list()
return components_dict
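# Hedged usage sketch for init_components(): the dimensions and bin gravity
# below are illustrative placeholders, not constants taken from this file.
# Note that the flippers are created separately from the components dict.
def _build_board_example():
    """Assemble the pieces for a hypothetical 600x1000 board."""
    components = init_components(width=600, height=1000, bin_gravity=0.05)
    flippers = (init_left_flipper(), init_right_flipper())
    return components, components['ball'], flippers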
|
|
#-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
'''
Created on Jun 22, 2016
@author: 463188
'''
from datetime import datetime as dateTime2
import datetime
import copy
import re
from dateutil import parser
from ....core.BaseAgent3 import BaseAgent
class JiraAgent(BaseAgent):
changedFields = set()
@BaseAgent.timed
def process(self):
self.userid=self.getCredential("userid")
self.passwd=self.getCredential("passwd")
baseUrl=self.config.get("baseUrl",'')
startFrom = self.config.get("startFrom",'')
lastUpdated = self.tracking.get("lastupdated",startFrom)
currentDate = dateTime2.combine(dateTime2.now().date(), dateTime2.min.time())
responseTemplate=self.getResponseTemplate()
fields = self.extractFields(responseTemplate)
jiraIssuesUrl = baseUrl+"?jql=updated>='"+lastUpdated+"' ORDER BY updated ASC&maxResults="+str(self.config.get("dataFetchCount", 1000))+'&fields='+fields
enableIssueModificationTimeline = self.config.get('enableIssueModificationTimeline',False)
enableReSyncTrigger = self.config.get('enableReSyncTrigger', False)
bypassSprintExtCall = self.config.get('bypassSprintExtCall', False)
issueModificationTimelineCaptureDate=self.tracking.get("issueModificationTimelineCaptureDate", lastUpdated).split(" ")[0]
issueModificationTimelineCaptureDate = parser.parse(issueModificationTimelineCaptureDate)
issueStatusFilter = self.config.get('dynamicTemplate',dict()).get('issueStatusFilter',list())
changeLog = self.config.get('dynamicTemplate',dict()).get('changeLog',None)
if changeLog:
jiraIssuesUrl = jiraIssuesUrl+'&expand=changelog'
changeLogFields = changeLog['fields']
changeLogMetadata = changeLog['metadata']
changeLogResponseTemplate= changeLog['responseTemplate']
startFromDate = parser.parse(startFrom)
total = 1
maxResults = 0
startAt = 0
updatetimestamp = None
sprintField = self.config.get('sprintField',None)
fieldsList = list()
self.propertyExtractor(responseTemplate, fieldsList)
while (startAt + maxResults) < total:
data =[]
workLogData = []
issueModificationTimeline = []
#jiraIssuesUrl = self.buildJiraRestUrl(baseUrl, startFrom, fields) + '&startAt='+str(startAt + maxResults)
response = self.getResponse(jiraIssuesUrl+'&startAt='+str(startAt + maxResults), 'GET', self.userid, self.passwd, None)
jiraIssues = response["issues"]
for issue in jiraIssues:
parsedIssue = self.parseResponse(responseTemplate, issue)
issueStatus = issue.get('fields', dict()).get ('status',dict()).get('name','')
isIssueStatusFilter = str(issueStatus) not in issueStatusFilter
parsedIssue[0]['processed'] = False
inwardIssueMetaData = list()
outwardIssuesMetaData = list()
issueLinkList = issue.get('fields',{}).get ('issuelinks', list())
for issueLink in issueLinkList :
if 'inwardIssue' in issueLink:
linkType = issueLink.get ('type', {}).get('inward', '')
key = issueLink.get ('inwardIssue', {}).get('key', '')
inwardIssueMetaData.append (key+'__'+linkType)
elif 'outwardIssue' in issueLink:
linkType = issueLink.get ('type', {}).get('outward', '')
key = issueLink.get ('outwardIssue', {}).get('key', '')
outwardIssuesMetaData.append (key+'__'+linkType)
parsedIssue[0]['inwardIssuesMetaData'] = inwardIssueMetaData
parsedIssue[0]['outwardIssuesMetaData'] = outwardIssuesMetaData
if sprintField:
self.processSprintInformation(parsedIssue, issue, isIssueStatusFilter, sprintField, self.tracking)
for field in fieldsList:
if field not in parsedIssue[0]:
parsedIssue[0][field] = None
data += parsedIssue
if changeLog:
workLogData += self.processChangeLog(issue, changeLogFields, changeLogResponseTemplate, startFromDate, enableIssueModificationTimeline, issueModificationTimeline, issueModificationTimelineCaptureDate)
maxResults = response['maxResults']
total = response['total']
startAt = response['startAt']
if len(jiraIssues) > 0:
updatetimestamp = jiraIssues[len(jiraIssues) - 1]["fields"]["updated"]
dt = parser.parse(updatetimestamp)
fromDateTime = dt + datetime.timedelta(minutes=1)
fromDateTime = fromDateTime.strftime('%Y-%m-%d %H:%M')
self.tracking["lastupdated"] = fromDateTime
jiraKeyMetadata = {"dataUpdateSupported" : True,"uniqueKey" : ["key"]}
self.publishToolsData(data, jiraKeyMetadata)
#self.publishToolsData(data)
if len(workLogData) > 0:
insightsTimeXFieldMapping = self.config.get('dynamicTemplate', dict()).get('changeLog', {}).get('insightsTimeXFieldMapping', None)
timeStampField = insightsTimeXFieldMapping.get('timefield', None)
timeStampFormat = insightsTimeXFieldMapping.get('timeformat', None)
isEpoch = insightsTimeXFieldMapping.get('isEpoch', None)
self.publishToolsData(workLogData, changeLogMetadata ,timeStampField,timeStampFormat,isEpoch,True)
if len(issueModificationTimeline) > 0:
self.publishToolsData(issueModificationTimeline, {"labels": ["LATEST"], "relation": {"properties": list(self.changedFields) + ['fields'], "name":"ISSUE_CHANGE_TIMELINE", "source": {"constraints":["key"]}, "destination": {"labels": ["TIMELINE"], "constraints":["timelineDate","timelineDateEpoch"]}}})
self.updateTrackingJson(self.tracking)
else:
break
latestJiraDateStr = self.tracking["lastupdated"]
latestJiraDate = parser.parse(latestJiraDateStr)
lastTrackedDate = parser.parse(self.tracking.get("lastTracked", lastUpdated).split(' ')[0])
lastTracked = lastTrackedDate.strftime("%Y-%m-%d %H:%M")
reSync = self.tracking.get("reSync", False)
if enableReSyncTrigger:
if maxResults and not reSync and 0 >= (currentDate - latestJiraDate).total_seconds() <= (26*60*60) and (currentDate - lastTrackedDate).total_seconds() == (24*60*60):
self.tracking["lastupdated"] = lastTracked
self.tracking["issueModificationTimelineCaptureDate"] = lastTracked
self.tracking["reSync"] = True
self.tracking ["lastTracked"] = currentDate.strftime("%Y-%m-%d %H:%M" )
elif reSync and currentDate >= lastTrackedDate:
self.tracking["reSync"] = False
if enableIssueModificationTimeline :
self.tracking["issueModificationTimelineCaptureDate"] = self.tracking["lastupdated"]
if enableReSyncTrigger or enableIssueModificationTimeline:
self.updateTrackingJson(self.tracking)
if bypassSprintExtCall and maxResults:
self.retrieveSprintDetails()
self.retrieveSprintReports()
def buildJiraRestUrl(self, baseUrl, startFrom, fields):
lastUpdatedDate = self.tracking.get("lastupdated", startFrom)
endDate = parser.parse(lastUpdatedDate) + datetime.timedelta(hours=24)
endDate = endDate.strftime('%Y-%m-%d %H:%M')
jiraIssuesUrl = baseUrl+"?jql=updated>='"+lastUpdatedDate+"' AND updated<'"+endDate+"' ORDER BY updated ASC&maxResults="+str(self.config.get("dataFetchCount", 1000))+'&fields='+fields
changeLog = self.config.get('dynamicTemplate',dict()).get('changeLog', None)
if changeLog:
jiraIssuesUrl = jiraIssuesUrl + '&expand=changelog'
return jiraIssuesUrl
def processChangeLog(self, issue, workLogFields, responseTemplate, startFromDate, enableIssueModificationTimeline, issueModificationTimeline, issueModificationTimelineCaptureDate):
changeLog = issue.get('changelog', None)
workLogData = []
injectData = {'issueKey' : issue['key'] }
if changeLog:
histories = changeLog.get('histories', [])
if enableIssueModificationTimeline:
self.buildIssueModificationTimeLine(issue['key'], histories, issueModificationTimeline,issueModificationTimelineCaptureDate)
loadRemoteLinks = False
remoteIssueLinkDataMap = {}
for change in histories:
data = self.parseResponse(responseTemplate, change, injectData)[0]
changeDate = parser.parse(data['changeDate'].split('.')[0])
if changeDate > startFromDate:
items = change['items']
for item in items:
if item['field'] in workLogFields:
dataCopy = copy.deepcopy(data)
dataCopy['changedfield'] = item['field']
dataCopy['fromString'] = item.get('fromString',None)
dataCopy['toString'] = item.get('toString',None)
dataCopy['from'] = item.get('from',None)
dataCopy['to'] = item.get('to',None)
for key in dataCopy.keys():
if dataCopy[key] is None:
dataCopy[key] = "None"
workLogData.append(dataCopy)
if dataCopy.get('changedfield', None) == 'RemoteIssueLink':
objectLinkId = dataCopy.get('to', None)
if objectLinkId is None:
objectLinkId = dataCopy.get('from',None)
if objectLinkId :
remoteIssueLinkDataMap[objectLinkId] = dataCopy
loadRemoteLinks = True
if loadRemoteLinks:
try:
self.loadRemoteLinks(issue['key'], remoteIssueLinkDataMap)
except Exception as ex:
self.baseLogger.error(ex)
return workLogData
def loadRemoteLinks(self, issueKey, remoteIssueLinkChangeDataMap):
remoteIssueLinksConfig = self.config.get('dynamicTemplate', {}).get('extensions',{}).get('remoteIssueLinks',None)
if remoteIssueLinksConfig:
remoteIssueLinkRestUrl = remoteIssueLinksConfig.get("remoteIssueLinkRestUrl").format(issueKey)
responseTemplate = remoteIssueLinksConfig.get("remoteIssueLinkResponseTemplate")
remoteIssueLinkResponse = self.getResponse(remoteIssueLinkRestUrl, 'GET', self.userid, self.passwd, None)
if remoteIssueLinkResponse:
parsedResponses = self.parseResponse(responseTemplate, remoteIssueLinkResponse)
for parsedResponse in parsedResponses:
remoteLinkId = parsedResponse['remoteLinkId']
if remoteLinkId:
remoteLinkId = str(remoteLinkId)
remoteLinkChangeObject = remoteIssueLinkChangeDataMap.get(remoteLinkId,{})
remoteLinkChangeObject.update(parsedResponse)
def buildIssueModificationTimeLine(self, issueKey, histories, issueModificationTimeline, issueModificationTimelineCaptureDate):
currentDate = parser.parse(datetime.datetime.now().strftime("%Y-%m-%d"))
timelineMap = {}
for change in histories:
changeDate = parser.parse(change['created'].split('T')[0])
# validate the working. we need to compare the date and time together. also, we will need to capture the change log till date and time.
if currentDate>changeDate >= issueModificationTimelineCaptureDate:
fields = timelineMap.get(str(changeDate), None)
if fields is None:
fields = dict()
timelineMap[str(changeDate)] = fields
items = change['items']
for item in items:
changedField = re.sub (r'[-\+!~@#$%^&*()={}\[\]";<.>//\'\s"]', '' ,str(item['field']).lower()).capitalize()
fields[changedField] = fields.get(changedField, 0) +1
for timelineDate in timelineMap:
data = dict()
data['key'] = issueKey
data['timelineDate'] = timelineDate.split(' ')[0]
fields = timelineMap[timelineDate]
data['fields'] = list(fields.keys())
for field in fields:
data[field] = fields[field]
self.changedFields.add(field)
issueModificationTimeline.append(data)
def scheduleExtensions(self):
bypassSprintExtCall = self.config.get ('bypassSprintExtCall',False)
extensions = self.config.get('dynamicTemplate', {}).get('extensions', None)
if extensions:
#backlog = extensions.get('backlog', None)
#if backlog:
# self.registerExtension('backlog', self.retrieveBacklogDetails, backlog.get('runSchedule'))
sprints = extensions.get('sprints', None)
if sprints and not bypassSprintExtCall:
self.registerExtension('sprints', self.retrieveSprintDetails, sprints.get('runSchedule'))
sprintReport = extensions.get('sprintReport', None)
if sprintReport and not bypassSprintExtCall:
self.registerExtension('sprintReport', self.retrieveSprintReports, sprintReport.get('runSchedule'))
releaseDetails = extensions.get('releaseDetails', None)
if releaseDetails:
self.registerExtension('releaseDetails', self.retrieveReleaseDetails, releaseDetails.get('runSchedule'))
sprintDeletionIdentifier = extensions.get('sprintDeletionIdentifier', None)
if sprintDeletionIdentifier:
self.registerExtension('sprintDeletionIdentifier', self.sprintDeletionIdentifier, sprintDeletionIdentifier.get('runSchedule'))
def extractFields(self, responseTemplate):
fieldsJson = responseTemplate.get("fields", None)
fieldsParam = ''
if fieldsJson:
for field in fieldsJson:
fieldsParam += field + ','
fieldsParam = fieldsParam[:-1]
if self.config.get("sprintField", None):
fieldsParam += ','+ self.config.get("sprintField")
return fieldsParam
def propertyExtractor (self, temObject, data):
if temObject is None or data is None:
return
keyType = type(temObject)
if keyType is dict:
for key in temObject:
self.propertyExtractor(temObject.get(key, None), data)
elif keyType is list:
for valObject in temObject:
self.propertyExtractor(valObject, data)
elif keyType is str:
data.append(temObject)
else:
self.baseLogger.error ("Response Template not well formed")
def processSprintInformation(self, parsedIssue, issue, isIssueStatusFilter,sprintField, tracking):
sprintStates = set()
if sprintField:
boardsTracking = tracking.get('boards', None)
if boardsTracking is None:
boardsTracking = {}
tracking['boards'] = boardsTracking
sprintDetails = issue.get("fields", {}).get(sprintField, None)
if sprintDetails:
try:
sprints = []
boards = []
for sprint in sprintDetails:
versionUrl = self.config.get('dynamicTemplate', {}).get('versionUrl', '')
versionResponse = self.getResponse(versionUrl, 'GET', self.userid, self.passwd, None)
deploymentType = versionResponse.get('deploymentType', '')
if deploymentType == 'Server':
sprintData = {}
sprintDetail = sprint.split("[")[1][:-1]
sprintPropertieTokens = sprintDetail.split(",")
for propertyToken in sprintPropertieTokens:
propertyKeyValToken = propertyToken.split("=")
if len(propertyKeyValToken) > 1:
sprintData[propertyKeyValToken[0]] = propertyKeyValToken[1]
boardId = sprintData.get('rapidViewId')
sprintId = sprintData.get('id')
else:
boardId = sprint.get('boardId')
sprintId = sprint.get('id')
boardTracking = boardsTracking.get(str(boardId), None)
if boardTracking is None:
boardTracking = {}
boardsTracking[str(boardId)] = boardTracking
sprintTracking = boardTracking.get('sprints', None)
if sprintTracking is None:
sprintTracking = {}
boardTracking['sprints'] = sprintTracking
if sprintTracking.get(str(sprintId), None) is None:
sprintTracking[str(sprintId)] = {}
if boardId not in boards:
boards.append(boardId)
if sprintId not in sprints:
sprints.append(sprintId)
except Exception as ex:
parsedIssue[0]['error'] = str(ex)
parsedIssue[0]['sprints'] = sprints
parsedIssue[0]['boards'] = boards
#if len(boards) > 1 :
# for board in boards:
# boardTracking = boardsTracking.get(board)
# sprintTracking = boardTracking.get('sprints')
# for sprint in sprints:
# if sprintTracking.get(sprint, None) is None:
# sprintTracking[sprint] = {}
@BaseAgent.timed
def retrieveSprintDetails (self):
sprintDetails = self.config.get('dynamicTemplate', {}).get('extensions', {}).get('sprints', None)
insightsTimeXFieldMapping = self.config.get('dynamicTemplate', {}).get('extensions', {}).get('sprints', {}).get('insightsTimeXFieldMapping', None)
timeStampField = insightsTimeXFieldMapping.get('timefield', None)
timeStampFormat = insightsTimeXFieldMapping.get('timeformat', None)
isEpoch = insightsTimeXFieldMapping.get('isEpoch', None)
boardApiUrl = sprintDetails.get('boardApiUrl')
boards = self.tracking.get('boards', None)
if sprintDetails and boards:
responseTemplate = sprintDetails.get('sprintResponseTemplate', None)
sprintMetadata = sprintDetails.get('sprintMetadata')
for boardId in boards:
data = []
board = boards[boardId]
boardRestUrl = boardApiUrl + '/' + str(boardId)
try:
boardResponse = self.getResponse(boardRestUrl, 'GET', self.userid, self.passwd, None)
board['name'] = boardResponse.get('name')
board['type'] = boardResponse.get('type')
board.pop('error', None)
except Exception as ex:
board['error'] = str(ex)
#Get the individual sprint details.
sprints = board.get('sprints')
for sprint in sprints:
sprintApiUrl = sprintDetails.get('sprintApiUrl')+'/'+sprint
try:
sprintResponse = self.getResponse(sprintApiUrl, 'GET', self.userid, self.passwd, None)
data.append(self.parseResponse(responseTemplate, sprintResponse)[0])
except Exception:
pass
if len(data) > 0 :
self.publishToolsData(data, sprintMetadata,timeStampField,timeStampFormat,isEpoch,True)
continue
sprintsUrl = boardRestUrl + '/sprint?startAt='
startAt = 0
isLast = False
injectData = {'boardName' : board['name']}
while not isLast:
try:
sprintsResponse = self.getResponse(sprintsUrl+str(startAt), 'GET', self.userid, self.passwd, None)
except Exception as ex3:
#board['error'] = str(ex3)
break
isLast = sprintsResponse['isLast']
startAt = startAt + sprintsResponse['maxResults']
sprintValues = sprintsResponse['values']
parsedSprints = self.parseResponse(responseTemplate, sprintValues, injectData)
for parsedSprint in parsedSprints:
if str(parsedSprint.get('boardId')) == str(boardId):
data.append(parsedSprint)
if len(data) > 0 :
self.publishToolsData(data, sprintMetadata,timeStampField,timeStampFormat,isEpoch,True)
@BaseAgent.timed
def retrieveBacklogDetails(self):
backlogDetails = self.config.get('dynamicTemplate', {}).get('extensions', {}).get('backlog', None)
boardApiUrl = backlogDetails.get('boardApiUrl')
boards = self.tracking.get('boards', None)
backlogMetadata = backlogDetails.get('backlogMetadata')
if backlogDetails and boards:
for boardId in boards:
data = []
board = boards[boardId]
boardRestUrl = boardApiUrl + '/' + str(boardId)
try:
boardResponse = self.getResponse(boardRestUrl, 'GET', self.userid, self.passwd, None)
board['name'] = boardResponse.get('name')
board['type'] = boardResponse.get('type')
board.pop('error', None)
backlogUrl = boardRestUrl + '/backlog?fields=[]&startAt='
startAt = 0
isLast = False
while not isLast:
backlogResponse = self.getResponse(backlogUrl+str(startAt), 'GET', self.userid, self.passwd, None)
isLast = (startAt + backlogResponse['maxResults']) > backlogResponse['total']
startAt = startAt + backlogResponse['maxResults']
backlogIssues = backlogResponse['issues']
for backlogIssue in backlogIssues:
issue = {}
issue['backlogIssueKey'] = backlogIssue.get('key')
issue['projectKey'] = backlogIssue.get('key').split('-')[0]
issue['boardName'] = board['name']
issue['boardId'] = boardId
data.append(issue)
if len(data) > 0 :
self.publishToolsData(data, backlogMetadata)
except Exception as ex:
board['error'] = str(ex)
#Get the individual sprint details.
@BaseAgent.timed
def retrieveSprintReports(self):
sprintDetails = self.config.get('dynamicTemplate', {}).get('extensions', {}).get('sprintReport', None)
boardApiUrl = sprintDetails.get('boardApiUrl')
boards = self.tracking.get('boards', None)
if sprintDetails and boards:
sprintReportUrl = sprintDetails.get('sprintReportUrl', None)
responseTemplate = sprintDetails.get('sprintReportResponseTemplate', None)
#sprintMetadata = sprintDetails.get('sprintMetadata')
relationMetadata = sprintDetails.get('relationMetadata')
for boardId in boards:
board = boards[boardId]
boardName = board.get('name', None)
if boardName is None:
boardRestUrl = boardApiUrl + '/' + str(boardId)
try:
boardResponse = self.getResponse(boardRestUrl, 'GET', self.userid, self.passwd, None)
board['name'] = boardResponse.get('name')
board['type'] = boardResponse.get('type')
board.pop('error', None)
except Exception as ex:
board['error'] = str(ex)
continue
sprints = board['sprints']
for sprintId in sprints:
sprint = sprints[sprintId]
#For velocity, only the completed sprints are considered
#extract the project key from the sprint reports to allow the data tagging
sprintClosed = sprint.get('closed', False)
if not sprintClosed:
sprintReportRestUrl = sprintReportUrl + '?rapidViewId='+str(boardId)+'&sprintId='+str(sprintId)
sprintReportResponse = None
try:
sprintReportResponse = self.getResponse(sprintReportRestUrl, 'GET', self.userid, self.passwd, None)
except Exception as ex:
sprint['error'] = str(ex)
if sprintReportResponse:
content = sprintReportResponse.get('contents', None)
if sprintReportResponse.get('sprint', {}).get('state', 'OPEN') == 'CLOSED':
sprint['closed'] = True
injectData = { 'boardId' : int(boardId), 'sprintId' : int(sprintId) }
data = []
data += self.addSprintDetails(responseTemplate, content, 'completedIssues', injectData)
data += self.addSprintDetails(responseTemplate, content, 'issuesNotCompletedInCurrentSprint', injectData)
data += self.addSprintDetails(responseTemplate, content, 'puntedIssues', injectData)
data += self.addSprintDetails(responseTemplate, content, 'issuesCompletedInAnotherSprint', injectData)
if len(data) > 0:
#self.publishToolsData(self.getSprintInformation(sprintReportResponse, boardId, sprintId, board['name'], board['type']), sprintMetadata)
self.publishToolsData(data, relationMetadata)
self.updateTrackingJson(self.tracking)
@BaseAgent.timed
def getSprintInformation(self, content, boardId, sprintId, boardName, boardType):
data = []
sprint = content.get('sprint')
sprint.pop('linkedPagesCount', None)
sprint.pop('remoteLinks', None)
sprint.pop('sequence', None)
sprint.pop('id', None)
sprint['boardId'] = boardId
sprint['sprintId'] = sprintId
sprint['boardName'] = boardName
sprint['boardType'] = boardType
sprint['sprintName'] = sprint.get('name')
sprint.pop('name', None)
timeStampFormat = '%d/%b/%y'
startDate = sprint.get('startDate', None)
if startDate and startDate != 'None':
sprint['startDateEpoch'] = self.getRemoteDateTime(dateTime2.strptime(startDate.split(' ')[0], timeStampFormat)).get('epochTime')
endDate = sprint.get('endDate', None)
if endDate and endDate != 'None':
sprint['endDateEpoch'] = self.getRemoteDateTime(dateTime2.strptime(endDate.split(' ')[0], timeStampFormat)).get('epochTime')
completeDate = sprint.get('completeDate', None)
if completeDate and completeDate != 'None':
sprint['completeDateEpoch'] = self.getRemoteDateTime(dateTime2.strptime(completeDate.split(' ')[0], timeStampFormat)).get('epochTime')
data.append(sprint)
return data
def addSprintDetails(self, responseTemplate, content, sprintIssueRegion, injectData):
issueKeysAddedDuringSprint = content.get('issueKeysAddedDuringSprint', {})
issues = content.get(sprintIssueRegion, None)
parsedIssues = []
if issues:
parsedIssues = self.parseResponse(responseTemplate, issues, injectData)
for issue in parsedIssues:
issueKey = issue['key']
issue['addedDuringSprint'] = issueKeysAddedDuringSprint.get(issueKey, False)
issue['sprintIssueRegion'] = sprintIssueRegion
issue['projectKey'] = issueKey.split('-')[0]
return parsedIssues
@BaseAgent.timed
def retrieveReleaseDetails(self):
releaseDetails = self.config.get('dynamicTemplate', {}).get('extensions', {}).get('releaseDetails', None)
insightsTimeXFieldMapping = self.config.get('dynamicTemplate', {}).get('extensions', {}).get('releaseDetails', {}).get('insightsTimeXFieldMapping', None)
timeStampField = insightsTimeXFieldMapping.get('timefield', None)
timeStampFormat = insightsTimeXFieldMapping.get('timeformat', None)
isEpoch = insightsTimeXFieldMapping.get('isEpoch', None)
if releaseDetails:
jiraProjectApiUrl = releaseDetails.get('jiraProjectApiUrl', None)
jiraProjectResponseTemplate = releaseDetails.get('jiraProjectResponseTemplate', None)
jiraReleaseResponseTemplate = releaseDetails.get('jiraReleaseResponseTemplate', None)
releaseVersionsMetadata = releaseDetails.get('releaseVersionsMetadata')
if jiraProjectApiUrl and jiraProjectResponseTemplate and jiraReleaseResponseTemplate:
jiraProjects = self.getResponse(jiraProjectApiUrl, 'GET', self.userid, self.passwd, None)
parsedJiraProjects = self.parseResponse(jiraProjectResponseTemplate, jiraProjects)
for parsedJiraProject in parsedJiraProjects:
projectKey = parsedJiraProject['projectKey']
releaseApiUrl = jiraProjectApiUrl + '/' + projectKey + '/versions'
releaseVersionsResponse = self.getResponse(releaseApiUrl, 'GET', self.userid, self.passwd, None)
parsedReleaseVersions = self.parseResponse(jiraReleaseResponseTemplate, releaseVersionsResponse,parsedJiraProject)
self.publishToolsData(parsedReleaseVersions, releaseVersionsMetadata,timeStampField,timeStampFormat,isEpoch,True)
def sprintDeletionIdentifier(self):
deletedSprintsData = list()
sprintDeletionIdentifier = self.config.get('dynamicTemplate', {}).get('extensions', {}).get('sprintDeletionIdentifier',None)
boards = self.tracking.get('boards', None)
if sprintDeletionIdentifier and boards:
sprintUrl = sprintDeletionIdentifier.get('sprintApiUrl','')
userName = self.config.get("userid",'')
password = self.config.get("passwd",'')
for boardId in boards:
boardMetaData = boards[boardId]
sprints = boardMetaData.get('sprints', {})
deletedSprints = dict()
for sprintId in list(sprints.keys()):
sprintExists = self.checkingSprintExistence(sprintUrl, userName, password, sprintId)
if not sprintExists:
deletedSprints[sprintId] = sprints.pop(sprintId, dict())
if len(deletedSprints):
if 'deletedSprints' not in boardMetaData:
boardMetaData['deletedSprints'] = dict()
boardMetaData.get('deletedSprints', dict()).update(deletedSprints)
for sprintId in deletedSprints:
deletedSprintsData.append({'sprintId': int(sprintId), 'boardId': int(boardId), 'event': 'sprintDeleted'})
if len(deletedSprintsData):
metaData = sprintDeletionIdentifier.get('metadata', dict())
self.publishToolsData(deletedSprintsData, metaData)
self.updateTrackingJson(self.tracking)
def checkingSprintExistence(self, sprintUrl, userName, password, sprintId):
try:
url = sprintUrl +'/' +sprintId
self.getResponse(url, 'GET', userName, password, None)
return True
except Exception as err:
if 'Sprint does not exist' in str(err):
return False
else:
return True
if __name__ == "__main__":
JiraAgent()
|
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide.action import ActionInfoCollection, ActionFactory, SeparatorInfo
from robotide.context import ABOUT_RIDE, SHORTCUT_KEYS
from robotide.controller.commands import SaveFile, SaveAll
from robotide.publish import RideSaveAll, RideClosing, RideSaved, PUBLISHER,\
RideInputValidationError, RideTreeSelection, RideModificationPrevented
from robotide.ui.tagdialogs import ViewAllTagsDialog
from robotide.ui.filedialogs import RobotFilePathDialog
from robotide.utils import RideEventHandler
from robotide.widgets import Dialog, ImageProvider, HtmlWindow
from robotide.preferences import PreferenceEditor
from .actiontriggers import MenuBar, ToolBar, ShortcutRegistry
from .filedialogs import (NewProjectDialog, InitFileFormatDialog)
from .review import ReviewDialog
from .pluginmanager import PluginManager
from robotide.action.shortcut import localize_shortcuts
from .tree import Tree
from .notebook import NoteBook
from .progress import LoadProgressObserver
_menudata = """
[File]
!&New Project | Create a new top level suite | Ctrlcmd-N
---
!&Open Test Suite | Open file containing tests | Ctrlcmd-O | ART_FILE_OPEN
!Open &Directory | Open directory containing datafiles | Shift-Ctrlcmd-O | ART_FOLDER_OPEN
---
&Save | Save selected datafile | Ctrlcmd-S | ART_FILE_SAVE
!Save &All | Save all changes | Ctrlcmd-Shift-S | ART_FILE_SAVE_AS
---
!E&xit | Exit RIDE | Ctrlcmd-Q
[Tools]
!Search Unused Keywords | | | | POSITION-54
!Manage Plugins | | | | POSITION-81
!View All Tags | | F7 | | POSITION-82
!Preferences | | | | POSITION-99
[Help]
!Shortcut keys | RIDE shortcut keys
!User Guide | RIDE User Guide
!Report a Problem | Open browser to the RIDE issue tracker
!Release notes | Shows release notes
!About | Information about RIDE
"""
class RideFrame(wx.Frame, RideEventHandler):
def __init__(self, application, controller):
wx.Frame.__init__(self, parent=None, title='RIDE',
pos=application.settings['mainframe position'],
size=application.settings['mainframe size'])
self.ensure_on_screen()
if application.settings['mainframe maximized']:
self.Maximize()
self._application = application
self._controller = controller
self._init_ui()
self._plugin_manager = PluginManager(self.notebook)
self._review_dialog = None
self._view_all_tags_dialog = None
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_MOVE, self.OnMove)
self.Bind(wx.EVT_MAXIMIZE, self.OnMaximize)
self._subscribe_messages()
self.Show()
wx.CallLater(100, self.actions.register_tools)
def _subscribe_messages(self):
for listener, topic in [
(lambda msg: self.SetStatusText('Saved %s' % msg.path), RideSaved),
(lambda msg: self.SetStatusText('Saved all files'), RideSaveAll),
(self._set_label, RideTreeSelection),
(self._show_validation_error, RideInputValidationError),
(self._show_modification_prevented_error, RideModificationPrevented)
]:
PUBLISHER.subscribe(listener, topic)
def _set_label(self, message):
self.SetTitle(self._create_title(message))
def _create_title(self, message):
title = 'RIDE'
if message:
item = message.item
title += ' - ' + item.datafile.name
if not item.is_modifiable():
title += ' (READ ONLY)'
return title
def _show_validation_error(self, message):
wx.MessageBox(message.message, 'Validation Error', style=wx.ICON_ERROR)
def _show_modification_prevented_error(self, message):
wx.MessageBox('"%s" is read only' % message.controller.datafile_controller.filename,
'Modification prevented',
style=wx.ICON_ERROR)
def _init_ui(self):
splitter = wx.SplitterWindow(self, style=wx.SP_LIVE_UPDATE)
self.notebook = NoteBook(splitter, self._application)
mb = MenuBar(self)
self.toolbar = ToolBar(self)
self.actions = ActionRegisterer(mb, self.toolbar,
ShortcutRegistry(self))
self.tree = Tree(splitter, self.actions, self._application.settings)
self.actions.register_actions(
ActionInfoCollection(_menudata, self, self.tree))
mb.take_menu_bar_into_use()
splitter.SetMinimumPaneSize(100)
splitter.SplitVertically(self.tree, self.notebook, 300)
self.CreateStatusBar()
self.SetIcons(ImageProvider().PROGICONS)
def get_selected_datafile(self):
return self.tree.get_selected_datafile()
def get_selected_datafile_controller(self):
return self.tree.get_selected_datafile_controller()
def OnClose(self, event):
if self._allowed_to_exit():
PUBLISHER.unsubscribe(self._set_label, RideTreeSelection)
RideClosing().publish()
self.Destroy()
else:
event.Veto()
def OnSize(self, event):
if not self.IsMaximized():
self._application.settings['mainframe maximized'] = False
self._application.settings['mainframe size'] = self.GetSizeTuple()
event.Skip()
def OnMove(self, event):
# When the window is Iconized, a move event is also raised, but we
# don't want to update the position in the settings file
if not self.IsIconized() and not self.IsMaximized():
self._application.settings['mainframe position'] = self.GetPositionTuple()
event.Skip()
def OnMaximize(self, event):
self._application.settings['mainframe maximized'] = True
event.Skip()
def OnReleasenotes(self, event):
pass
def _allowed_to_exit(self):
if self.has_unsaved_changes():
ret = wx.MessageBox('There are unsaved modifications.\n'
'Do you want to save your changes before exiting?',
'Warning', wx.ICON_WARNING|wx.CANCEL|wx.YES_NO)
if ret == wx.CANCEL:
return False
if ret == wx.YES:
self.save()
return True
def has_unsaved_changes(self):
return self._controller.is_dirty()
def OnNewProject(self, event):
if not self.check_unsaved_modifications():
return
NewProjectDialog(self._controller).execute()
self._populate_tree()
def _populate_tree(self):
self.tree.populate(self._controller)
def OnOpenTestSuite(self, event):
if not self.check_unsaved_modifications():
return
path = RobotFilePathDialog(
self, self._controller, self._application.settings).execute()
if path:
self.open_suite(path)
def check_unsaved_modifications(self):
if self.has_unsaved_changes():
ret = wx.MessageBox('There are unsaved modifications.\n'
'Do you want to proceed without saving?',
'Warning', wx.ICON_WARNING|wx.YES_NO)
return ret == wx.YES
return True
def open_suite(self, path):
self._controller.update_default_dir(path)
self._controller.load_datafile(path, LoadProgressObserver(self))
self._populate_tree()
def refresh_datafile(self, item, event):
self.tree.refresh_datafile(item, event)
def OnOpenDirectory(self, event):
if self.check_unsaved_modifications():
path = wx.DirSelector(message='Choose a directory containing Robot files',
defaultPath=self._controller.default_dir)
if path:
self.open_suite(path)
def OnSave(self, event):
self.save()
def OnSaveAll(self, event):
self.save_all()
def save_all(self):
self._show_dialog_for_files_without_format()
self._controller.execute(SaveAll())
def save(self, controller=None):
if controller is None :
controller = self.get_selected_datafile_controller()
if controller is not None:
if not controller.has_format():
self._show_dialog_for_files_without_format(controller)
else:
controller.execute(SaveFile())
def _show_dialog_for_files_without_format(self, controller=None):
files_without_format = self._controller.get_files_without_format(controller)
for f in files_without_format:
self._show_format_dialog_for(f)
def _show_format_dialog_for(self, file_controller_without_format):
InitFileFormatDialog(file_controller_without_format).execute()
def OnExit(self, event):
self.Close()
def OnManagePlugins(self, event):
self._plugin_manager.show(self._application.get_plugins())
def OnViewAllTags(self, event):
if self._view_all_tags_dialog is None:
self._view_all_tags_dialog = ViewAllTagsDialog(self._controller, self)
self._view_all_tags_dialog.show_dialog()
def OnSearchUnusedKeywords(self, event):
if self._review_dialog is None:
self._review_dialog = ReviewDialog(self._controller, self)
self._review_dialog.show_dialog()
def OnPreferences(self, event):
dlg = PreferenceEditor(self, "RIDE - Preferences",
self._application.preferences, style='tree')
# I would prefer that this not be modal, but making it non-
# modal opens up a can of worms. We don't want to have to deal
# with settings getting changed out from under us while the
# dialog is open.
dlg.ShowModal()
dlg.Destroy()
def OnAbout(self, event):
dlg = AboutDialog()
dlg.ShowModal()
dlg.Destroy()
def OnShortcutkeys(self, event):
dialog = ShortcutKeysDialog()
dialog.Show()
def OnReportaProblem(self, event):
wx.LaunchDefaultBrowser('http://github.com/robotframework/RIDE/issues')
def OnUserGuide(self, event):
wx.LaunchDefaultBrowser('http://robotframework.org/robotframework/#user-guide')
def _has_data(self):
return self._controller.data is not None
def _refresh(self):
self._controller.update_namespace()
# This code is copied from http://wiki.wxpython.org/EnsureFrameIsOnScreen,
# and adapted to fit our code style.
def ensure_on_screen(self):
try:
display_id = wx.Display.GetFromWindow(self)
except NotImplementedError:
display_id = 0
if display_id == -1:
display_id = 0
geometry = wx.Display(display_id).GetGeometry()
position = self.GetPosition()
if position.x < geometry.x:
position.x = geometry.x
if position.y < geometry.y:
position.y = geometry.y
size = self.GetSize()
if size.width > geometry.width:
size.width = geometry.width
position.x = geometry.x
elif position.x + size.width > geometry.x + geometry.width:
position.x = geometry.x + geometry.width - size.width
if size.height > geometry.height:
size.height = geometry.height
position.y = geometry.y
elif position.y + size.height > geometry.y + geometry.height:
position.y = geometry.y + geometry.height - size.height
self.SetPosition(position)
self.SetSize(size)
class ActionRegisterer(object):
def __init__(self, menubar, toolbar, shortcut_registry):
self._menubar = menubar
self._toolbar = toolbar
self._shortcut_registry = shortcut_registry
self._tools_items = {}
def register_action(self, action_info):
menubar_can_be_registered = True
action = ActionFactory(action_info)
self._shortcut_registry.register(action)
if hasattr(action_info,"menu_name"):
if action_info.menu_name == "Tools":
self._tools_items[action_info.position] = action
menubar_can_be_registered = False
if menubar_can_be_registered:
self._menubar.register(action)
self._toolbar.register(action)
return action
def register_tools(self):
separator_action = ActionFactory(SeparatorInfo("Tools"))
add_separator_after = ["stop test run","search unused keywords","preview","view ride log"]
for key in sorted(self._tools_items.iterkeys()):
self._menubar.register(self._tools_items[key])
if self._tools_items[key].name.lower() in add_separator_after:
self._menubar.register(separator_action)
def register_actions(self, actions):
for action in actions:
self.register_action(action)
def register_shortcut(self, action_info):
action = ActionFactory(action_info)
self._shortcut_registry.register(action)
return action
class AboutDialog(Dialog):
def __init__(self):
Dialog.__init__(self, title='RIDE')
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(HtmlWindow(self, (450, 200), ABOUT_RIDE), 1, flag=wx.EXPAND)
self.SetSizerAndFit(sizer)
def OnKey(self, *args):
pass
class ShortcutKeysDialog(Dialog):
def __init__(self):
Dialog.__init__(self, title='Shortcut keys for RIDE')
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(HtmlWindow(self, (350, 400), self._get_platform_specific_shortcut_keys()), 1, flag=wx.EXPAND)
self.SetSizerAndFit(sizer)
def OnKey(self, *args):
pass
def _get_platform_specific_shortcut_keys(self):
return localize_shortcuts(SHORTCUT_KEYS)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._agent_pools_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_available_agent_pool_versions_request, build_get_request, build_get_upgrade_profile_request, build_list_request, build_upgrade_node_image_version_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AgentPoolsOperations:
"""AgentPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2021_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AgentPoolListResult"]:
"""Gets a list of agent pools in the specified managed cluster.
Gets a list of agent pools in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AgentPoolListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2021_09_01.models.AgentPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AgentPoolListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools'} # type: ignore
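# Hedged usage sketch (kept as comments inside this generated class): with the
# async ContainerServiceClient wrapper from this package's `aio` namespace and
# an azure-identity credential (both assumed here, not defined in this file),
# the pager returned by list() is consumed with `async for`:
#
#     async with ContainerServiceClient(credential, subscription_id) as client:
#         async for pool in client.agent_pools.list(resource_group_name, resource_name):
#             print(pool.name)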
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPool":
"""Gets the specified managed cluster agent pool.
Gets the specified managed cluster agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPool, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.AgentPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> "_models.AgentPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'AgentPool')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AgentPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
parameters: "_models.AgentPool",
**kwargs: Any
) -> AsyncLROPoller["_models.AgentPool"]:
"""Creates or updates an agent pool in the specified managed cluster.
Creates or updates an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:param parameters: The agent pool to create or update.
:type parameters: ~azure.mgmt.containerservice.v2021_09_01.models.AgentPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2021_09_01.models.AgentPool]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
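# Hedged LRO usage sketch (comments only): begin_create_or_update returns an
# AsyncLROPoller, so callers typically await the coroutine to get the poller
# and then await its result(). `client` and `pool_parameters` are assumed to
# exist outside this file.
#
#     poller = await client.agent_pools.begin_create_or_update(
#         resource_group_name, resource_name, agent_pool_name, pool_parameters)
#     agent_pool = await poller.result()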
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes an agent pool in the specified managed cluster.
Deletes an agent pool in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, AsyncARMPolling is used as the polling method. Pass in False to
 skip polling for this operation, or pass in your own initialized polling object to supply a
 custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}'} # type: ignore
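# --- Continuation-token sketch (illustration only; not part of the generated client) ---
# Demonstrates the `continuation_token` keyword documented above: persist the
# poller's state and resume it later, possibly from another process. The
# `save_token`/`load_token` callables and resource names are assumptions.
async def _example_delete_and_resume(client, save_token, load_token):
    """Hypothetical helper: start a delete, persist its state, then resume it."""
    poller = await client.agent_pools.begin_delete(
        resource_group_name="my-rg",
        resource_name="my-cluster",
        agent_pool_name="nodepool1",
    )
    save_token(poller.continuation_token())  # e.g. write to durable storage
    # ...later, rebuild a poller from the saved token and wait for completion...
    resumed = await client.agent_pools.begin_delete(
        resource_group_name="my-rg",
        resource_name="my-cluster",
        agent_pool_name="nodepool1",
        continuation_token=load_token(),
    )
    await resumed.result()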
@distributed_trace_async
async def get_upgrade_profile(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> "_models.AgentPoolUpgradeProfile":
"""Gets the upgrade profile for an agent pool.
Gets the upgrade profile for an agent pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.AgentPoolUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_upgrade_profile_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self.get_upgrade_profile.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default'} # type: ignore
@distributed_trace_async
async def get_available_agent_pool_versions(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.AgentPoolAvailableVersions":
"""Gets a list of supported Kubernetes versions for the specified agent pool.
See `supported Kubernetes versions
<https://docs.microsoft.com/azure/aks/supported-kubernetes-versions>`_ for more details about
the version lifecycle.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AgentPoolAvailableVersions, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_09_01.models.AgentPoolAvailableVersions
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPoolAvailableVersions"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_available_agent_pool_versions_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.get_available_agent_pool_versions.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AgentPoolAvailableVersions', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_available_agent_pool_versions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions'} # type: ignore
async def _upgrade_node_image_version_initial(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> Optional["_models.AgentPool"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AgentPool"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_upgrade_node_image_version_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
template_url=self._upgrade_node_image_version_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_upgrade_node_image_version_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion'} # type: ignore
@distributed_trace_async
async def begin_upgrade_node_image_version(
self,
resource_group_name: str,
resource_name: str,
agent_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.AgentPool"]:
"""Upgrades the node image version of an agent pool to the latest.
Upgrading the node image version of an agent pool applies the newest OS and runtime updates to
the nodes. AKS provides one new image per week with the latest updates. For more details on
node image versions, see: https://docs.microsoft.com/azure/aks/node-image-upgrade.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param agent_pool_name: The name of the agent pool.
:type agent_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, AsyncARMPolling is used as the polling method. Pass in False to
 skip polling for this operation, or pass in your own initialized polling object to supply a
 custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AgentPool or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2021_09_01.models.AgentPool]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AgentPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._upgrade_node_image_version_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
agent_pool_name=agent_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AgentPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_upgrade_node_image_version.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion'} # type: ignore
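# --- Polling customization sketch (illustration only; not part of the generated client) ---
# Illustrates the `polling` and `polling_interval` keywords documented above:
# `polling_interval` is only the fallback delay when the service sends no
# Retry-After header, and `polling=False` returns a poller that does not poll
# beyond the initial response. Resource names are assumptions.
async def _example_upgrade_node_images(client):
    """Hypothetical helper: start a node-image upgrade and poll at most every 10s."""
    poller = await client.agent_pools.begin_upgrade_node_image_version(
        resource_group_name="my-rg",
        resource_name="my-cluster",
        agent_pool_name="nodepool1",
        polling_interval=10,
    )
    return await poller.result()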
|
|
#
# Copyright 2017 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Unit tests for finance.slippage
'''
from collections import namedtuple
import datetime
from math import sqrt
from nose_parameterized import parameterized
import numpy as np
import pandas as pd
import pytz
from zipline.assets import Equity, Future
from zipline.data.data_portal import DataPortal
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.finance.order import Order
from zipline.finance.slippage import (
EquitySlippageModel,
fill_price_worse_than_limit_price,
FutureSlippageModel,
SlippageModel,
VolatilityVolumeShare,
VolumeShareSlippage,
FixedBasisPointsSlippage,
)
from zipline.protocol import DATASOURCE_TYPE, BarData
from zipline.testing import (
create_minute_bar_data,
tmp_bcolz_equity_minute_bar_reader,
)
from zipline.testing.fixtures import (
WithAssetFinder,
WithCreateBarData,
WithDataPortal,
WithSimParams,
WithTradingCalendars,
ZiplineTestCase,
)
from zipline.utils.classproperty import classproperty
from zipline.utils.pandas_utils import normalize_date
TestOrder = namedtuple('TestOrder', 'limit direction')
class SlippageTestCase(WithCreateBarData,
WithSimParams,
WithDataPortal,
ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-05 14:31', tz='utc')
END_DATE = pd.Timestamp('2006-01-05 14:36', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 1.0e5
SIM_PARAMS_DATA_FREQUENCY = 'minute'
SIM_PARAMS_EMISSION_RATE = 'daily'
ASSET_FINDER_EQUITY_SIDS = (133,)
ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp('2006-01-05', tz='utc')
ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp('2006-01-07', tz='utc')
minutes = pd.DatetimeIndex(
start=START_DATE,
end=END_DATE - pd.Timedelta('1 minute'),
freq='1min'
)
@classproperty
def CREATE_BARDATA_DATA_FREQUENCY(cls):
return cls.sim_params.data_frequency
@classmethod
def make_equity_minute_bar_data(cls):
yield 133, pd.DataFrame(
{
'open': [3.0, 3.0, 3.5, 4.0, 3.5],
'high': [3.15, 3.15, 3.15, 3.15, 3.15],
'low': [2.85, 2.85, 2.85, 2.85, 2.85],
'close': [3.0, 3.5, 4.0, 3.5, 3.0],
'volume': [2000, 2000, 2000, 2000, 2000],
},
index=cls.minutes,
)
@classmethod
def init_class_fixtures(cls):
super(SlippageTestCase, cls).init_class_fixtures()
cls.ASSET133 = cls.asset_finder.retrieve_asset(133)
def test_allowed_asset_types(self):
# Custom equities model.
class MyEquitiesModel(EquitySlippageModel):
def process_order(self, data, order):
return 0, 0
self.assertEqual(MyEquitiesModel.allowed_asset_types, (Equity,))
# Custom futures model.
class MyFuturesModel(FutureSlippageModel):
def process_order(self, data, order):
return 0, 0
self.assertEqual(MyFuturesModel.allowed_asset_types, (Future,))
# Custom model for both equities and futures.
class MyMixedModel(EquitySlippageModel, FutureSlippageModel):
def process_order(self, data, order):
return 0, 0
self.assertEqual(MyMixedModel.allowed_asset_types, (Equity, Future))
# Equivalent custom model for both equities and futures.
class MyMixedModel(SlippageModel):
def process_order(self, data, order):
return 0, 0
self.assertEqual(MyMixedModel.allowed_asset_types, (Equity, Future))
SomeType = type('SomeType', (object,), {})
# A custom model that defines its own allowed types should take
# precedence over the parent class definitions.
class MyCustomModel(EquitySlippageModel, FutureSlippageModel):
allowed_asset_types = (SomeType,)
def process_order(self, data, order):
return 0, 0
self.assertEqual(MyCustomModel.allowed_asset_types, (SomeType,))
def test_fill_price_worse_than_limit_price(self):
non_limit_order = TestOrder(limit=None, direction=1)
limit_buy = TestOrder(limit=1.5, direction=1)
limit_sell = TestOrder(limit=1.5, direction=-1)
for price in [1, 1.5, 2]:
self.assertFalse(
fill_price_worse_than_limit_price(price, non_limit_order)
)
self.assertFalse(fill_price_worse_than_limit_price(1, limit_buy))
self.assertFalse(fill_price_worse_than_limit_price(1.5, limit_buy))
self.assertTrue(fill_price_worse_than_limit_price(2, limit_buy))
self.assertTrue(fill_price_worse_than_limit_price(1, limit_sell))
self.assertFalse(fill_price_worse_than_limit_price(1.5, limit_sell))
self.assertFalse(fill_price_worse_than_limit_price(2, limit_sell))
def test_orders_limit(self):
slippage_model = VolumeShareSlippage()
slippage_model.data_portal = self.data_portal
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'asset': self.ASSET133,
'limit': 3.5})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[3],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'asset': self.ASSET133,
'limit': 3.5})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[3],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'asset': self.ASSET133,
'limit': 3.6})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[3],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
txn = orders_txns[0][1]
expected_txn = {
'price': float(3.50021875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
# we ordered 100 shares, but default volume slippage only allows
# for 2.5% of the volume. 2.5% * 2000 = 50 shares
'amount': int(50),
'asset': self.ASSET133,
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'asset': self.ASSET133,
'limit': 3.5})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[0],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'asset': self.ASSET133,
'limit': 3.5})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[0],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'asset': self.ASSET133,
'limit': 3.4})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[1],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.49978125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-50),
'asset': self.ASSET133,
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
def test_orders_stop_limit(self):
slippage_model = VolumeShareSlippage()
slippage_model.data_portal = self.data_portal
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'asset': self.ASSET133,
'stop': 4.0,
'limit': 3.0})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[2],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[3],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'asset': self.ASSET133,
'stop': 4.0,
'limit': 3.5})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[2],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[3],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'asset': self.ASSET133,
'stop': 4.0,
'limit': 3.6})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[2],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[3],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.50021875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'amount': int(50),
'asset': self.ASSET133
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'asset': self.ASSET133,
'stop': 3.0,
'limit': 4.0})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[0],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[1],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'asset': self.ASSET133,
'stop': 3.0,
'limit': 3.5})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[0],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[1],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'asset': self.ASSET133,
'stop': 3.0,
'limit': 3.4})
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[0],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[1],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.49978125),
'dt': datetime.datetime(2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-50),
'asset': self.ASSET133,
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
class VolumeShareSlippageTestCase(WithCreateBarData,
WithSimParams,
WithDataPortal,
ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-05 14:31', tz='utc')
END_DATE = pd.Timestamp('2006-01-05 14:36', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 1.0e5
SIM_PARAMS_DATA_FREQUENCY = 'minute'
SIM_PARAMS_EMISSION_RATE = 'daily'
ASSET_FINDER_EQUITY_SIDS = (133,)
ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp('2006-01-05', tz='utc')
ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp('2006-01-07', tz='utc')
minutes = pd.DatetimeIndex(
start=START_DATE,
end=END_DATE - pd.Timedelta('1 minute'),
freq='1min'
)
@classproperty
def CREATE_BARDATA_DATA_FREQUENCY(cls):
return cls.sim_params.data_frequency
@classmethod
def make_equity_minute_bar_data(cls):
yield 133, pd.DataFrame(
{
'open': [3.00],
'high': [3.15],
'low': [2.85],
'close': [3.00],
'volume': [200],
},
index=[cls.minutes[0]],
)
@classmethod
def make_futures_info(cls):
return pd.DataFrame({
'sid': [1000],
'root_symbol': ['CL'],
'symbol': ['CLF06'],
'start_date': [cls.ASSET_FINDER_EQUITY_START_DATE],
'end_date': [cls.ASSET_FINDER_EQUITY_END_DATE],
'multiplier': [500],
'exchange': ['CMES'],
})
@classmethod
def make_future_minute_bar_data(cls):
yield 1000, pd.DataFrame(
{
'open': [5.00],
'high': [5.15],
'low': [4.85],
'close': [5.00],
'volume': [100],
},
index=[cls.minutes[0]],
)
@classmethod
def init_class_fixtures(cls):
super(VolumeShareSlippageTestCase, cls).init_class_fixtures()
cls.ASSET133 = cls.asset_finder.retrieve_asset(133)
cls.ASSET1000 = cls.asset_finder.retrieve_asset(1000)
def test_volume_share_slippage(self):
slippage_model = VolumeShareSlippage()
open_orders = [
Order(
dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=100,
filled=0,
asset=self.ASSET133
)
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[0],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.0001875),
'dt': datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'amount': int(5),
'asset': self.ASSET133,
'type': DATASOURCE_TYPE.TRANSACTION,
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
# TODO: Make expected_txn a Transaction object and ensure there
# is a __eq__ for that class.
self.assertEquals(expected_txn, txn.__dict__)
open_orders = [
Order(
dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=100,
filled=0,
asset=self.ASSET133
)
]
# Set bar_data to be a minute ahead of last trade.
# Volume share slippage should not execute when there is no trade.
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[1],
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 0)
def test_volume_share_slippage_with_future(self):
slippage_model = VolumeShareSlippage(volume_limit=1, price_impact=0.3)
open_orders = [
Order(
dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=10,
filled=0,
asset=self.ASSET1000,
),
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.minutes[0],
)
orders_txns = list(
slippage_model.simulate(bar_data, self.ASSET1000, open_orders)
)
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
# We expect to fill the order for all 10 contracts. The volume for the
# futures contract in this bar is 100, so our volume share is:
# 10.0 / 100 = 0.1
# The current price is 5.0 and the price impact is 0.3, so the expected
# impacted price is:
# 5.0 + (5.0 * (0.1 ** 2) * 0.3) = 5.015
expected_txn = {
'price': 5.015,
'dt': datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'amount': 10,
'asset': self.ASSET1000,
'type': DATASOURCE_TYPE.TRANSACTION,
'order_id': open_orders[0].id,
}
self.assertIsNotNone(txn)
self.assertEquals(expected_txn, txn.__dict__)
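# --- Arithmetic check (standalone illustration, not zipline API) ---
# Re-derives the impacted price from the formula spelled out in the comment
# above: price +/- price * (volume_share ** 2) * price_impact. The same formula
# with the default equity parameters (volume_limit=0.025, price_impact=0.1 --
# an assumption about VolumeShareSlippage's defaults) reproduces the
# 3.50021875 / 3.49978125 prices asserted in SlippageTestCase above.
def _expected_impacted_price(price, order_amount, bar_volume,
                             volume_limit, price_impact):
    direction = 1 if order_amount > 0 else -1
    volume_share = min(abs(order_amount) / float(bar_volume), volume_limit)
    return price + direction * price * (volume_share ** 2) * price_impact

assert abs(_expected_impacted_price(5.0, 10, 100, 1, 0.3) - 5.015) < 1e-9
assert abs(_expected_impacted_price(3.5, 100, 2000, 0.025, 0.1) - 3.50021875) < 1e-9
assert abs(_expected_impacted_price(3.5, -100, 2000, 0.025, 0.1) - 3.49978125) < 1e-9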
class VolatilityVolumeShareTestCase(WithCreateBarData,
WithSimParams,
WithDataPortal,
ZiplineTestCase):
ASSET_START_DATE = pd.Timestamp('2006-02-10')
TRADING_CALENDAR_STRS = ('NYSE', 'us_futures')
TRADING_CALENDAR_PRIMARY_CAL = 'us_futures'
@classmethod
def init_class_fixtures(cls):
super(VolatilityVolumeShareTestCase, cls).init_class_fixtures()
cls.ASSET = cls.asset_finder.retrieve_asset(1000)
@classmethod
def make_futures_info(cls):
return pd.DataFrame({
'sid': [1000, 1001],
'root_symbol': ['CL', 'FV'],
'symbol': ['CLF07', 'FVF07'],
'start_date': [cls.ASSET_START_DATE, cls.START_DATE],
'end_date': [cls.END_DATE, cls.END_DATE],
'multiplier': [500, 500],
'exchange': ['CMES', 'CMES'],
})
@classmethod
def make_future_minute_bar_data(cls):
data = list(
super(
VolatilityVolumeShareTestCase, cls,
).make_future_minute_bar_data()
)
# Make the first month's worth of data NaN to simulate cases where a
# futures contract does not exist yet.
data[0][1].loc[:cls.ASSET_START_DATE] = np.NaN
return data
def test_calculate_impact_buy(self):
answer_key = [
# We ordered 10 contracts, but are capped at 100 * 0.05 = 5
(91485.500085168125, 5),
(91486.500085169057, 5),
(None, None),
]
order = Order(
dt=pd.Timestamp.now(tz='utc').round('min'),
asset=self.ASSET,
amount=10,
)
self._calculate_impact(order, answer_key)
def test_calculate_impact_sell(self):
answer_key = [
# We ordered -10 contracts, but are capped at -(100 * 0.05) = -5
(91485.499914831875, -5),
(91486.499914830943, -5),
(None, None),
]
order = Order(
dt=pd.Timestamp.now(tz='utc').round('min'),
asset=self.ASSET,
amount=-10,
)
self._calculate_impact(order, answer_key)
def _calculate_impact(self, test_order, answer_key):
model = VolatilityVolumeShare(volume_limit=0.05)
first_minute = pd.Timestamp('2006-03-31 11:35AM', tz='UTC')
next_3_minutes = self.trading_calendar.minutes_window(first_minute, 3)
remaining_shares = test_order.open_amount
for i, minute in enumerate(next_3_minutes):
data = self.create_bardata(simulation_dt_func=lambda: minute)
new_order = Order(
dt=data.current_dt, asset=self.ASSET, amount=remaining_shares,
)
price, amount = model.process_order(data, new_order)
self.assertEqual(price, answer_key[i][0])
self.assertEqual(amount, answer_key[i][1])
amount = amount or 0
if remaining_shares < 0:
remaining_shares = min(0, remaining_shares - amount)
else:
remaining_shares = max(0, remaining_shares - amount)
def test_calculate_impact_without_history(self):
model = VolatilityVolumeShare(volume_limit=1)
late_start_asset = self.asset_finder.retrieve_asset(1000)
early_start_asset = self.asset_finder.retrieve_asset(1001)
cases = [
# History will look for data before the start date.
(pd.Timestamp('2006-01-05 11:35AM', tz='UTC'), early_start_asset),
# Start day of the futures contract; no history yet.
(pd.Timestamp('2006-02-10 11:35AM', tz='UTC'), late_start_asset),
# Only a week's worth of history data.
(pd.Timestamp('2006-02-17 11:35AM', tz='UTC'), late_start_asset),
]
for minute, asset in cases:
data = self.create_bardata(simulation_dt_func=lambda: minute)
order = Order(dt=data.current_dt, asset=asset, amount=10)
price, amount = model.process_order(data, order)
avg_price = (
data.current(asset, 'high') + data.current(asset, 'low')
) / 2
expected_price = \
avg_price * (1 + model.NO_DATA_VOLATILITY_SLIPPAGE_IMPACT)
self.assertAlmostEqual(price, expected_price, delta=0.001)
self.assertEqual(amount, 10)
def test_impacted_price_worse_than_limit(self):
model = VolatilityVolumeShare(volume_limit=0.05)
# Use all the same numbers from the 'calculate_impact' tests. Since the
# impacted price is 59805.5, which is worse than the limit price of
# 59800, the model should return None.
minute = pd.Timestamp('2006-03-01 11:35AM', tz='UTC')
data = self.create_bardata(simulation_dt_func=lambda: minute)
order = Order(
dt=data.current_dt, asset=self.ASSET, amount=10, limit=59800,
)
price, amount = model.process_order(data, order)
self.assertIsNone(price)
self.assertIsNone(amount)
def test_low_transaction_volume(self):
# With a volume limit of 0.001, and a bar volume of 100, we should
# compute a transaction volume of 100 * 0.001 = 0.1, which gets rounded
# down to zero. In this case we expect no amount to be transacted.
model = VolatilityVolumeShare(volume_limit=0.001)
minute = pd.Timestamp('2006-03-01 11:35AM', tz='UTC')
data = self.create_bardata(simulation_dt_func=lambda: minute)
order = Order(dt=data.current_dt, asset=self.ASSET, amount=10)
price, amount = model.process_order(data, order)
self.assertIsNone(price)
self.assertIsNone(amount)
class MarketImpactTestCase(WithCreateBarData, ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (1,)
@classmethod
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
return create_minute_bar_data(
trading_calendar.minutes_for_sessions_in_range(
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
),
cls.asset_finder.equities_sids,
)
def test_window_data(self):
session = pd.Timestamp('2006-03-01')
minute = self.trading_calendar.minutes_for_session(session)[1]
data = self.create_bardata(simulation_dt_func=lambda: minute)
asset = self.asset_finder.retrieve_asset(1)
mean_volume, volatility = VolatilityVolumeShare(0.0)._get_window_data(
data, asset, window_length=20,
)
# close volume
# 2006-01-31 00:00:00+00:00 29.0 119.0
# 2006-02-01 00:00:00+00:00 30.0 120.0
# 2006-02-02 00:00:00+00:00 31.0 121.0
# 2006-02-03 00:00:00+00:00 32.0 122.0
# 2006-02-06 00:00:00+00:00 33.0 123.0
# 2006-02-07 00:00:00+00:00 34.0 124.0
# 2006-02-08 00:00:00+00:00 35.0 125.0
# 2006-02-09 00:00:00+00:00 36.0 126.0
# 2006-02-10 00:00:00+00:00 37.0 127.0
# 2006-02-13 00:00:00+00:00 38.0 128.0
# 2006-02-14 00:00:00+00:00 39.0 129.0
# 2006-02-15 00:00:00+00:00 40.0 130.0
# 2006-02-16 00:00:00+00:00 41.0 131.0
# 2006-02-17 00:00:00+00:00 42.0 132.0
# 2006-02-21 00:00:00+00:00 43.0 133.0
# 2006-02-22 00:00:00+00:00 44.0 134.0
# 2006-02-23 00:00:00+00:00 45.0 135.0
# 2006-02-24 00:00:00+00:00 46.0 136.0
# 2006-02-27 00:00:00+00:00 47.0 137.0
# 2006-02-28 00:00:00+00:00 48.0 138.0
# Mean volume is (119 + 138) / 2 = 128.5
self.assertEqual(mean_volume, 128.5)
# Volatility is closes.pct_change().std() * sqrt(252)
reference_vol = pd.Series(range(29, 49)).pct_change().std() * sqrt(252)
self.assertEqual(volatility, reference_vol)
class OrdersStopTestCase(WithSimParams,
WithAssetFinder,
WithTradingCalendars,
ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-05 14:31', tz='utc')
END_DATE = pd.Timestamp('2006-01-05 14:36', tz='utc')
SIM_PARAMS_CAPITAL_BASE = 1.0e5
SIM_PARAMS_DATA_FREQUENCY = 'minute'
SIM_PARAMS_EMISSION_RATE = 'daily'
ASSET_FINDER_EQUITY_SIDS = (133,)
minutes = pd.DatetimeIndex(
start=START_DATE,
end=END_DATE - pd.Timedelta('1 minute'),
freq='1min'
)
@classmethod
def init_class_fixtures(cls):
super(OrdersStopTestCase, cls).init_class_fixtures()
cls.ASSET133 = cls.asset_finder.retrieve_asset(133)
STOP_ORDER_CASES = {
# Stop orders can be long/short and have their price greater or
# less than the stop.
#
# A stop being reached is conditional on the order direction.
# Long orders reach the stop when the price is greater than the stop.
# Short orders reach the stop when the price is less than the stop.
#
# Which leads to the following 4 cases:
#
# | long | short |
# | price > stop | | |
# | price < stop | | |
#
# Currently the slippage module acts according to the following table,
# where 'X' represents triggering a transaction
# | long | short |
# | price > stop | | X |
# | price < stop | X | |
#
# However, the following behavior *should* be followed.
#
# | long | short |
# | price > stop | X | |
# | price < stop | | X |
#
# (A sketch of this intended trigger rule appears right after the case
# table below.)
'long | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 4.0,
'high': 3.15,
'low': 2.85,
'close': 4.0,
'open': 3.5
},
'expected': {
'transaction': {
'price': 4.00025,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': 50,
}
}
},
'long | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'stop': 3.6
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'close': 3.5,
'open': 4.0
},
'expected': {
'transaction': None
}
},
'short | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'stop': 3.4
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'close': 3.5,
'open': 3.0
},
'expected': {
'transaction': None
}
},
'short | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.0,
'high': 3.15,
'low': 2.85,
'close': 3.0,
'open': 3.0
},
'expected': {
'transaction': {
'price': 2.9998125,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': -50,
}
}
},
}
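# --- Sketch of the intended trigger rule (illustration only, not zipline API) ---
# Encodes the "should be followed" table from the comment at the top of
# STOP_ORDER_CASES: a long (buy) stop triggers once the price rises to the stop
# or above, a short (sell) stop once it falls to the stop or below. Treating
# equality as a trigger is an assumption; the table does not cover that case.
def _intended_stop_reached(direction, price, stop):
    """Hypothetical helper mirroring the intended-behavior table above."""
    if direction > 0:       # long / buy-stop
        return price >= stop
    return price <= stop    # short / sell-stop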
@parameterized.expand(sorted(
(name, case['order'], case['event'], case['expected'])
for name, case in STOP_ORDER_CASES.items()
))
def test_orders_stop(self, name, order_data, event_data, expected):
data = order_data
data['asset'] = self.ASSET133
order = Order(**data)
if expected['transaction']:
expected['transaction']['asset'] = self.ASSET133
event_data['asset'] = self.ASSET133
assets = (
(133, pd.DataFrame(
{
'open': [event_data['open']],
'high': [event_data['high']],
'low': [event_data['low']],
'close': [event_data['close']],
'volume': [event_data['volume']],
},
index=[pd.Timestamp('2006-01-05 14:31', tz='UTC')],
)),
)
days = pd.date_range(
start=normalize_date(self.minutes[0]),
end=normalize_date(self.minutes[-1])
)
with tmp_bcolz_equity_minute_bar_reader(
self.trading_calendar, days, assets) as reader:
data_portal = DataPortal(
self.asset_finder, self.trading_calendar,
first_trading_day=reader.first_trading_day,
equity_minute_reader=reader,
)
slippage_model = VolumeShareSlippage()
try:
dt = pd.Timestamp('2006-01-05 14:31', tz='UTC')
bar_data = BarData(
data_portal,
lambda: dt,
self.sim_params.data_frequency,
self.trading_calendar,
NoRestrictions(),
)
_, txn = next(slippage_model.simulate(
bar_data,
self.ASSET133,
[order],
))
except StopIteration:
txn = None
if expected['transaction'] is None:
self.assertIsNone(txn)
else:
self.assertIsNotNone(txn)
for key, value in expected['transaction'].items():
self.assertEquals(value, txn[key])
class FixedBasisPointsSlippageTestCase(WithCreateBarData,
ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-05', tz='utc')
END_DATE = pd.Timestamp('2006-01-05', tz='utc')
ASSET_FINDER_EQUITY_SIDS = (133,)
first_minute = (
pd.Timestamp('2006-01-05 9:31', tz='US/Eastern').tz_convert('UTC')
)
@classmethod
def make_equity_minute_bar_data(cls):
yield 133, pd.DataFrame(
{
'open': [2.9],
'high': [3.15],
'low': [2.85],
'close': [3.00],
'volume': [200],
},
index=[cls.first_minute],
)
@classmethod
def init_class_fixtures(cls):
super(FixedBasisPointsSlippageTestCase, cls).init_class_fixtures()
cls.ASSET133 = cls.asset_finder.retrieve_asset(133)
@parameterized.expand([
# Volume limit of 10% on an order of 100 shares. Since the bar volume
# is 200, we should hit the limit and only fill 20 shares.
('5bps_over_vol_limit', 5, 0.1, 100, 3.0015, 20),
# Same as previous, but on the short side.
('5bps_negative_over_vol_limit', 5, 0.1, -100, 2.9985, -20),
# Volume limit of 10% on an order of 10 shares. We should fill the full
# amount.
('5bps_under_vol_limit', 5, 0.1, 10, 3.0015, 10),
# Same as previous, but on the short side.
('5bps_negative_under_vol_limit', 5, 0.1, -10, 2.9985, -10),
# Change the basis points value.
('10bps', 10, 0.1, 100, 3.003, 20),
# Change the volume limit value.
('20pct_volume_limit', 5, 0.2, 100, 3.0015, 40),
])
def test_fixed_bps_slippage(self,
name,
basis_points,
volume_limit,
order_amount,
expected_price,
expected_amount):
slippage_model = FixedBasisPointsSlippage(basis_points=basis_points,
volume_limit=volume_limit)
open_orders = [
Order(
dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=order_amount,
filled=0,
asset=self.ASSET133
)
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.first_minute
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': expected_price,
'dt': datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'amount': expected_amount,
'asset': self.ASSET133,
'type': DATASOURCE_TYPE.TRANSACTION,
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
self.assertEquals(expected_txn, txn.__dict__)
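# --- Arithmetic check (standalone illustration, not zipline API) ---
# Re-derives the expected prices and fill amounts in the parameterized cases
# above, assuming FixedBasisPointsSlippage fills at
# price * (1 +/- basis_points / 10000) and caps each fill at
# volume_limit * bar_volume (the bar volume here is 200).
def _fixed_bps_fill(price, basis_points, volume_limit, order_amount, bar_volume=200):
    direction = 1 if order_amount > 0 else -1
    fill_price = price * (1 + direction * basis_points / 10000.0)
    fill_amount = direction * min(abs(order_amount), int(volume_limit * bar_volume))
    return fill_price, fill_amount

_price, _amount = _fixed_bps_fill(3.00, 5, 0.1, 100)
assert abs(_price - 3.0015) < 1e-9 and _amount == 20
_price, _amount = _fixed_bps_fill(3.00, 5, 0.1, -100)
assert abs(_price - 2.9985) < 1e-9 and _amount == -20
_price, _amount = _fixed_bps_fill(3.00, 10, 0.1, 100)
assert abs(_price - 3.003) < 1e-9 and _amount == 20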
@parameterized.expand([
# Volume limit for the bar is 20. We've ordered 10 total shares.
# We should fill both orders completely.
('order_under_limit', 9, 1, 9, 1),
# Volume limit for the bar is 20. We've ordered 21 total shares.
# The second order should have one share remaining after fill.
('order_over_limit', -3, 18, -3, 17),
])
def test_volume_limit(self, name,
first_order_amount,
second_order_amount,
first_order_fill_amount,
second_order_fill_amount):
slippage_model = FixedBasisPointsSlippage(basis_points=5,
volume_limit=0.1)
open_orders = [
Order(
dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=order_amount,
filled=0,
asset=self.ASSET133
)
for order_amount in [first_order_amount, second_order_amount]
]
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.first_minute,
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEquals(len(orders_txns), 2)
_, first_txn = orders_txns[0]
_, second_txn = orders_txns[1]
self.assertEquals(first_txn['amount'], first_order_fill_amount)
self.assertEquals(second_txn['amount'], second_order_fill_amount)
def test_broken_constructions(self):
with self.assertRaises(ValueError) as e:
FixedBasisPointsSlippage(basis_points=-1)
self.assertEqual(
str(e.exception),
"FixedBasisPointsSlippage() expected a value greater than "
"or equal to 0 for argument 'basis_points', but got -1 instead."
)
with self.assertRaises(ValueError) as e:
FixedBasisPointsSlippage(volume_limit=0)
self.assertEqual(
str(e.exception),
"FixedBasisPointsSlippage() expected a value strictly "
"greater than 0 for argument 'volume_limit', but got 0 instead."
)
def test_fill_zero_shares(self):
slippage_model = FixedBasisPointsSlippage(basis_points=5,
volume_limit=0.1)
# since the volume limit for the bar is 20, the first order will be
# filled and there will be a transaction for it, and the second order
# will order zero shares so there should not be a transaction for it.
open_orders = [
Order(
dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=20,
filled=0,
asset=self.ASSET133
)
] * 2
bar_data = self.create_bardata(
simulation_dt_func=lambda: self.first_minute
)
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEqual(1, len(orders_txns))
# ordering zero shares should result in zero transactions
open_orders = [
Order(
dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=0,
filled=0,
asset=self.ASSET133
)
]
orders_txns = list(slippage_model.simulate(
bar_data,
self.ASSET133,
open_orders,
))
self.assertEqual(0, len(orders_txns))
|
|
import logging
from django.utils.text import slugify
from rest_framework import permissions, status, viewsets
from rest_framework.authentication import (
SessionAuthentication,
TokenAuthentication
)
from rest_framework.decorators import list_route
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
import maya
from .models import ChatMessage
from .serializers import ChatMessageSerializer
logger = logging.getLogger(__name__)
class IsOwner(permissions.IsAuthenticated):
"""Only allow owners of an object to view/edit it."""
def has_object_permission(self, request, view, obj):
return obj.user == request.user
class PageSizePagination(PageNumberPagination):
"""Allow specifying a `page_size` querystring param to change page size."""
page_size_query_param = 'page_size'
class ChatMessageViewSet(viewsets.ReadOnlyModelViewSet):
"""
## Contents
- <a href="#list-a-users-chat-messages">List a User's Chat Messages</a>
- <a href="#list-a-users-unread-messages">List a user's unread messages</a>
- <a href="#chatmessages-objects">ChatMessage objects</a>
- <a href="#chat-room-history">Chat Room History</a>
- <a href="#filters">Fitlers</a>
----
## List a User's Chat Messages.
This endpoint allows you to retrieve a paginated list of `ChatMessage`
objects that were created by the authenticated user. This endpoint is
currently read-only.
## List a user's unread messages.
You can also retrieve a list of the authenticated user's unread messages
by sending a GET request to [/api/chat/unread/](/api/chat/unread/).
## ChatMessage objects.
{
"id": 61,
"user": 1,
"user_username": "brad",
"user_full_name": "Brad Montgomery",
"room": "chat-brad-russell",
"text": "Hi there, this is a message",
"read": false,
"created_on": "2017-01-04 23:10:00+0000"
}
`ChatMessage` objects will have the following format, where:
- `id` is the ChatMessage object's unique ID.
- `user` is the user id for the user that created the chat message.
- `user_username` is the author's username.
- `user_full_name` is the author's full name.
- `room` is the room in which the message was posted. All rooms are prefixed
with the string `chat-` and contain both participants' IDs. Chat
room participant IDs will always be listed in ascending order.
- `text` is the text of the message.
- `read` is a boolean. True means the user has seen the message, False
means it is unread.
- `created_on` is the date on which the message was persisted to the database.
## Chat Room History
You can also retrieve the entire history for a given chat room at
[/api/chat/history/](/api/chat/history/), with two restrictions:
1. You must provide the exact name of the chat room as a GET parameter,
e.g. `/api/chat/history/?room=chat-user_a-user_b`
2. The authenticated user *must* have been a member of that chat room.
The number of messages returned from this endpoint can be controlled with
a `size` parameter (the default is 20). For example, the following request
would return 10 messages from the room `chat-foo-bar`:
/api/chat/history/?room=chat-foo-bar&size=10
## Marking messages as read.
Send a PUT request with the following payload:
{
room: 'chat-1-42',
}
This will update all messages in which the authenticated user was a
recipient, setting them as `read`.
## Filters
- `since`: Retrieve all chat messages that have been created since a given
date/time.
- `before`: Retrieve all chat messages that were created _before_ the given
date/time.
----
"""
authentication_classes = (TokenAuthentication, SessionAuthentication)
queryset = ChatMessage.objects.all()
serializer_class = ChatMessageSerializer
permission_classes = [IsOwner]
pagination_class = PageSizePagination
def get_queryset(self):
self.queryset = super().get_queryset().filter(user=self.request.user)
return self.queryset
@list_route(methods=['get'], url_path='unread')
def unread(self, request, pk=None):
"""List the current user's unread chat messages."""
if not request.user.is_authenticated():
return Response({}, status=status.HTTP_401_UNAUTHORIZED)
messages = ChatMessage.objects.filter(
read=False,
room__icontains=request.user.id,
)
content = {
'count': messages.count(),
'results': ChatMessageSerializer(messages, many=True).data,
}
return Response(content, status=status.HTTP_200_OK)
@list_route(methods=['get'], url_path='history')
def chat_room_history(self, request, pk=None):
"""List some messages for the given room, with some restrictions:
1. The room name is provided as a GET param, e.g. (?room=whatever)
2. The authenticated user must have been a member of the room.
Available filters:
- room: (required) The room slug to pull history for
- since: Get history after the given date or datetime.
- before: Get history before the given date or datetime.
- size: Number of messages to retrieve (default is 20)
"""
if not request.user.is_authenticated():
return Response({}, status=status.HTTP_401_UNAUTHORIZED)
# Pull all supported filters from the query params.
room = request.query_params.get('room', None)
size = int(request.query_params.get('size', 20))
since = request.query_params.get('since')
before = request.query_params.get('before')
content = {}
user_id = slugify(request.user.id)
messages = ChatMessage.objects.all()
if room and user_id in room:
messages = messages.filter(room=room)
if since:
since = maya.parse(since).datetime()
messages = messages.filter(created_on__gte=since)
elif before:
before = maya.parse(before).datetime()
messages = messages.filter(created_on__lte=before)
messages = messages[:size]
content = {
'count': messages.count(),
'results': ChatMessageSerializer(messages, many=True).data,
}
return Response(content, status=status.HTTP_200_OK)
@list_route(methods=['put'], url_path='read')
def chat_room_mark_read(self, request):
"""Set the whole chat room's status as 'read'.
1. The room name is provided in the payload, e.g. (room=whatever)
2. The authenticated user must have been a member of the room.
"""
if not request.user.is_authenticated():
return Response({}, status=status.HTTP_401_UNAUTHORIZED)
room = request.data.get('room', None)
user_id = slugify(request.user.id)
if room and user_id in room:
# We want to update messages in which the authenticated user was
# a recipient, so exclude any of the messages they sent
messages = ChatMessage.objects.filter(room=room)
messages = messages.exclude(user=request.user)
messages.update(read=True)
return Response(None, status=status.HTTP_204_NO_CONTENT)
err = {
'error': 'Either room not found or user was not a member',
}
return Response(err, status.HTTP_400_BAD_REQUEST)
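# --- Client-side usage sketch (illustration only) ---
# Shows how the endpoints documented in ChatMessageViewSet's docstring might be
# exercised with the `requests` library. The base URL, token, and room name are
# assumptions, and the exact URL prefix depends on how the project's router
# mounts this viewset.
def example_chat_api_usage(base_url="https://example.com/api/chat", token="<token>"):
    """Hypothetical walkthrough of the unread, history, and read endpoints."""
    import requests  # assumed to be available in the client environment
    headers = {"Authorization": "Token {}".format(token)}
    # Unread messages for the authenticated user.
    unread = requests.get(base_url + "/unread/", headers=headers).json()
    # Up to 10 messages from a room the user belongs to, created since a date.
    history = requests.get(
        base_url + "/history/",
        params={"room": "chat-1-42", "size": 10, "since": "2017-01-01"},
        headers=headers,
    ).json()
    # Mark every message the user received in that room as read.
    requests.put(base_url + "/read/", json={"room": "chat-1-42"}, headers=headers)
    return unread, history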
|
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import importlib.resources
from typing import ClassVar, Iterable, Sequence, cast
from pants.backend.python.target_types import ConsoleScript, EntryPoint, MainSpecification
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.lockfile_metadata import calculate_invalidation_digest
from pants.backend.python.util_rules.pex import (
Lockfile,
LockfileContent,
PexRequirements,
ToolCustomLockfile,
ToolDefaultLockfile,
)
from pants.engine.fs import FileContent
from pants.option.errors import OptionsError
from pants.option.subsystem import Subsystem
from pants.util.ordered_set import FrozenOrderedSet
DEFAULT_TOOL_LOCKFILE = "<default>"
NO_TOOL_LOCKFILE = "<none>"
class PythonToolRequirementsBase(Subsystem):
"""Base class for subsystems that configure a set of requirements for a python tool."""
# Subclasses must set.
default_version: ClassVar[str]
# Subclasses do not need to override.
default_extra_requirements: ClassVar[Sequence[str]] = []
default_interpreter_constraints: ClassVar[Sequence[str]] = []
register_interpreter_constraints: ClassVar[bool] = False
# If this tool does not mix with user requirements (e.g. Flake8 and Isort, but not Pylint and
# Pytest), you should set this to True.
#
# You also need to subclass `PythonToolLockfileSentinel` and create a rule that goes from
# it -> PythonToolLockfileRequest by calling `PythonLockFileRequest.from_python_tool()`.
# Register the UnionRule.
register_lockfile: ClassVar[bool] = False
default_lockfile_resource: ClassVar[tuple[str, str] | None] = None
default_lockfile_url: ClassVar[str | None] = None
uses_requirements_from_source_plugins: ClassVar[bool] = False
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--version",
type=str,
advanced=True,
default=cls.default_version,
help="Requirement string for the tool.",
)
register(
"--extra-requirements",
type=list,
member_type=str,
advanced=True,
default=cls.default_extra_requirements,
help="Any additional requirement strings to use with the tool. This is useful if the "
"tool allows you to install plugins or if you need to constrain a dependency to "
"a certain version.",
)
if cls.default_interpreter_constraints and not cls.register_interpreter_constraints:
raise ValueError(
f"`default_interpreter_constraints` are configured for `{cls.options_scope}`, but "
"`register_interpreter_constraints` is not set to `True`, so the "
"`--interpreter-constraints` option will not be registered. Did you mean to set "
"this?"
)
if cls.register_interpreter_constraints:
register(
"--interpreter-constraints",
type=list,
advanced=True,
default=cls.default_interpreter_constraints,
help="Python interpreter constraints for this tool.",
)
if cls.register_lockfile and (
not cls.default_lockfile_resource or not cls.default_lockfile_url
):
raise ValueError(
"The class property `default_lockfile_resource` and `default_lockfile_url` "
f"must be set if `register_lockfile` is set. See `{cls.options_scope}`."
)
if cls.register_lockfile:
register(
"--lockfile",
type=str,
default=DEFAULT_TOOL_LOCKFILE,
advanced=True,
help=(
"Path to a lockfile used for installing the tool.\n\n"
f"Set to the string `{DEFAULT_TOOL_LOCKFILE}` to use a lockfile provided by "
"Pants, so long as you have not changed the `--version` and "
"`--extra-requirements` options, and the tool's interpreter constraints are "
"compatible with the default. Pants will error or warn if the lockfile is not "
"compatible (controlled by `[python-setup].invalid_lockfile_behavior`). See "
f"{cls.default_lockfile_url} for the default lockfile contents.\n\n"
f"Set to the string `{NO_TOOL_LOCKFILE}` to opt out of using a lockfile. We "
f"do not recommend this, though, as lockfiles are essential for reproducible "
f"builds.\n\n"
"To use a custom lockfile, set this option to a file path relative to the "
f"build root, then run `./pants generate-lockfiles "
f"--resolve={cls.options_scope}`.\n\n"
"Lockfile generation currently does not wire up the `[python-repos]` options. "
"If lockfile generation fails, you can manually generate a lockfile, such as "
"by using pip-compile or `pip freeze`. Set this option to the path to your "
"manually generated lockfile. When manually maintaining lockfiles, set "
"`[python-setup].invalid_lockfile_behavior = 'ignore'`."
),
)
@property
def version(self) -> str:
return cast(str, self.options.version)
@property
def extra_requirements(self) -> tuple[str, ...]:
return tuple(self.options.extra_requirements)
@property
def all_requirements(self) -> tuple[str, ...]:
"""All the raw requirement strings to install the tool.
This may not include transitive dependencies: these are top-level requirements.
"""
return (self.version, *self.extra_requirements)
def pex_requirements(
self,
*,
extra_requirements: Iterable[str] = (),
) -> PexRequirements | Lockfile | LockfileContent:
"""The requirements to be used when installing the tool.
If the tool supports lockfiles, the returned type will install from the lockfile rather than
`all_requirements`.
"""
requirements = (*self.all_requirements, *extra_requirements)
if not self.uses_lockfile:
return PexRequirements(requirements)
hex_digest = calculate_invalidation_digest(requirements)
if self.lockfile == DEFAULT_TOOL_LOCKFILE:
assert self.default_lockfile_resource is not None
return ToolDefaultLockfile(
file_content=FileContent(
f"{self.options_scope}_default_lockfile.txt",
importlib.resources.read_binary(*self.default_lockfile_resource),
),
lockfile_hex_digest=hex_digest,
req_strings=FrozenOrderedSet(requirements),
options_scope_name=self.options_scope,
uses_project_interpreter_constraints=(not self.register_interpreter_constraints),
uses_source_plugins=self.uses_requirements_from_source_plugins,
)
return ToolCustomLockfile(
file_path=self.lockfile,
file_path_description_of_origin=f"the option `[{self.options_scope}].lockfile`",
lockfile_hex_digest=hex_digest,
req_strings=FrozenOrderedSet(requirements),
options_scope_name=self.options_scope,
uses_project_interpreter_constraints=(not self.register_interpreter_constraints),
uses_source_plugins=self.uses_requirements_from_source_plugins,
)
@property
def lockfile(self) -> str:
f"""The path to a lockfile or special strings '{NO_TOOL_LOCKFILE}' and '{DEFAULT_TOOL_LOCKFILE}'.
This assumes you have set the class property `register_lockfile = True`.
"""
return cast(str, self.options.lockfile)
@property
def uses_lockfile(self) -> bool:
return self.register_lockfile and self.lockfile != NO_TOOL_LOCKFILE
@property
def interpreter_constraints(self) -> InterpreterConstraints:
"""The interpreter constraints to use when installing and running the tool.
This assumes you have set the class property `register_interpreter_constraints = True`.
"""
return InterpreterConstraints(self.options.interpreter_constraints)
class PythonToolBase(PythonToolRequirementsBase):
"""Base class for subsystems that configure a python tool to be invoked out-of-process."""
# Subclasses must set.
default_main: ClassVar[MainSpecification]
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--console-script",
type=str,
advanced=True,
default=cls.default_main.spec if isinstance(cls.default_main, ConsoleScript) else None,
help=(
"The console script for the tool. Using this option is generally preferable to "
"(and mutually exclusive with) specifying an --entry-point since console script "
"names have a higher expectation of staying stable across releases of the tool. "
"Usually, you will not want to change this from the default."
),
)
register(
"--entry-point",
type=str,
advanced=True,
default=cls.default_main.spec if isinstance(cls.default_main, EntryPoint) else None,
help=(
"The entry point for the tool. Generally you only want to use this option if the "
"tool does not offer a --console-script (which this option is mutually exclusive "
"with). Usually, you will not want to change this from the default."
),
)
@property
def main(self) -> MainSpecification:
is_default_console_script = self.options.is_default("console_script")
is_default_entry_point = self.options.is_default("entry_point")
if not is_default_console_script and not is_default_entry_point:
raise OptionsError(
f"Both [{self.options_scope}].console-script={self.options.console_script} and "
f"[{self.options_scope}].entry-point={self.options.entry_point} are configured "
f"but these options are mutually exclusive. Please pick one."
)
if not is_default_console_script:
return ConsoleScript(cast(str, self.options.console_script))
if not is_default_entry_point:
return EntryPoint.parse(cast(str, self.options.entry_point))
return self.default_main
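# A minimal usage sketch, not part of the original module: how a concrete tool
# subsystem might subclass PythonToolBase. The scope, help text, version, and
# console script below are hypothetical placeholders, and `default_version` is
# assumed to be the class attribute backing the `--version` option registered
# earlier in this file.
class ExampleToolSubsystem(PythonToolBase):
    options_scope = "example-tool"
    help = "A hypothetical tool, used only to illustrate the PythonToolBase API."
    default_version = "example-tool==1.0.0"  # hypothetical requirement string
    default_main = ConsoleScript("example-tool")  # hypothetical console script name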
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import nose
import numpy as np
from pandas import (DataFrame, Series, Timestamp, _np_version_under1p11)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas import _np_version_under1p9
from pandas.tests.frame.common import TestData
class TestDataFrameQuantile(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_quantile(self):
from numpy import percentile
q = self.tsframe.quantile(0.1, axis=0)
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
tm.assert_index_equal(q.index, self.tsframe.columns)
q = self.tsframe.quantile(0.9, axis=1)
self.assertEqual(q['2000-01-17'],
percentile(self.tsframe.loc['2000-01-17'], 90))
tm.assert_index_equal(q.index, self.tsframe.index)
# test degenerate case
q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
assert(np.isnan(q['x']) and np.isnan(q['y']))
# non-numeric exclusion
df = DataFrame({'col1': ['A', 'A', 'B', 'B'], 'col2': [1, 2, 3, 4]})
rs = df.quantile(0.5)
xp = df.median().rename(0.5)
assert_series_equal(rs, xp)
# axis
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
assert_series_equal(result, expected)
result = df.quantile([.5, .75], axis=1)
expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
3: [3.5, 3.75]}, index=[0.5, 0.75])
assert_frame_equal(result, expected, check_index_type=True)
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
# See GH #7312
df = DataFrame([[1, 2, 3],
['a', 'b', 4]])
result = df.quantile(.5, axis=1)
expected = Series([3., 4.], index=[0, 1], name=0.5)
assert_series_equal(result, expected)
def test_quantile_axis_mixed(self):
# mixed on axis=1
df = DataFrame({"A": [1, 2, 3],
"B": [2., 3., 4.],
"C": pd.date_range('20130101', periods=3),
"D": ['foo', 'bar', 'baz']})
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], name=0.5)
assert_series_equal(result, expected)
# must raise
def f():
df.quantile(.5, axis=1, numeric_only=False)
self.assertRaises(TypeError, f)
def test_quantile_axis_parameter(self):
# GH 9543/9544
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=0)
expected = Series([2., 3.], index=["A", "B"], name=0.5)
assert_series_equal(result, expected)
expected = df.quantile(.5, axis="index")
assert_series_equal(result, expected)
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5)
assert_series_equal(result, expected)
result = df.quantile(.5, axis="columns")
assert_series_equal(result, expected)
self.assertRaises(ValueError, df.quantile, 0.1, axis=-1)
self.assertRaises(ValueError, df.quantile, 0.1, axis="column")
def test_quantile_interpolation(self):
# GH #10174
if _np_version_under1p9:
raise nose.SkipTest("Numpy version under 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
q = self.intframe.quantile(0.1)
self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
# test with and without interpolation keyword
q1 = self.intframe.quantile(0.1)
self.assertEqual(q1['A'], np.percentile(self.intframe['A'], 10))
assert_series_equal(q, q1)
# interpolation method other than default linear
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1, interpolation='nearest')
expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5)
assert_series_equal(result, expected)
# cross-check interpolation=nearest results in original dtype
exp = np.percentile(np.array([[1, 2, 3], [2, 3, 4]]), .5,
axis=0, interpolation='nearest')
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype='int64')
assert_series_equal(result, expected)
# float
df = DataFrame({"A": [1., 2., 3.], "B": [2., 3., 4.]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1, interpolation='nearest')
expected = Series([1., 2., 3.], index=[1, 2, 3], name=0.5)
assert_series_equal(result, expected)
exp = np.percentile(np.array([[1., 2., 3.], [2., 3., 4.]]), .5,
axis=0, interpolation='nearest')
expected = Series(exp, index=[1, 2, 3], name=0.5, dtype='float64')
assert_series_equal(result, expected)
# axis
result = df.quantile([.5, .75], axis=1, interpolation='lower')
expected = DataFrame({1: [1., 1.], 2: [2., 2.],
3: [3., 3.]}, index=[0.5, 0.75])
assert_frame_equal(result, expected)
# test degenerate case
df = DataFrame({'x': [], 'y': []})
q = df.quantile(0.1, axis=0, interpolation='higher')
assert(np.isnan(q['x']) and np.isnan(q['y']))
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5], interpolation='midpoint')
# https://github.com/numpy/numpy/issues/7163
if _np_version_under1p11:
expected = DataFrame([[1.5, 1.5, 1.5], [2.5, 2.5, 2.5]],
index=[.25, .5], columns=['a', 'b', 'c'])
else:
expected = DataFrame([[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
def test_quantile_interpolation_np_lt_1p9(self):
# GH #10174
if not _np_version_under1p9:
raise nose.SkipTest("Numpy version is greater than 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
q = self.intframe.quantile(0.1)
self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
# test with and without interpolation keyword
q1 = self.intframe.quantile(0.1)
self.assertEqual(q1['A'], np.percentile(self.intframe['A'], 10))
assert_series_equal(q, q1)
# interpolation method other than default linear
expErrMsg = "Interpolation methods other than linear"
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
with assertRaisesRegexp(ValueError, expErrMsg):
df.quantile(.5, axis=1, interpolation='nearest')
with assertRaisesRegexp(ValueError, expErrMsg):
df.quantile([.5, .75], axis=1, interpolation='lower')
# test degenerate case
df = DataFrame({'x': [], 'y': []})
with assertRaisesRegexp(ValueError, expErrMsg):
q = df.quantile(0.1, axis=0, interpolation='higher')
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
with assertRaisesRegexp(ValueError, expErrMsg):
df.quantile([.25, .5], interpolation='midpoint')
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5])
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
# axis = 1
        result = df.quantile([.25, .5], axis=1)
        expected = DataFrame([[1., 2., 3.], [1., 2., 3.]],
                             index=[.25, .5], columns=[0, 1, 2])
        assert_frame_equal(result, expected)
# empty
result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)
expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},
index=[.1, .9])
assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})
# exclude datetime
        result = df.quantile(.5)
        expected = Series([2.5], index=['b'], name=0.5)
        assert_series_equal(result, expected)
# datetime
result = df.quantile(.5, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],
index=['a', 'b'],
name=0.5)
assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([.5], numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],
index=[.5], columns=['a', 'b'])
assert_frame_equal(result, expected)
# axis = 1
df['c'] = pd.to_datetime(['2011', '2012'])
result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')],
index=[0, 1],
name=0.5)
assert_series_equal(result, expected)
result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')]],
index=[0.5], columns=[0, 1])
assert_frame_equal(result, expected)
# empty when numeric_only=True
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# result = df[['a', 'c']].quantile(.5)
# result = df[['a', 'c']].quantile([.5])
def test_quantile_invalid(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assertRaisesRegexp(ValueError, msg):
self.tsframe.quantile(invalid)
def test_quantile_box(self):
df = DataFrame({'A': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
'B': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'C': [pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days')]})
res = df.quantile(0.5, numeric_only=False)
exp = pd.Series([pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days')],
name=0.5, index=['A', 'B', 'C'])
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = pd.DataFrame([[pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days')]],
index=[0.5], columns=['A', 'B', 'C'])
tm.assert_frame_equal(res, exp)
# DatetimeBlock may be consolidated and contain NaT in different loc
df = DataFrame({'A': [pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.NaT,
pd.Timestamp('2011-01-03')],
'B': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.NaT,
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.NaT,
pd.Timestamp('2011-01-03', tz='US/Eastern')],
'C': [pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.NaT],
'c': [pd.NaT,
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days')]},
columns=list('AaBbCc'))
res = df.quantile(0.5, numeric_only=False)
exp = pd.Series([pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days'),
pd.Timedelta('2 days')],
name=0.5, index=list('AaBbCc'))
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = pd.DataFrame([[pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timedelta('2 days'),
pd.Timedelta('2 days')]],
index=[0.5], columns=list('AaBbCc'))
tm.assert_frame_equal(res, exp)
def test_quantile_nan(self):
# GH 14357 - float block where some cols have missing values
df = DataFrame({'a': np.arange(1, 6.0), 'b': np.arange(1, 6.0)})
df.iloc[-1, 1] = np.nan
res = df.quantile(0.5)
exp = Series([3.0, 2.5], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75])
exp = DataFrame({'a': [3.0, 4.0], 'b': [2.5, 3.25]}, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
res = df.quantile(0.5, axis=1)
exp = Series(np.arange(1.0, 6.0), name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75], axis=1)
exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
# full-nan column
df['b'] = np.nan
res = df.quantile(0.5)
exp = Series([3.0, np.nan], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5, 0.75])
exp = DataFrame({'a': [3.0, 4.0], 'b': [np.nan, np.nan]},
index=[0.5, 0.75])
tm.assert_frame_equal(res, exp)
def test_quantile_nat(self):
# full NaT column
df = DataFrame({'a': [pd.NaT, pd.NaT, pd.NaT]})
res = df.quantile(0.5, numeric_only=False)
exp = Series([pd.NaT], index=['a'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame({'a': [pd.NaT]}, index=[0.5])
tm.assert_frame_equal(res, exp)
# mixed non-null / full null column
df = DataFrame({'a': [pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03')],
'b': [pd.NaT, pd.NaT, pd.NaT]})
res = df.quantile(0.5, numeric_only=False)
exp = Series([pd.Timestamp('2012-01-02'), pd.NaT], index=['a', 'b'],
name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5], numeric_only=False)
exp = DataFrame([[pd.Timestamp('2012-01-02'), pd.NaT]], index=[0.5],
columns=['a', 'b'])
tm.assert_frame_equal(res, exp)
def test_quantile_empty(self):
# floats
df = DataFrame(columns=['a', 'b'], dtype='float64')
res = df.quantile(0.5)
exp = Series([np.nan, np.nan], index=['a', 'b'], name=0.5)
tm.assert_series_equal(res, exp)
res = df.quantile([0.5])
exp = DataFrame([[np.nan, np.nan]], columns=['a', 'b'], index=[0.5])
tm.assert_frame_equal(res, exp)
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# res = df.quantile(0.5, axis=1)
# res = df.quantile([0.5], axis=1)
# ints
df = DataFrame(columns=['a', 'b'], dtype='int64')
# FIXME (gives empty frame in 0.18.1, broken in 0.19.0)
# res = df.quantile(0.5)
# datetimes
df = DataFrame(columns=['a', 'b'], dtype='datetime64')
# FIXME (gives NaNs instead of NaT in 0.18.1 or 0.19.0)
# res = df.quantile(0.5, numeric_only=False)
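# A small illustrative helper, not part of the original test module: it sketches the
# DataFrame.quantile behaviors exercised above (scalar vs. list of quantiles, the axis
# argument, and the interpolation keyword) on a tiny frame. The helper name is hypothetical.
def _quantile_usage_sketch():
    df = DataFrame({'A': [1, 2, 3], 'B': [2, 3, 4]})
    as_series = df.quantile(0.5)                          # Series indexed by column, name=0.5
    as_frame = df.quantile([0.25, 0.75])                  # DataFrame indexed by the quantiles
    by_row = df.quantile(0.5, axis=1)                     # quantile across columns, per row
    nearest = df.quantile(0.5, interpolation='nearest')   # 'nearest' picks an actual data value
    return as_series, as_frame, by_row, nearest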
|
|
import cv2
import numpy as np
import argparse
from matplotlib import pyplot as plt
''' Support library for the jersey number recognition demo '''
#rectangle-rectangle intersection check
def intersectRect(rect1,rect2):
if (rect1[0] > rect2[2] or rect1[2] < rect2[0] or rect1[1] > rect2[3] or rect1[3] < rect2[1]):
return False
else:
return True
#helper function to determine whether, given an existing region (oldBound) known to contain a
#significant amount of gradient, a candidate expanded region (testBound) also contains a significant
#amount of gradient. TLDR: if testBound mostly adds no-gradient pixels, the expansion is rejected
def testNewRegion(image, oldBound, testBound):
testBound[0] = max(testBound[0], 0);
testBound[1] = max(testBound[1], 0);
testBound[2] = min(testBound[2], image.shape[1]);
testBound[3] = min(testBound[3], image.shape[0]);
temp = image[testBound[1]:testBound[3], testBound[0]:testBound[2]];
startEnergy = np.sum(temp);
startArea = float((testBound[2] - testBound[0]) * (testBound[3] - testBound[1]));
temp = image[oldBound[1]:oldBound[3], oldBound[0]:oldBound[2]];
newEnergy = np.sum(temp);
newArea = float((oldBound[2] - oldBound[0]) * (oldBound[3] - oldBound[1]));
outbound = oldBound.copy()
if (newArea != startArea):
if ( (newEnergy -startEnergy) / (newArea - startArea) > startEnergy / startArea / 1.5):
outbound = testBound.copy()
return outbound
#given a start region (rect) ... and a color and grayscale image, expand the region
#until the gradient intensity starts to drop off
def refineTorso(rect, image, imageGray):
#crop out the initial region
cropGray = imageGray[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]]
cropColor = image[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]]
gradwidth = 3; #Gaussian kernel blur size
#compute and blur the gradient
grad = cv2.Laplacian(cropGray, cv2.CV_64F)
grad = np.multiply(grad, grad); #magnitude
gradBlur = cv2.GaussianBlur(grad, (gradwidth, gradwidth), 0)
#compute the center of mass of the cropped region
#using the gradient intensity as the 'mass'
xs = np.arange(cropGray.shape[1]);
ys = np.arange(cropGray.shape[0]);
gridx, gridy = np.meshgrid(xs, ys);
#mask the gradient to the white areas
    #the jersey numbers will either be WHITE ... or next to WHITE ... so this does a decent job of
#masking out the gradients not associated with jersey numbers
mask = cv2.inRange(cropColor, np.array([200, 200, 200]), np.array([255, 255, 255])) / 255
maskedGrad = np.multiply(gradBlur, mask)
#compute the center-of-mass using the masked gradient
xm = np.multiply(gridx, maskedGrad)
ym = np.multiply(gridy, maskedGrad)
xm = np.sum(xm)
ym = np.sum(ym)
gradSum = np.sum(maskedGrad);
if (gradSum > 0):
xm = xm / gradSum
if (gradSum > 0):
ym = ym / gradSum
#initialize a starting region around the center of gradient mass
cx = int(np.round(xm))
cy = int(np.round(ym))
bound = np.array([cx - 3, cy - 3, cx + 3, cy + 3])
oldBound = bound.copy();
#we will test against the blurred gradient
#to expand our region until most of the local gradient is contained
regionTestImage = gradBlur
#while expanding the region gains significant gradient
while True:
#test expansion in all four directions, if there is improvement
#expand the region
testBound = bound.copy()
testBound[0] = testBound[0] - 5
bound = testNewRegion(regionTestImage, bound, testBound)
testBound = bound.copy()
testBound[1] = testBound[1] - 5
bound = testNewRegion(regionTestImage, bound, testBound)
testBound = bound.copy()
testBound[2] = testBound[2] + 5
bound = testNewRegion(regionTestImage, bound, testBound)
testBound = bound.copy()
testBound[3] = testBound[3] + 5
bound = testNewRegion(regionTestImage, bound, testBound)
        #if no expansion significantly increases the enclosed gradient
#exit out
if (np.array_equal(bound, oldBound) == True):
break;
oldBound = bound.copy();
    #reformulate the region as a bounding box
cx = np.round((bound[0] + bound[2]) / 2);
cy = np.round((bound[1] + bound[3]) / 2);
dx = (bound[2] - bound[0])/2;
dy = (bound[3] - bound[1])/2;
#add some padding so the deep net won't be confused
dx = np.round(dx*1.5);
dy = np.round(dy*1.5);
dx = max(dx,dy);
dy = max(dx,dy);
#compose a bounding rect
bound[0] = max(rect[0] + cx - dx,0);
bound[1] = max(rect[1] + cy - dy,0);
bound[2] = min(rect[0] + cx + dx, image.shape[1]);
bound[3] = min(rect[1] + cy + dy, image.shape[0]);
return bound.copy()
#use k-means to get the main color regions of the frame
def separateFrame(frame,K):
#reformat the frame
input = frame.reshape((-1, 3))
# convert to np.float32
input = np.float32(input)
# define criteria, number of clusters(K) and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret, label, center = cv2.kmeans(input, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
np.uint8(label)
#binstore will store the binary images of each of the extracted regions
binstore = np.ndarray([K, frame.shape[0], frame.shape[1]], dtype='uint8')
sums = np.zeros([K], dtype=int);
intensities = np.zeros([K], dtype=int);
#get an image of cluster membership numbers for each pixel
label = np.uint8(label)
label = label.flatten()
label = label.reshape(frame.shape[0], frame.shape[1])
#determine WHICH of these clusters codes the white areas in the image (jerseys, lines, etc.)
white_label = 0
white_max = -1
kernel = np.array([[1,1,1,1]], np.uint8)
#find the whitest cluster center
for i in np.arange(0, K):
res = cv2.inRange(label, i, i);
res = cv2.dilate(res, kernel, iterations=1)
binstore[i, :, :] = res
sums[i] = np.sum(res)
intensity = np.sum(center[i, :])
intensities[i] = intensity
if (intensity > white_max):
white_max = intensity
white_label = i
#now find which of the clusters codes the grass areas ... we'll start with the one that has the largest
#area (number of pixels)
grass_label = np.argmax(sums)
    '''We may have more than one grass or 'white' region ... i.e., 'white' or 'grass' may have been broken'''
'''down into multiple clusters ... go ahead and merge these clusters'''
allNums = np.arange(K)
'''if we have more grass layers, assign them to the initial grass layer'''
# if we have another cluster that has at least 25% of the main grass cluster, it's also probably grass
sumIdx = np.argwhere(sums > sums[grass_label] / 4.0);
for i in np.arange(len(sumIdx)):
label = np.where(label == np.uint8(sumIdx[i][0]), np.uint8(grass_label), label)
allNums = np.where(allNums == sumIdx[i][0],grass_label, allNums)
    '''if we have more than one white layer, merge them into the initial white layer'''
inIdx = np.argwhere(intensities > 400);
for i in np.arange(len(inIdx)):
label = np.where(label == np.uint8(inIdx[i][0]), np.uint8(white_label), label)
allNums = np.where(allNums == inIdx[i][0],white_label, allNums)
'''now compute binary membership images for each cluster'''
for i in np.arange(0, K):
res = cv2.inRange(label, i, i);
res = cv2.dilate(res, kernel, iterations=1)
binstore[i, :, :] = res
sums[i] = np.sum(res)
return binstore, label, center, grass_label, white_label
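#a hedged usage sketch, not part of the original library: how the outputs of separateFrame
#might typically be consumed; the function name is hypothetical
def whiteAndGrassMasks(frame, K=4):
    binstore, label, center, grass_label, white_label = separateFrame(frame, K)
    #binary image of the whitest cluster (jerseys, field lines, etc.)
    whiteMask = binstore[white_label, :, :]
    #binary image of the dominant grass cluster
    grassMask = binstore[grass_label, :, :]
    return whiteMask, grassMask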
#cases to handle two different encoding for the bounding rects
RECT_MODE_XYWH = 0
RECT_MODE_X1Y1X2Y2 = 1
#do some heuristic checks to see if test regions can be discarded
def cleanRects(inRects, minDx, minDy, labels, grass_label, white_label,mode):
delInds = [];
for i in np.arange(inRects.shape[0]):
rect = inRects[i,:]; #this evaluation rect ...
x = rect[0];
y = rect[1];
if (mode == RECT_MODE_XYWH):
w = rect[2];
h = rect[3];
else: #if mode == RECT_MODE_X1Y1X2Y2
w = rect[2] - rect[0]
h = rect[3] - rect[1]
keep = False
if (w > minDx and h > minDy): #make sure the region is at least large enough
#test the grass and non-pure-white percentage contents of this region
grassPercentage, nonWhitePercentage = testGrassBox(labels, grass_label, white_label, [x, y, w, h])
            # heuristics: keep the region only if it is not mostly grass and not almost entirely grass/white
if (not (grassPercentage > 0.35 or nonWhitePercentage < 0.025)):
keep = True
if (keep == False):
delInds.append(i)
outRects = inRects;
if len(delInds) > 0:
outRects = np.delete(inRects,delInds,0)
return outRects
#just do simple contouring to get the initial regions of interest
def getLabelRects(mask):
outrects = [];
_, cnts, _ = cv2.findContours(mask, 1, 2)
for c in cnts:
x, y, w, h = cv2.boundingRect(c)
outrects.append([x,y,w,h])
return outrects
#get the percentage of this region that is grass or not pure white
def testGrassBox(labels,grass_label, white_label,rect):
grass_count = cv2.inRange(labels[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]], grass_label, grass_label);
white_count = cv2.inRange(labels[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2]], white_label, white_label);
grass_count = float(np.sum(grass_count)/255);
white_count = float(np.sum(white_count)/255);
area = float(rect[2]*rect[3]);
grassPercentage = grass_count / (area);
nonWhitePercentage = (area - grass_count - white_count) / area;
return grassPercentage, nonWhitePercentage
#line box intersection check
def lineBoxIntersect(rx1,ry1,rx2,ry2,tx1,ty1,tx2,ty2):
x1 = min(tx1,tx2)
y1 = min(ty1,ty2)
x2 = max(tx1,tx2)
y2 = max(ty1,ty2)
x = x1
y = y1
fc1 = np.sign((ry2-ry1)*x + (rx1-rx2)*y + (rx2*ry1-rx1*ry2))
x = x1
y = y2
fc2 = np.sign((ry2-ry1)*x + (rx1-rx2)*y + (rx2*ry1-rx1*ry2))
x = x2
y = y1
fc3 = np.sign((ry2-ry1)*x + (rx1-rx2)*y + (rx2*ry1-rx1*ry2))
x = x2
y = y2
fc4 = np.sign((ry2-ry1)*x + (rx1-rx2)*y + (rx2*ry1-rx1*ry2))
if (fc1 == fc2) and (fc2 == fc3) and (fc3 == fc4):
return False
if (rx1 > x2 and rx2 > x2):
return False
if (rx1 < x1 and rx2 < x1):
return False
if (ry1 > y2 and ry2 > y2):
return False
if (ry1 < y1 and ry2 < y1):
return False
return True
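#a small hedged example, not part of the original library, of the sign test above: the
#segment's implicit line equation f(x, y) = (ry2-ry1)*x + (rx1-rx2)*y + (rx2*ry1-rx1*ry2)
#is evaluated at the four box corners; if all corners lie strictly on the same side, the
#segment cannot cross the box. The expected results below were computed by hand.
def _lineBoxIntersectDemo():
    hit = lineBoxIntersect(0, 0, 10, 10, 4, 4, 6, 6)   #diagonal passes through the box -> True
    miss = lineBoxIntersect(0, 0, 1, 1, 5, 5, 6, 6)    #segment ends well before the box -> False
    return hit, miss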
#get the lines on the field ... sidelines, line markers, etc.
def getLines(source):
    minLineLength = 200
    maxLineGap = 10
    lines = cv2.HoughLinesP(image=source, rho=0.02, theta=np.pi / 360, threshold=1,
                            lines=np.array([]),
                            minLineLength=minLineLength, maxLineGap=maxLineGap)
return lines
#compare the hue of two HSV colors (cosine similarity of their hue angles; the value channel is currently unused)
def compareColor(c1,c2):
h1 = np.float(c1[0]) * 2.0 * np.pi / 180.0;
h2 = np.float(c2[0]) * 2.0 * np.pi / 180.0;
i1 = np.float(c1[2]) / 255.0;
i2 = np.float(c2[2]) / 255.0;
hip = np.cos(h1)*np.cos(h2) + np.sin(h1)*np.sin(h2)
return hip;
#get the mean HSV value of a colored image patch
def getPatchHSV(image):
numPixels = image.shape[0]*image.shape[1]
avgColor = np.reshape(image, (numPixels, 3))
avgColor = np.uint8([[np.mean(avgColor, 0)]])
hsv = cv2.cvtColor(avgColor, cv2.COLOR_BGR2HSV)
return avgColor[0,0,:], hsv[0,0,:]
#check whether a box intersects any line in a set of lines
def linesBoxIntersect(lines,tx1,ty1,tx2,ty2):
box = ([tx1,ty1,tx2,ty2]);
a = lines.shape[0]
for i in range(a):
line = lines[i][0][:];
if (np.abs(line[0] - line[2]) > 100.0*np.abs(line[1]-line[3]) and line[1] < 720/2): #horizontal line
if ty1 < line[1] or ty2 < line[3]:
return True
inter = lineBoxIntersect(line[0], line[1], line[2], line[3], box[0], box[1], box[2], box[3])
if inter == True:
return True
return False
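#a hedged end-to-end sketch, not part of the original library, of how these helpers might be
#chained for a single frame; the image path, K, and the size/label arguments to cleanRects
#are illustrative placeholders only
def _frameProcessingSketch(framePath='frame.jpg', K=4):
    frame = cv2.imread(framePath)
    if frame is None:
        return None
    binstore, label, center, grass_label, white_label = separateFrame(frame, K)
    #candidate regions from the white mask, then heuristic filtering against grass/white content
    rects = np.array(getLabelRects(binstore[white_label, :, :]))
    rects = cleanRects(rects, 10, 10, label, grass_label, white_label, RECT_MODE_XYWH)
    #field lines, which can later be used with linesBoxIntersect to discard sideline candidates
    lines = getLines(binstore[white_label, :, :])
    return rects, lines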
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
import sys
LOG_LEVEL_INDEX = sys.argv.index('--log_level') + 1 if '--log_level' in sys.argv else 0
DESIRED_LOG_LEVEL = sys.argv[LOG_LEVEL_INDEX] if 0 < LOG_LEVEL_INDEX < len(sys.argv) else '3'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = DESIRED_LOG_LEVEL
import absl.app
import numpy as np
import progressbar
import shutil
import tensorflow as tf
import tensorflow.compat.v1 as tfv1
import time
import pickle as pkl
from tensorflow.python.ops import gen_audio_ops as contrib_audio
tfv1.logging.set_verbosity({
'0': tfv1.logging.DEBUG,
'1': tfv1.logging.INFO,
'2': tfv1.logging.WARN,
'3': tfv1.logging.ERROR
}.get(DESIRED_LOG_LEVEL))
from datetime import datetime
from ds_ctcdecoder import ctc_beam_search_decoder, Scorer
from .evaluate import evaluate
from six.moves import zip, range
from .util.config import Config, initialize_globals
from .util.checkpoints import load_or_init_graph_for_training, load_graph_for_evaluation
from .util.evaluate_tools import save_samples_json
from .util.feeding import create_dataset, samples_to_mfccs, audiofile_to_features
from src.flags import create_flags, FLAGS
from .util.helpers import check_ctcdecoder_version, ExceptionBox
from .util.logging import create_progressbar, log_debug, log_error, log_info, log_warn
check_ctcdecoder_version()
# Graph Creation
# ==============
def variable_on_cpu(name, shape, initializer):
r"""
Next we concern ourselves with graph creation.
However, before we do so we must introduce a utility function ``variable_on_cpu()``
used to create a variable in CPU memory.
"""
# Use the /cpu:0 device for scoped operations
with tf.device(Config.cpu_device):
        # Create or get the appropriate variable
var = tfv1.get_variable(name=name, shape=shape, initializer=initializer)
return var
def create_overlapping_windows(batch_x):
batch_size = tf.shape(input=batch_x)[0]
window_width = 2 * Config.n_context + 1
num_channels = Config.n_input
# Create a constant convolution filter using an identity matrix, so that the
# convolution returns patches of the input tensor as is, and we can create
# overlapping windows over the MFCCs.
eye_filter = tf.constant(np.eye(window_width * num_channels)
.reshape(window_width, num_channels, window_width * num_channels), tf.float32) # pylint: disable=bad-continuation
# Create overlapping windows
batch_x = tf.nn.conv1d(input=batch_x, filters=eye_filter, stride=1, padding='SAME')
# Remove dummy depth dimension and reshape into [batch_size, n_windows, window_width, n_input]
batch_x = tf.reshape(batch_x, [batch_size, -1, window_width, num_channels])
return batch_x
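# A hedged numpy-only sketch (not used by the training graph) of what the identity-filter
# convolution above computes: for every time step, the surrounding 2*n_context+1 feature
# frames, zero-padded at the edges just like the conv1d's 'SAME' padding.
def _overlapping_windows_reference(batch, n_context):
    n_steps = batch.shape[1]
    padded = np.pad(batch, ((0, 0), (n_context, n_context), (0, 0)), mode='constant')
    windows = [padded[:, t:t + 2 * n_context + 1, :] for t in range(n_steps)]
    # resulting shape: [batch_size, n_steps, 2*n_context + 1, n_input]
    return np.stack(windows, axis=1)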
def dense(name, x, units, dropout_rate=None, relu=True):
with tfv1.variable_scope(name):
bias = variable_on_cpu('bias', [units], tfv1.zeros_initializer())
weights = variable_on_cpu('weights', [x.shape[-1], units], tfv1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform"))
output = tf.nn.bias_add(tf.matmul(x, weights), bias)
if relu:
output = tf.minimum(tf.nn.relu(output), FLAGS.relu_clip)
if dropout_rate is not None:
output = tf.nn.dropout(output, rate=dropout_rate)
return output
def rnn_impl_lstmblockfusedcell(x, seq_length, previous_state, reuse):
with tfv1.variable_scope('cudnn_lstm/rnn/multi_rnn_cell/cell_0'):
fw_cell = tf.contrib.rnn.LSTMBlockFusedCell(Config.n_cell_dim,
forget_bias=0,
reuse=reuse,
name='cudnn_compatible_lstm_cell')
output, output_state = fw_cell(inputs=x,
dtype=tf.float32,
sequence_length=seq_length,
initial_state=previous_state)
return output, output_state
def rnn_impl_cudnn_rnn(x, seq_length, previous_state, _):
assert previous_state is None # 'Passing previous state not supported with CuDNN backend'
# Hack: CudnnLSTM works similarly to Keras layers in that when you instantiate
# the object it creates the variables, and then you just call it several times
    # to enable variable re-use. Because all of our code is structured in an old-
    # school TensorFlow style where you can just call tf.get_variable again with
# reuse=True to reuse variables, we can't easily make use of the object oriented
# way CudnnLSTM is implemented, so we save a singleton instance in the function,
# emulating a static function variable.
if not rnn_impl_cudnn_rnn.cell:
# Forward direction cell:
fw_cell = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers=1,
num_units=Config.n_cell_dim,
input_mode='linear_input',
direction='unidirectional',
dtype=tf.float32)
rnn_impl_cudnn_rnn.cell = fw_cell
output, output_state = rnn_impl_cudnn_rnn.cell(inputs=x,
sequence_lengths=seq_length)
return output, output_state
rnn_impl_cudnn_rnn.cell = None
def rnn_impl_static_rnn(x, seq_length, previous_state, reuse):
with tfv1.variable_scope('cudnn_lstm/rnn/multi_rnn_cell'):
# Forward direction cell:
fw_cell = tfv1.nn.rnn_cell.LSTMCell(Config.n_cell_dim,
forget_bias=0,
reuse=reuse,
name='cudnn_compatible_lstm_cell')
# Split rank N tensor into list of rank N-1 tensors
x = [x[l] for l in range(x.shape[0])]
output, output_state = tfv1.nn.static_rnn(cell=fw_cell,
inputs=x,
sequence_length=seq_length,
initial_state=previous_state,
dtype=tf.float32,
scope='cell_0')
output = tf.concat(output, 0)
return output, output_state
def create_model(batch_x, seq_length, dropout, reuse=False, batch_size=None, previous_state=None, overlap=True, rnn_impl=rnn_impl_lstmblockfusedcell):
layers = {}
# Input shape: [batch_size, n_steps, n_input + 2*n_input*n_context]
if not batch_size:
batch_size = tf.shape(input=batch_x)[0]
layers['input'] = batch_x
# Create overlapping feature windows if needed
if overlap:
batch_x = create_overlapping_windows(batch_x)
# Reshaping `batch_x` to a tensor with shape `[n_steps*batch_size, n_input + 2*n_input*n_context]`.
# This is done to prepare the batch for input into the first layer which expects a tensor of rank `2`.
# Permute n_steps and batch_size
batch_x = tf.transpose(a=batch_x, perm=[1, 0, 2, 3])
# Reshape to prepare input for first layer
batch_x = tf.reshape(batch_x, [-1, Config.n_input + 2*Config.n_input*Config.n_context]) # (n_steps*batch_size, n_input + 2*n_input*n_context)
layers['input_reshaped'] = batch_x
layers['input_length'] = seq_length
# The next three blocks will pass `batch_x` through three hidden layers with
# clipped RELU activation and dropout.
layers['layer_1'] = layer_1 = dense('layer_1', batch_x, Config.n_hidden_1, dropout_rate=dropout[0])
layers['layer_2'] = layer_2 = dense('layer_2', layer_1, Config.n_hidden_2, dropout_rate=dropout[1])
layers['layer_3'] = layer_3 = dense('layer_3', layer_2, Config.n_hidden_3, dropout_rate=dropout[2])
    # `layer_3` is now reshaped into `[n_steps, batch_size, n_hidden_3]`,
# as the LSTM RNN expects its input to be of shape `[max_time, batch_size, input_size]`.
layer_3 = tf.reshape(layer_3, [-1, batch_size, Config.n_hidden_3])
# Run through parametrized RNN implementation, as we use different RNNs
# for training and inference
output, output_state = rnn_impl(layer_3, seq_length, previous_state, reuse)
# Reshape output from a tensor of shape [n_steps, batch_size, n_cell_dim]
# to a tensor of shape [n_steps*batch_size, n_cell_dim]
output = tf.reshape(output, [-1, Config.n_cell_dim])
layers['rnn_output'] = output
layers['rnn_output_state'] = output_state
# Now we feed `output` to the fifth hidden layer with clipped RELU activation
layers['layer_5'] = layer_5 = dense('layer_5', output, Config.n_hidden_5, dropout_rate=dropout[5])
# Now we apply a final linear layer creating `n_classes` dimensional vectors, the logits.
layers['layer_6'] = layer_6 = dense('layer_6', layer_5, Config.n_hidden_6, relu=False)
# Finally we reshape layer_6 from a tensor of shape [n_steps*batch_size, n_hidden_6]
# to the slightly more useful shape [n_steps, batch_size, n_hidden_6].
# Note, that this differs from the input in that it is time-major.
layer_6 = tf.reshape(layer_6, [-1, batch_size, Config.n_hidden_6], name='raw_logits')
layers['raw_logits'] = layer_6
# Output shape: [n_steps, batch_size, n_hidden_6]
return layer_6, layers
# Accuracy and Loss
# =================
# In accord with 'Deep Speech: Scaling up end-to-end speech recognition'
# (http://arxiv.org/abs/1412.5567),
# the loss function used by our network should be the CTC loss function
# (http://www.cs.toronto.edu/~graves/preprint.pdf).
# Conveniently, this loss function is implemented in TensorFlow.
# Thus, we can simply make use of this implementation to define our loss.
def calculate_mean_edit_distance_and_loss(iterator, dropout, reuse):
r'''
This routine beam search decodes a mini-batch and calculates the loss and mean edit distance.
Next to total and average loss it returns the mean edit distance,
the decoded result and the batch's original Y.
'''
# Obtain the next batch of data
batch_filenames, (batch_x, batch_seq_len), batch_y = iterator.get_next()
def fn2audio(fn):
samples = tf.io.read_file(fn)
decoded = contrib_audio.decode_wav(samples, desired_channels=1)
return decoded.audio
# batch_audio = tf.map_fn(fn2audio, batch_filenames, dtype=tf.float32)
batch_audio = tf.constant(0)
if FLAGS.train_cudnn:
rnn_impl = rnn_impl_cudnn_rnn
else:
rnn_impl = rnn_impl_lstmblockfusedcell
# Calculate the logits of the batch
logits, layers = create_model(batch_x, batch_seq_len, dropout, reuse=reuse, rnn_impl=rnn_impl)
# Compute the CTC loss using TensorFlow's `ctc_loss`
total_loss = tfv1.nn.ctc_loss(labels=batch_y, inputs=logits, sequence_length=batch_seq_len)
# Check if any files lead to non finite loss
non_finite_files = tf.gather(batch_filenames, tfv1.where(~tf.math.is_finite(total_loss)))
# Calculate the average loss across the batch
avg_loss = tf.reduce_mean(input_tensor=total_loss)
# Finally we return the average loss
return avg_loss, non_finite_files, layers, batch_y, batch_audio
# Adam Optimization
# =================
# In contrast to 'Deep Speech: Scaling up end-to-end speech recognition'
# (http://arxiv.org/abs/1412.5567),
# in which 'Nesterov's Accelerated Gradient Descent'
# (www.cs.toronto.edu/~fritz/absps/momentum.pdf) was used,
# we will use the Adam method for optimization (http://arxiv.org/abs/1412.6980),
# because, generally, it requires less fine-tuning.
def create_optimizer(learning_rate_var, opt='sgd'):
if opt == 'adam':
return tfv1.train.AdamOptimizer(
learning_rate=learning_rate_var,
beta1=FLAGS.beta1,
beta2=FLAGS.beta2,
epsilon=FLAGS.epsilon)
elif opt == 'sgd':
return tfv1.train.GradientDescentOptimizer(learning_rate=learning_rate_var)
else:
raise ValueError
# Towers
# ======
# In order to properly make use of multiple GPUs, one must introduce new abstractions,
# not present when using a single GPU, that facilitate the multi-GPU use case.
# In particular, one must introduce a means to isolate the inference and gradient
# calculations on the various GPUs.
# The abstraction we introduce for this purpose is called a 'tower'.
# A tower is specified by two properties:
# * **Scope** - A scope, as provided by `tf.name_scope()`,
# is a means to isolate the operations within a tower.
# For example, all operations within 'tower 0' could have their name prefixed with `tower_0/`.
# * **Device** - A hardware device, as provided by `tf.device()`,
# on which all operations within the tower execute.
# For example, all operations of 'tower 0' could execute on the first GPU `tf.device('/gpu:0')`.
def get_tower_results(iterator, optimizer, dropout_rates):
r'''
    With this preliminary step out of the way, we can, for each GPU, introduce a
    tower, for whose batch we calculate and return the optimization gradients
and the average loss across towers.
'''
# To calculate the mean of the losses
tower_avg_losses = []
# Tower gradients to return
tower_gradients = []
# Aggregate any non finite files in the batches
tower_non_finite_files = []
with tfv1.variable_scope(tfv1.get_variable_scope()):
# Loop over available_devices
for i in range(len(Config.available_devices)):
# Execute operations of tower i on device i
device = Config.available_devices[i]
with tf.device(device):
# Create a scope for all operations of tower i
with tf.name_scope('tower_%d' % i):
# Calculate the avg_loss and mean_edit_distance and retrieve the decoded
# batch along with the original batch's labels (Y) of this tower
avg_loss, non_finite_files, layers, batch_y, batch_audio = calculate_mean_edit_distance_and_loss(iterator, dropout_rates, reuse=i > 0)
# Allow for variables to be re-used by the next tower
tfv1.get_variable_scope().reuse_variables()
# Retain tower's avg losses
tower_avg_losses.append(avg_loss)
# Compute gradients for model parameters using tower's mini-batch
gradients = optimizer.compute_gradients(avg_loss)
# Retain tower's gradients
tower_gradients.append(gradients)
tower_non_finite_files.append(non_finite_files)
avg_loss_across_towers = tf.reduce_mean(input_tensor=tower_avg_losses, axis=0)
tfv1.summary.scalar(name='step_loss', tensor=avg_loss_across_towers, collections=['step_summaries'])
all_non_finite_files = tf.concat(tower_non_finite_files, axis=0)
# Return gradients and the average loss
return tower_gradients, avg_loss_across_towers, all_non_finite_files, layers, batch_y, batch_audio
def average_gradients(tower_gradients):
r'''
A routine for computing each variable's average of the gradients obtained from the GPUs.
Note also that this code acts as a synchronization point as it requires all
GPUs to be finished with their mini-batch before it can run to completion.
'''
# List of average gradients to return to the caller
average_grads = []
# Run this on cpu_device to conserve GPU memory
with tf.device(Config.cpu_device):
# Loop over gradient/variable pairs from all towers
for grad_and_vars in zip(*tower_gradients):
# Introduce grads to store the gradients for the current variable
grads = []
# Loop over the gradients for the current variable
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(input_tensor=grad, axis=0)
# Create a gradient/variable tuple for the current variable with its average gradient
grad_and_var = (grad, grad_and_vars[0][1])
# Add the current tuple to average_grads
average_grads.append(grad_and_var)
# Return result to caller
return average_grads
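# A hedged numpy sketch (not used by the graph code above) of the same idea: given one list
# of per-variable gradients per tower, return the element-wise mean for each variable.
def _average_gradients_reference(tower_grads):
    # tower_grads: list (one entry per tower) of lists of numpy arrays (one per variable)
    return [np.mean(np.stack(per_var, axis=0), axis=0) for per_var in zip(*tower_grads)]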
# Logging
# =======
def log_variable(variable, gradient=None):
r'''
We introduce a function for logging a tensor variable's current state.
It logs scalar values for the mean, standard deviation, minimum and maximum.
Furthermore it logs a histogram of its state and (if given) of an optimization gradient.
'''
name = variable.name.replace(':', '_')
mean = tf.reduce_mean(input_tensor=variable)
tfv1.summary.scalar(name='%s/mean' % name, tensor=mean)
tfv1.summary.scalar(name='%s/sttdev' % name, tensor=tf.sqrt(tf.reduce_mean(input_tensor=tf.square(variable - mean))))
tfv1.summary.scalar(name='%s/max' % name, tensor=tf.reduce_max(input_tensor=variable))
tfv1.summary.scalar(name='%s/min' % name, tensor=tf.reduce_min(input_tensor=variable))
tfv1.summary.histogram(name=name, values=variable)
if gradient is not None:
if isinstance(gradient, tf.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
tfv1.summary.histogram(name='%s/gradients' % name, values=grad_values)
def log_grads_and_vars(grads_and_vars):
r'''
Let's also introduce a helper function for logging collections of gradient/variable tuples.
'''
for gradient, variable in grads_and_vars:
log_variable(variable, gradient=gradient)
def train():
do_cache_dataset = True
# pylint: disable=too-many-boolean-expressions
if (FLAGS.data_aug_features_multiplicative > 0 or
FLAGS.data_aug_features_additive > 0 or
FLAGS.augmentation_spec_dropout_keeprate < 1 or
FLAGS.augmentation_freq_and_time_masking or
FLAGS.augmentation_pitch_and_tempo_scaling or
FLAGS.augmentation_speed_up_std > 0 or
FLAGS.augmentation_sparse_warp):
do_cache_dataset = False
exception_box = ExceptionBox()
# Create training and validation datasets
train_set = create_dataset(FLAGS.train_files.split(','),
batch_size=1 if not FLAGS.export_sample_only else FLAGS.train_batch_size,
enable_cache=FLAGS.feature_cache and do_cache_dataset,
cache_path=FLAGS.feature_cache,
train_phase=True,
exception_box=exception_box,
process_ahead=len(Config.available_devices) * (1 if not FLAGS.export_sample_only else FLAGS.train_batch_size) * 2,
buffering=FLAGS.read_buffer)
iterator = tfv1.data.Iterator.from_structure(tfv1.data.get_output_types(train_set),
tfv1.data.get_output_shapes(train_set),
output_classes=tfv1.data.get_output_classes(train_set))
# Make initialization ops for switching between the two sets
train_init_op = iterator.make_initializer(train_set)
# Dropout
dropout_rates = [tfv1.placeholder(tf.float32, name='dropout_{}'.format(i)) for i in range(6)]
dropout_feed_dict = {
dropout_rates[0]: FLAGS.dropout_rate,
dropout_rates[1]: FLAGS.dropout_rate2,
dropout_rates[2]: FLAGS.dropout_rate3,
dropout_rates[3]: FLAGS.dropout_rate4,
dropout_rates[4]: FLAGS.dropout_rate5,
dropout_rates[5]: FLAGS.dropout_rate6,
}
no_dropout_feed_dict = {
rate: 0. for rate in dropout_rates
}
# Building the graph
learning_rate_var = tf.constant(FLAGS.model_learning_rate)
# learning_rate_var = tfv1.get_variable('learning_rate', initializer=FLAGS.model_learning_rate, trainable=False)
# reduce_learning_rate_op = learning_rate_var.assign(tf.multiply(learning_rate_var, FLAGS.plateau_reduction))
optimizer = create_optimizer(learning_rate_var)
# Enable mixed precision training
if FLAGS.automatic_mixed_precision:
log_info('Enabling automatic mixed precision training.')
optimizer = tfv1.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)
gradients, loss, non_finite_files, layers, batch_y, batch_audio = get_tower_results(iterator, optimizer, dropout_rates)
# Average tower gradients across GPUs
avg_tower_gradients = average_gradients(gradients)
log_grads_and_vars(avg_tower_gradients)
# global_step is automagically incremented by the optimizer
global_step = tfv1.train.get_or_create_global_step()
apply_gradient_op = optimizer.apply_gradients(avg_tower_gradients, global_step=global_step)
# Summaries
step_summaries_op = tfv1.summary.merge_all('step_summaries')
step_summary_writers = {
'train': tfv1.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'train'), max_queue=120),
'dev': tfv1.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'dev'), max_queue=120)
}
# Save flags next to checkpoints
os.makedirs(FLAGS.save_checkpoint_dir, exist_ok=True)
flags_file = os.path.join(FLAGS.save_checkpoint_dir, 'flags.txt')
with open(flags_file, 'w') as fout:
fout.write(FLAGS.flags_into_string())
with tfv1.Session(config=Config.session_config) as session:
log_debug('Session opened.')
# Prevent further graph changes
# tfv1.get_default_graph().finalize()
# Load checkpoint or initialize variables
load_or_init_graph_for_training(session)
def run_set(set_name, epoch, init_op, dataset=None):
is_train = set_name == 'train'
train_op = apply_gradient_op if is_train else []
feed_dict = dropout_feed_dict if is_train else no_dropout_feed_dict
total_loss = 0.0
step_count = 0
step_summary_writer = step_summary_writers.get(set_name)
checkpoint_time = time.time()
# Initialize iterator to the appropriate dataset
session.run(init_op)
assert len(gradients) == 1
grads = gradients[0]
grads = {v.op.name: g for g, v in grads}
# Batch loop
try:
_batch_y = tf.sparse.to_dense(batch_y)
model_before = session.run(tf.trainable_variables())
assert FLAGS.num_steps == 1 or FLAGS.train_batch_size == 1
if FLAGS.num_steps > 1: # multi-step
for step in range(FLAGS.num_steps):
_, current_step, batch_loss, var_grads, audio, bx, bx_len, by, problem_files, step_summary = \
session.run([train_op, global_step, loss, grads, batch_audio, layers['input'], layers['input_length'], _batch_y, non_finite_files, step_summaries_op],
feed_dict=feed_dict)
model_after = session.run(tf.trainable_variables())
model_update = {v.op.name: model_before[i] - model_after[i] for i, v in
enumerate(tf.trainable_variables())}
elif FLAGS.train_batch_size > 1 and not FLAGS.export_sample_only: # multi-sample
all_var_grads = None
for sample_id in range(FLAGS.train_batch_size):
current_step, batch_loss, var_grads, problem_files, step_summary = \
session.run([global_step, loss, grads, non_finite_files, step_summaries_op],
feed_dict=dropout_feed_dict)
if all_var_grads is None:
all_var_grads = var_grads
else:
for op_name in all_var_grads:
all_var_grads[op_name] += var_grads[op_name]
for op_name in all_var_grads:
all_var_grads[op_name] /= FLAGS.train_batch_size
model_update = all_var_grads
else:
_, current_step, batch_loss, var_grads, audio, bx, bx_len, layer_1, by, problem_files, step_summary = \
session.run(
[train_op, global_step, loss, grads, batch_audio, layers['input'], layers['input_length'], layers['layer_1'],
_batch_y, non_finite_files, step_summaries_op],
feed_dict=feed_dict)
model_after = session.run(tf.trainable_variables())
model_update = {v.op.name: model_before[i] - model_after[i] for i, v in enumerate(tf.trainable_variables())}
os.makedirs(FLAGS.output_path or 'outputs', exist_ok=True)
if not FLAGS.export_sample_only:
fn = os.path.join(FLAGS.output_path or 'outputs', 'grads.pkl')
with open(fn, 'wb') as f:
pkl.dump(model_update, f)
print("Gradients written to %s" % fn)
if FLAGS.export_sample_only or FLAGS.train_batch_size == 1:
fn = os.path.join(FLAGS.output_path or 'outputs', 'samples.pkl')
with open(fn, 'wb') as f:
pkl.dump([audio, bx, bx_len, by], f)
print("Data sample written to %s" % fn)
if FLAGS.export_dropout_mask:
print('hello')
dropout_tensors = [n for n in tf.get_default_graph().get_operations() if 'dropout' in n.name]
print(dropout_tensors)
print(session.run([dropout_tensors[0]], feed_dict=feed_dict))
input()
print("Loss: " + str(batch_loss))
exception_box.raise_if_set()
except tf.errors.InvalidArgumentError as err:
if FLAGS.augmentation_sparse_warp:
log_info("Ignoring sparse warp error: {}".format(err))
raise
except tf.errors.OutOfRangeError:
exception_box.raise_if_set()
if problem_files.size > 0:
problem_files = [f.decode('utf8') for f in problem_files[..., 0]]
log_error('The following files caused an infinite (or NaN) '
'loss: {}'.format(','.join(problem_files)))
total_loss += batch_loss
step_count += 1
step_summary_writer.add_summary(step_summary, current_step)
mean_loss = total_loss / step_count if step_count > 0 else 0.0
return mean_loss, step_count
log_info('STARTING Optimization')
train_start_time = datetime.utcnow()
best_dev_loss = float('inf')
dev_losses = []
epochs_without_improvement = 0
try:
# Training
train_loss, _ = run_set('train', 0, train_init_op)
except KeyboardInterrupt:
pass
log_debug('Session closed.')
def test():
samples = evaluate(FLAGS.test_files.split(','), create_model)
if FLAGS.test_output_file:
save_samples_json(samples, FLAGS.test_output_file)
def create_inference_graph(batch_size=1, n_steps=16, tflite=False):
batch_size = batch_size if batch_size > 0 else None
# Create feature computation graph
input_samples = tfv1.placeholder(tf.float32, [Config.audio_window_samples], 'input_samples')
samples = tf.expand_dims(input_samples, -1)
mfccs, _ = samples_to_mfccs(samples, FLAGS.audio_sample_rate)
mfccs = tf.identity(mfccs, name='mfccs')
# Input tensor will be of shape [batch_size, n_steps, 2*n_context+1, n_input]
# This shape is read by the native_client in DS_CreateModel to know the
# value of n_steps, n_context and n_input. Make sure you update the code
# there if this shape is changed.
input_tensor = tfv1.placeholder(tf.float32, [batch_size, n_steps if n_steps > 0 else None, 2 * Config.n_context + 1, Config.n_input], name='input_node')
seq_length = tfv1.placeholder(tf.int32, [batch_size], name='input_lengths')
if batch_size <= 0:
# no state management since n_step is expected to be dynamic too (see below)
previous_state = None
else:
previous_state_c = tfv1.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_c')
previous_state_h = tfv1.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_h')
previous_state = tf.nn.rnn_cell.LSTMStateTuple(previous_state_c, previous_state_h)
# One rate per layer
no_dropout = [None] * 6
if tflite:
rnn_impl = rnn_impl_static_rnn
else:
rnn_impl = rnn_impl_lstmblockfusedcell
logits, layers = create_model(batch_x=input_tensor,
batch_size=batch_size,
seq_length=seq_length if not FLAGS.export_tflite else None,
dropout=no_dropout,
previous_state=previous_state,
overlap=False,
rnn_impl=rnn_impl)
# TF Lite runtime will check that input dimensions are 1, 2 or 4
# by default we get 3, the middle one being batch_size which is forced to
# one on inference graph, so remove that dimension
if tflite:
logits = tf.squeeze(logits, [1])
# Apply softmax for CTC decoder
logits = tf.nn.softmax(logits, name='logits')
if batch_size <= 0:
if tflite:
raise NotImplementedError('dynamic batch_size does not support tflite nor streaming')
if n_steps > 0:
raise NotImplementedError('dynamic batch_size expect n_steps to be dynamic too')
return (
{
'input': input_tensor,
'input_lengths': seq_length,
},
{
'outputs': logits,
},
layers
)
new_state_c, new_state_h = layers['rnn_output_state']
new_state_c = tf.identity(new_state_c, name='new_state_c')
new_state_h = tf.identity(new_state_h, name='new_state_h')
inputs = {
'input': input_tensor,
'previous_state_c': previous_state_c,
'previous_state_h': previous_state_h,
'input_samples': input_samples,
}
if not FLAGS.export_tflite:
inputs['input_lengths'] = seq_length
outputs = {
'outputs': logits,
'new_state_c': new_state_c,
'new_state_h': new_state_h,
'mfccs': mfccs,
}
return inputs, outputs, layers
def file_relative_read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def export():
r'''
Restores the trained variables into a simpler graph that will be exported for serving.
'''
log_info('Exporting the model...')
inputs, outputs, _ = create_inference_graph(batch_size=FLAGS.export_batch_size, n_steps=FLAGS.n_steps, tflite=FLAGS.export_tflite)
graph_version = int(file_relative_read('GRAPH_VERSION').strip())
assert graph_version > 0
outputs['metadata_version'] = tf.constant([graph_version], name='metadata_version')
outputs['metadata_sample_rate'] = tf.constant([FLAGS.audio_sample_rate], name='metadata_sample_rate')
outputs['metadata_feature_win_len'] = tf.constant([FLAGS.feature_win_len], name='metadata_feature_win_len')
outputs['metadata_feature_win_step'] = tf.constant([FLAGS.feature_win_step], name='metadata_feature_win_step')
outputs['metadata_beam_width'] = tf.constant([FLAGS.export_beam_width], name='metadata_beam_width')
outputs['metadata_alphabet'] = tf.constant([Config.alphabet.serialize()], name='metadata_alphabet')
if FLAGS.export_language:
outputs['metadata_language'] = tf.constant([FLAGS.export_language.encode('utf-8')], name='metadata_language')
# Prevent further graph changes
tfv1.get_default_graph().finalize()
output_names_tensors = [tensor.op.name for tensor in outputs.values() if isinstance(tensor, tf.Tensor)]
output_names_ops = [op.name for op in outputs.values() if isinstance(op, tf.Operation)]
output_names = output_names_tensors + output_names_ops
with tf.Session() as session:
# Restore variables from checkpoint
load_graph_for_evaluation(session)
output_filename = FLAGS.export_file_name + '.pb'
if FLAGS.remove_export:
if os.path.isdir(FLAGS.export_dir):
log_info('Removing old export')
shutil.rmtree(FLAGS.export_dir)
output_graph_path = os.path.join(FLAGS.export_dir, output_filename)
if not os.path.isdir(FLAGS.export_dir):
os.makedirs(FLAGS.export_dir)
frozen_graph = tfv1.graph_util.convert_variables_to_constants(
sess=session,
input_graph_def=tfv1.get_default_graph().as_graph_def(),
output_node_names=output_names)
frozen_graph = tfv1.graph_util.extract_sub_graph(
graph_def=frozen_graph,
dest_nodes=output_names)
if not FLAGS.export_tflite:
with open(output_graph_path, 'wb') as fout:
fout.write(frozen_graph.SerializeToString())
else:
output_tflite_path = os.path.join(FLAGS.export_dir, output_filename.replace('.pb', '.tflite'))
converter = tf.lite.TFLiteConverter(frozen_graph, input_tensors=inputs.values(), output_tensors=outputs.values())
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# AudioSpectrogram and Mfcc ops are custom but have built-in kernels in TFLite
converter.allow_custom_ops = True
tflite_model = converter.convert()
with open(output_tflite_path, 'wb') as fout:
fout.write(tflite_model)
log_info('Models exported at %s' % (FLAGS.export_dir))
metadata_fname = os.path.join(FLAGS.export_dir, '{}_{}_{}.md'.format(
FLAGS.export_author_id,
FLAGS.export_model_name,
FLAGS.export_model_version))
model_runtime = 'tflite' if FLAGS.export_tflite else 'tensorflow'
with open(metadata_fname, 'w') as f:
f.write('---\n')
f.write('author: {}\n'.format(FLAGS.export_author_id))
f.write('model_name: {}\n'.format(FLAGS.export_model_name))
f.write('model_version: {}\n'.format(FLAGS.export_model_version))
f.write('contact_info: {}\n'.format(FLAGS.export_contact_info))
f.write('license: {}\n'.format(FLAGS.export_license))
f.write('language: {}\n'.format(FLAGS.export_language))
f.write('runtime: {}\n'.format(model_runtime))
f.write('min_ds_version: {}\n'.format(FLAGS.export_min_ds_version))
f.write('max_ds_version: {}\n'.format(FLAGS.export_max_ds_version))
f.write('acoustic_model_url: <replace this with a publicly available URL of the acoustic model>\n')
f.write('scorer_url: <replace this with a publicly available URL of the scorer, if present>\n')
f.write('---\n')
f.write('{}\n'.format(FLAGS.export_description))
log_info('Model metadata file saved to {}. Before submitting the exported model for publishing make sure all information in the metadata file is correct, and complete the URL fields.'.format(metadata_fname))
def package_zip():
# --export_dir path/to/export/LANG_CODE/ => path/to/export/LANG_CODE.zip
export_dir = os.path.join(os.path.abspath(FLAGS.export_dir), '') # Force ending '/'
zip_filename = os.path.dirname(export_dir)
shutil.copy(FLAGS.scorer_path, export_dir)
archive = shutil.make_archive(zip_filename, 'zip', export_dir)
log_info('Exported packaged model {}'.format(archive))
def do_single_file_inference(input_file_path):
with tfv1.Session(config=Config.session_config) as session:
inputs, outputs, _ = create_inference_graph(batch_size=1, n_steps=-1)
# Restore variables from training checkpoint
load_graph_for_evaluation(session)
features, features_len = audiofile_to_features(input_file_path)
previous_state_c = np.zeros([1, Config.n_cell_dim])
previous_state_h = np.zeros([1, Config.n_cell_dim])
# Add batch dimension
features = tf.expand_dims(features, 0)
features_len = tf.expand_dims(features_len, 0)
# Evaluate
features = create_overlapping_windows(features).eval(session=session)
features_len = features_len.eval(session=session)
logits = outputs['outputs'].eval(feed_dict={
inputs['input']: features,
inputs['input_lengths']: features_len,
inputs['previous_state_c']: previous_state_c,
inputs['previous_state_h']: previous_state_h,
}, session=session)
logits = np.squeeze(logits)
if FLAGS.scorer_path:
scorer = Scorer(FLAGS.lm_alpha, FLAGS.lm_beta,
FLAGS.scorer_path, Config.alphabet)
else:
scorer = None
decoded = ctc_beam_search_decoder(logits, Config.alphabet, FLAGS.beam_width,
scorer=scorer, cutoff_prob=FLAGS.cutoff_prob,
cutoff_top_n=FLAGS.cutoff_top_n)
# Print highest probability result
print(decoded[0][1])
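# Note on the inference path above: previous_state_c and previous_state_h are
# the recurrent cell and hidden state tensors (shape [1, Config.n_cell_dim]).
# They start at zero for a fresh utterance; a streaming caller would presumably
# feed them back from one audio chunk to the next instead.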
def early_training_checks():
# Check for proper scorer early
if FLAGS.scorer_path:
scorer = Scorer(FLAGS.lm_alpha, FLAGS.lm_beta,
FLAGS.scorer_path, Config.alphabet)
del scorer
if FLAGS.train_files and FLAGS.test_files and FLAGS.load_checkpoint_dir != FLAGS.save_checkpoint_dir:
log_warn('WARNING: You specified different values for --load_checkpoint_dir '
'and --save_checkpoint_dir, but you are running training and testing '
'in a single invocation. The testing step will respect --load_checkpoint_dir, '
'and thus WILL NOT TEST THE CHECKPOINT CREATED BY THE TRAINING STEP. '
'Train and test in two separate invocations, specifying the correct '
'--load_checkpoint_dir in both cases, or use the same location '
'for loading and saving.')
def main(_):
initialize_globals()
early_training_checks()
if FLAGS.train_files:
tfv1.reset_default_graph()
tfv1.set_random_seed(FLAGS.random_seed)
train()
if FLAGS.test_files:
tfv1.reset_default_graph()
test()
if FLAGS.export_dir and not FLAGS.export_zip:
tfv1.reset_default_graph()
export()
if FLAGS.export_zip:
tfv1.reset_default_graph()
FLAGS.export_tflite = True
if os.listdir(FLAGS.export_dir):
log_error('Directory {} is not empty, please fix this.'.format(FLAGS.export_dir))
sys.exit(1)
export()
package_zip()
if FLAGS.one_shot_infer:
tfv1.reset_default_graph()
do_single_file_inference(FLAGS.one_shot_infer)
def run_script():
create_flags()
absl.app.run(main)
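# A rough sketch of a typical invocation of this entry point (the script name
# and file paths are hypothetical; the flag names are the ones referenced
# above):
#
#   python train.py --train_files train.csv --test_files test.csv \
#       --export_dir /tmp/export --scorer_path kenlm.scorer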
if __name__ == '__main__':
run_script()
|
|
from sympy.core import symbols, S, Pow, Function
from sympy.functions import exp
from sympy.utilities.pytest import raises
from sympy.tensor.indexed import Idx, IndexedBase
from sympy.tensor.index_methods import IndexConformanceException
from sympy import get_contraction_structure, get_indices
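# A quick illustrative sketch of the two helpers exercised below (interpreting
# a repeated index as summed, as the assertions in this module do):
#
#   get_indices(x[i]*y[i])               -> (set(), {})          # i is contracted away
#   get_contraction_structure(x[i]*y[i]) -> {(i,): {x[i]*y[i]}}  # i labels the contraction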
def test_trivial_indices():
x, y = symbols('x y')
assert get_indices(x) == (set([]), {})
assert get_indices(x*y) == (set([]), {})
assert get_indices(x + y) == (set([]), {})
assert get_indices(x**y) == (set([]), {})
def test_get_indices_Indexed():
x = IndexedBase('x')
i, j = Idx('i'), Idx('j')
assert get_indices(x[i, j]) == (set([i, j]), {})
assert get_indices(x[j, i]) == (set([j, i]), {})
def test_get_indices_Idx():
f = Function('f')
i, j = Idx('i'), Idx('j')
assert get_indices(f(i)*j) == (set([i, j]), {})
assert get_indices(f(j, i)) == (set([j, i]), {})
assert get_indices(f(i)*i) == (set(), {})
def test_get_indices_mul():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
assert get_indices(x[j]*y[i]) == (set([i, j]), {})
assert get_indices(x[i]*y[j]) == (set([i, j]), {})
def test_get_indices_exceptions():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
raises(IndexConformanceException, lambda: get_indices(x[i] + y[j]))
def test_scalar_broadcast():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
assert get_indices(x[i] + y[i, i]) == (set([i]), {})
assert get_indices(x[i] + y[j, j]) == (set([i]), {})
def test_get_indices_add():
x = IndexedBase('x')
y = IndexedBase('y')
A = IndexedBase('A')
i, j, k = Idx('i'), Idx('j'), Idx('k')
assert get_indices(x[i] + 2*y[i]) == (set([i, ]), {})
assert get_indices(y[i] + 2*A[i, j]*x[j]) == (set([i, ]), {})
assert get_indices(y[i] + 2*(x[i] + A[i, j]*x[j])) == (set([i, ]), {})
assert get_indices(y[i] + x[i]*(A[j, j] + 1)) == (set([i, ]), {})
assert get_indices(
y[i] + x[i]*x[j]*(y[j] + A[j, k]*x[k])) == (set([i, ]), {})
def test_get_indices_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
A = IndexedBase('A')
i, j, k = Idx('i'), Idx('j'), Idx('k')
assert get_indices(Pow(x[i], y[j])) == (set([i, j]), {})
assert get_indices(Pow(x[i, k], y[j, k])) == (set([i, j, k]), {})
assert get_indices(Pow(A[i, k], y[k] + A[k, j]*x[j])) == (set([i, k]), {})
assert get_indices(Pow(2, x[i])) == get_indices(exp(x[i]))
# test of a design decision, this may change:
assert get_indices(Pow(x[i], 2)) == (set([i, ]), {})
def test_get_contraction_structure_basic():
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
assert get_contraction_structure(x[i]*y[j]) == {None: set([x[i]*y[j]])}
assert get_contraction_structure(x[i] + y[j]) == {None: set([x[i], y[j]])}
assert get_contraction_structure(x[i]*y[i]) == {(i,): set([x[i]*y[i]])}
assert get_contraction_structure(
1 + x[i]*y[i]) == {None: set([S.One]), (i,): set([x[i]*y[i]])}
assert get_contraction_structure(x[i]**y[i]) == {None: set([x[i]**y[i]])}
def test_get_contraction_structure_complex():
x = IndexedBase('x')
y = IndexedBase('y')
A = IndexedBase('A')
i, j, k = Idx('i'), Idx('j'), Idx('k')
expr1 = y[i] + A[i, j]*x[j]
d1 = {None: set([y[i]]), (j,): set([A[i, j]*x[j]])}
assert get_contraction_structure(expr1) == d1
expr2 = expr1*A[k, i] + x[k]
d2 = {None: set([x[k]]), (i,): set([expr1*A[k, i]]), expr1*A[k, i]: [d1]}
assert get_contraction_structure(expr2) == d2
def test_contraction_structure_simple_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
i, j, k = Idx('i'), Idx('j'), Idx('k')
ii_jj = x[i, i]**y[j, j]
assert get_contraction_structure(ii_jj) == {
None: set([ii_jj]),
ii_jj: [
{(i,): set([x[i, i]])},
{(j,): set([y[j, j]])}
]
}
ii_jk = x[i, i]**y[j, k]
assert get_contraction_structure(ii_jk) == {
None: set([x[i, i]**y[j, k]]),
x[i, i]**y[j, k]: [
{(i,): set([x[i, i]])}
]
}
def test_contraction_structure_Mul_and_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
i, j, k = Idx('i'), Idx('j'), Idx('k')
i_ji = x[i]**(y[j]*x[i])
assert get_contraction_structure(i_ji) == {None: set([i_ji])}
ij_i = (x[i]*y[j])**(y[i])
assert get_contraction_structure(ij_i) == {None: set([ij_i])}
j_ij_i = x[j]*(x[i]*y[j])**(y[i])
assert get_contraction_structure(j_ij_i) == {(j,): set([j_ij_i])}
j_i_ji = x[j]*x[i]**(y[j]*x[i])
assert get_contraction_structure(j_i_ji) == {(j,): set([j_i_ji])}
ij_exp_kki = x[i]*y[j]*exp(y[i]*y[k, k])
result = get_contraction_structure(ij_exp_kki)
expected = {
(i,): set([ij_exp_kki]),
ij_exp_kki: [{
None: set([exp(y[i]*y[k, k])]),
exp(y[i]*y[k, k]): [{
None: set([y[i]*y[k, k]]),
y[i]*y[k, k]: [{(k,): set([y[k, k]])}]
}]}
]
}
assert result == expected
def test_contraction_structure_Add_in_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
i, j, k = Idx('i'), Idx('j'), Idx('k')
s_ii_jj_s = (1 + x[i, i])**(1 + y[j, j])
expected = {
None: set([s_ii_jj_s]),
s_ii_jj_s: [
{None: set([S.One]), (i,): set([x[i, i]])},
{None: set([S.One]), (j,): set([y[j, j]])}
]
}
result = get_contraction_structure(s_ii_jj_s)
assert result == expected
s_ii_jk_s = (1 + x[i, i]) ** (1 + y[j, k])
expected_2 = {
None: set([(x[i, i] + 1)**(y[j, k] + 1)]),
s_ii_jk_s: [
{None: set([S.One]), (i,): set([x[i, i]])}
]
}
result_2 = get_contraction_structure(s_ii_jk_s)
assert result_2 == expected_2
def test_contraction_structure_Pow_in_Pow():
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i, j, k = Idx('i'), Idx('j'), Idx('k')
ii_jj_kk = x[i, i]**y[j, j]**z[k, k]
expected = {
None: set([ii_jj_kk]),
ii_jj_kk: [
{(i,): set([x[i, i]])},
{
None: set([y[j, j]**z[k, k]]),
y[j, j]**z[k, k]: [
{(j,): set([y[j, j]])},
{(k,): set([z[k, k]])}
]
}
]
}
assert get_contraction_structure(ii_jj_kk) == expected
def test_ufunc_support():
f = Function('f')
g = Function('g')
x = IndexedBase('x')
y = IndexedBase('y')
i, j = Idx('i'), Idx('j')
a = symbols('a')
assert get_indices(f(x[i])) == (set([i]), {})
assert get_indices(f(x[i], y[j])) == (set([i, j]), {})
assert get_indices(f(y[i])*g(x[i])) == (set(), {})
assert get_indices(f(a, x[i])) == (set([i]), {})
assert get_indices(f(a, y[i], x[j])*g(x[i])) == (set([j]), {})
assert get_indices(g(f(x[i]))) == (set([i]), {})
assert get_contraction_structure(f(x[i])) == {None: set([f(x[i])])}
assert get_contraction_structure(
f(y[i])*g(x[i])) == {(i,): set([f(y[i])*g(x[i])])}
assert get_contraction_structure(
f(y[i])*g(f(x[i]))) == {(i,): set([f(y[i])*g(f(x[i]))])}
assert get_contraction_structure(
f(x[j], y[i])*g(x[i])) == {(i,): set([f(x[j], y[i])*g(x[i])])}
|
|
import unittest
import mock
from tavrida import dispatcher
from tavrida import entry_point
from tavrida import exceptions
from tavrida import messages
from tavrida import router
from tavrida import service
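# The tests below exercise tavrida's Dispatcher, whose handler registry is
# keyed first by message type and then by entry point string. A sketch of the
# expected shape, based on the assertions in this module (names hypothetical):
#
#   dispatcher._handlers == {
#       "request": {"service.method": "handler_method_name"},
#       "response": {}, "error": {}, "notification": {},
#   }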
class DispatcherTestCase(unittest.TestCase):
def setUp(self):
super(DispatcherTestCase, self).setUp()
self.dispatcher = dispatcher.Dispatcher()
def test_handlers_dict(self):
"""
Tests that _handlers dict contains correct sub-dicts
"""
self.assertDictEqual(self.dispatcher.handlers,
{"request": {},
"response": {},
"error": {},
"notification": {}})
def test_subscriptions_dict(self):
"""
Tests that the subscriptions dict is equal to the 'notification' handlers dict
"""
self.dispatcher._handlers["notification"] = {"some_key": "some_value"}
self.assertDictEqual(self.dispatcher.subscriptions,
self.dispatcher._handlers["notification"])
def test_register_duplicated_entry_point(self):
"""
Tests registration of a duplicated entry point
"""
ep = entry_point.EntryPoint("service", "method")
message_type = "request"
method_name = "method_name"
self.dispatcher.register(ep, message_type, method_name)
self.assertRaises(exceptions.DuplicatedEntryPointRegistration,
self.dispatcher.register, ep, message_type,
method_name)
def test_register_duplicated_handler(self):
"""
Tests registration of a duplicated handler method
"""
ep1 = entry_point.EntryPoint("service1", "method1")
ep2 = entry_point.EntryPoint("service2", "method2")
message_type = "request"
handler_method_name = "method_name"
self.dispatcher.register(ep1, message_type, handler_method_name)
self.assertRaises(exceptions.DuplicatedMethodRegistration,
self.dispatcher.register, ep2, message_type,
handler_method_name)
def test_register_entry_point_handler_positive(self):
"""
Tests successful handler registration for entry point
"""
ep = entry_point.EntryPoint("service", "method")
message_type = "request"
method_name = "method_name"
self.dispatcher.register(ep, message_type, method_name)
self.assertEqual(self.dispatcher.handlers[message_type][str(ep)],
method_name)
def test_get_handler_positive(self):
"""
Tests getting the handler registered for an entry point
"""
ep = entry_point.EntryPoint("service", "method")
message_type = "request"
method_name = "method_name"
self.dispatcher.register(ep, message_type, method_name)
self.assertEqual(self.dispatcher.get_handler(ep, message_type),
method_name)
def test_get_handler_for_unknown_message_type(self):
"""
Tests get handler for unknown message type
"""
ep = entry_point.EntryPoint("service", "method")
message_type = "zzz"
self.assertRaises(exceptions.HandlerNotFound,
self.dispatcher.get_handler,
ep, message_type)
def test_get_handler_for_unregistered_entry_point(self):
"""
Tests get handler for unregistered entry point
"""
ep = entry_point.EntryPoint("service", "method")
message_type = "request"
self.assertRaises(exceptions.HandlerNotFound,
self.dispatcher.get_handler,
ep, message_type)
def test_get_publishers(self):
"""
Tests entry points for publishers
"""
ep = entry_point.EntryPoint("service", "method")
message_type = "notification"
method_name = "method_name"
self.dispatcher.register(ep, message_type, method_name)
res = list(self.dispatcher.get_publishers())
self.assertListEqual(res, [ep])
def test_get_request_entry_services(self):
"""
Tests entry point's services for requests
"""
ep = entry_point.EntryPoint("service", "method")
message_type = "request"
method_name = "method_name"
self.dispatcher.register(ep, message_type, method_name)
res = list(self.dispatcher.get_request_entry_services())
self.assertListEqual(res, [ep.service])
def test_get_dispatching_entry_point_for_request(self):
"""
Tests request destination is used for dispatching
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123"
}
context = {}
payload = {}
msg = messages.IncomingRequest(headers, context, payload)
ep = self.dispatcher._get_dispatching_entry_point(msg)
self.assertEqual(ep, msg.destination)
def test_get_dispatching_entry_point_for_response(self):
"""
Tests response source is used for dispatching
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123"
}
context = {}
payload = {}
msg = messages.IncomingResponse(headers, context, payload)
ep = self.dispatcher._get_dispatching_entry_point(msg)
self.assertEqual(ep, msg.source)
def test_get_dispatching_entry_point_for_error(self):
"""
Tests error source is used for dispatching
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123"
}
context = {}
payload = {}
msg = messages.IncomingError(headers, context, payload)
ep = self.dispatcher._get_dispatching_entry_point(msg)
self.assertEqual(ep, msg.source)
def test_get_dispatching_entry_point_for_notification(self):
"""
Tests notification source is used for dispatching
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123"
}
context = {}
payload = {}
msg = messages.IncomingNotification(headers, context, payload)
ep = self.dispatcher._get_dispatching_entry_point(msg)
self.assertEqual(ep, msg.source)
def test_get_source_context_for_request(self):
"""
Tests request destination is used as source in proxy
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123"
}
context = {}
payload = {}
service_instance = mock.MagicMock()
msg = messages.IncomingRequest(headers, context, payload)
ep = self.dispatcher._get_source_context(msg, service_instance)
self.assertEqual(ep, msg.destination)
def test_get_source_context_for_response(self):
"""
Tests response destination is used as source in proxy
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123"
}
context = {}
payload = {}
service_instance = mock.MagicMock()
msg = messages.IncomingResponse(headers, context, payload)
ep = self.dispatcher._get_source_context(msg, service_instance)
self.assertEqual(ep, msg.destination)
def test_get_source_context_for_error(self):
"""
Tests error destination is used as source in proxy
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123"
}
context = {}
payload = {}
service_instance = mock.MagicMock()
msg = messages.IncomingError(headers, context, payload)
ep = self.dispatcher._get_source_context(msg, service_instance)
self.assertEqual(ep, msg.destination)
def test_get_source_context_for_notification(self):
"""
Tests the service entry point is used as source in proxy for notifications
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123"
}
context = {}
payload = {}
service_instance = mock.MagicMock()
service_instance.service_name = "service_name"
msg = messages.IncomingNotification(headers, context, payload)
ep = self.dispatcher._get_source_context(msg, service_instance)
self.assertEqual(ep, entry_point.ServiceEntryPoint(
service_instance.service_name))
@mock.patch.object(dispatcher.Dispatcher, "_create_rpc_proxy")
def test_process_request_by_service_instance(self, create_proxy_mock):
"""
Tests that service instance processes message
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123",
"message_type": "request"
}
context = {}
payload = {}
service_instance = mock.MagicMock()
msg = messages.IncomingRequest(headers, context, payload)
ep = entry_point.EntryPointFactory().create(msg.destination)
message_type = "request"
method_name = "some_handler"
self.dispatcher.register(ep, message_type, method_name)
res = self.dispatcher.process(msg, service_instance)
service_instance.process.assert_called_once_with(method_name, msg,
create_proxy_mock())
self.assertEqual(res, service_instance.process())
@mock.patch.object(dispatcher.Dispatcher, "_create_rpc_proxy")
def test_process_response_by_service_instance(self, create_proxy_mock):
"""
Tests that service instance processes message
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123",
"message_type": "response"
}
context = {}
payload = {}
service_instance = mock.MagicMock()
msg = messages.IncomingResponse(headers, context, payload)
ep = entry_point.EntryPointFactory().create(msg.source)
message_type = "response"
method_name = "some_handler"
self.dispatcher.register(ep, message_type, method_name)
res = self.dispatcher.process(msg, service_instance)
service_instance.process.assert_called_once_with(method_name, msg,
create_proxy_mock())
self.assertEqual(res, service_instance.process())
@mock.patch.object(dispatcher.Dispatcher, "_create_rpc_proxy")
def test_process_error_by_service_instance(self, create_proxy_mock):
"""
Tests that service instance processes message
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123",
"message_type": "error"
}
context = {}
payload = {}
service_instance = mock.MagicMock()
msg = messages.IncomingError(headers, context, payload)
ep = entry_point.EntryPointFactory().create(msg.source)
message_type = "error"
method_name = "some_handler"
self.dispatcher.register(ep, message_type, method_name)
res = self.dispatcher.process(msg, service_instance)
service_instance.process.assert_called_once_with(method_name, msg,
create_proxy_mock())
self.assertEqual(res, service_instance.process())
@mock.patch.object(dispatcher.Dispatcher, "_create_rpc_proxy")
def test_process_notification_by_service_instance(self, create_proxy_mock):
"""
Tests that service instance processes message
"""
headers = {
"source": "src_service.src_method",
"destination": "dst_service.dst_method",
"reply_to": "rpl_service.rpl_method",
"correlation_id": "123",
"message_type": "notification"
}
context = {}
payload = {}
service_instance = mock.MagicMock()
msg = messages.IncomingNotification(headers, context, payload)
ep = entry_point.EntryPointFactory().create(msg.source)
message_type = "notification"
method_name = "some_handler"
self.dispatcher.register(ep, message_type, method_name)
res = self.dispatcher.process(msg, service_instance)
service_instance.process.assert_called_once_with(method_name, msg,
create_proxy_mock())
self.assertEqual(res, service_instance.process())
class RegisterRPCHandlerTestCase(unittest.TestCase):
def tearDown(self):
router.Router._services = []
def test_register_handler_request(self):
src = entry_point.EntryPointFactory().create("src_service.src_method")
dst = entry_point.EntryPointFactory().create("dst_service.rpl_method")
@dispatcher.rpc_service(dst.service)
class SomeControllerClass(service.ServiceController):
@dispatcher.rpc_method(service=dst.service, method=dst.method)
def handler(self):
pass
headers = {
"source": str(src),
"destination": str(dst),
"reply_to": str(src),
"correlation_id": "123",
"message_type": "request"
}
context = {}
payload = {}
message = messages.IncomingRequest(headers, context, payload)
service_cls = router.Router().get_rpc_service_cls(message)
self.assertEqual(service_cls, SomeControllerClass)
controller = SomeControllerClass(mock.MagicMock())
disp = controller.get_dispatcher()
handler = disp.get_handler(disp._get_dispatching_entry_point(message),
"request")
self.assertEqual("handler", handler)
def test_register_handler_response(self):
src = entry_point.EntryPointFactory().create("src_service.src_method")
dst = entry_point.EntryPointFactory().create("dst_service.rpl_method")
@dispatcher.rpc_service(dst.service)
class SomeControllerClass(service.ServiceController):
@dispatcher.rpc_response_method(service=src.service,
method=src.method)
def handler(self):
pass
headers = {
"source": str(src),
"destination": str(dst),
"reply_to": "",
"correlation_id": "123",
"message_type": "response"
}
context = {}
payload = {}
message = messages.IncomingResponse(headers, context, payload)
service_cls = router.Router().get_rpc_service_cls(message)
self.assertEqual(service_cls, SomeControllerClass)
controller = SomeControllerClass(mock.MagicMock())
disp = controller.get_dispatcher()
handler = disp.get_handler(disp._get_dispatching_entry_point(message),
"response")
self.assertEqual("handler", handler)
def test_register_handler_error(self):
src = entry_point.EntryPointFactory().create("src_service.src_method")
dst = entry_point.EntryPointFactory().create("dst_service.rpl_method")
@dispatcher.rpc_service(dst.service)
class SomeControllerClass(service.ServiceController):
@dispatcher.rpc_error_method(service=src.service,
method=src.method)
def handler(self):
pass
headers = {
"source": str(src),
"destination": str(dst),
"reply_to": "",
"correlation_id": "123",
"message_type": "error"
}
context = {}
payload = {}
message = messages.IncomingError(headers, context, payload)
service_cls = router.Router().get_rpc_service_cls(message)
self.assertEqual(service_cls, SomeControllerClass)
controller = SomeControllerClass(mock.MagicMock())
disp = controller.get_dispatcher()
handler = disp.get_handler(disp._get_dispatching_entry_point(message),
"error")
self.assertEqual("handler", handler)
def test_register_handler_subscription(self):
src = entry_point.EntryPointFactory().create("src_service.src_method")
dst = entry_point.EntryPointFactory().create("dst_service.rpl_method")
@dispatcher.rpc_service(dst.service)
class SomeControllerClass(service.ServiceController):
@dispatcher.subscription_method(service=src.service,
method=src.method)
def handler(self):
pass
headers = {
"source": str(src),
"destination": "",
"reply_to": "",
"correlation_id": "123",
"message_type": "notification"
}
context = {}
payload = {}
message = messages.IncomingNotification(headers, context, payload)
service_classes = router.Router().get_subscription_cls(message)
self.assertEqual(service_classes, [SomeControllerClass])
controller = SomeControllerClass(mock.MagicMock())
disp = controller.get_dispatcher()
handler = controller.get_dispatcher().get_handler(
disp._get_dispatching_entry_point(message), "notification")
self.assertEqual("handler", handler)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions.
NOTE: functions are currently experimental and subject to change!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import re
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _make_argname_from_tensor_name(name):
return re.sub(":0$", "", name).replace(":", "_o")
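# For illustration (tensor names are hypothetical): "x:0" maps to "x", while
# "split:1" maps to "split_o1", since only a trailing ":0" is stripped and any
# remaining ":" becomes "_o".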
def _tensor_to_argdef(t, name=None, used_names=None):
"""Convert tensor t to an argdef, with a specified name or a unique name."""
arg = op_def_pb2.OpDef.ArgDef()
if name is None:
arg.name = _make_argname_from_tensor_name(t.name)
if used_names is not None:
if arg.name in used_names:
i = 0
while True:
new_name = "%s_U%d" % (arg.name, i)
if new_name not in used_names:
arg.name = new_name
break
i += 1
used_names.add(arg.name)
else:
arg.name = name
arg.type = t.dtype.as_datatype_enum
return arg
def _get_node_def(op):
return op._node_def # pylint: disable=protected-access
def _get_op_def(op):
return op.op_def or op_def_registry.get_registered_ops()[op.type]
def _is_in_placeholders(op, func_arg_placeholders):
return op.values() and (op.values()[0].name in func_arg_placeholders)
def _create_input_dict(function_graph, func_arg_placeholders):
"""Create a mapping from graph tensor names to function tensor names."""
input_dict = {}
for op in function_graph.get_operations():
if _is_in_placeholders(op, func_arg_placeholders):
input_dict[op.values()[0].name] = op.values()[0].name
input_dict[op.name] = op.name
else:
op_def = _get_op_def(op)
attrs = _get_node_def(op).attr
o = 0
for arg_def in op_def.output_arg:
if arg_def.number_attr:
num = attrs[arg_def.number_attr].i
elif arg_def.type_list_attr:
num = len(attrs[arg_def.type_list_attr].list.type)
else:
num = 1
for i in range(num):
result = "%s:%s:%d" % (op.name, arg_def.name, i)
input_dict[op.values()[o].name] = result
if o == 0:
input_dict[op.name] = result
o += 1
return input_dict
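# Sketch of the mapping built above (op and output-arg names are hypothetical):
# a non-placeholder op "mm" whose OpDef declares a single output arg "product"
# contributes {"mm:0": "mm:product:0", "mm": "mm:product:0"}, i.e. graph tensor
# names are rewritten into the "op:arg_name:index" form used inside a
# FunctionDef.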
def _add_op_node(op, func, input_dict):
"""Converts an op to a function def node and add it to `func`."""
# Add an entry in func.node_def
# Note that extend() makes a copy in this case, see:
# https://developers.google.com/protocol-buffers/docs/reference/python-generated#repeated-message-fields
func.node_def.extend([_get_node_def(op)])
node_def = func.node_def[-1]
for i in range(len(node_def.input)):
if not node_def.input[i].startswith("^"):
assert node_def.input[i] in input_dict, ("%s missing from %s" %
(node_def.input[i],
input_dict.items()))
node_def.input[i] = input_dict[node_def.input[i]]
def _graph_to_function_def(graph, operations, inputs, outputs, out_names=None):
"""Returns `graph` as a `FunctionDef` protocol buffer.
This method creates a [`FunctionDef`](
https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)
protocol buffer that contains all the ops in `operations`. The
operations become the body of the function.
The arguments `inputs` and `outputs` will be listed as the inputs
and outputs tensors of the function. They must be lists of
tensors present in the graph. The lists can optionally be empty.
Args:
graph: Graph.
operations: the operations to put in the function. Must be a subset of
the operations in the graph.
inputs: List of tensors. Inputs to the function.
outputs: List of tensors. Outputs of the function.
out_names: Optional list of string names for the outputs.
Returns:
A FunctionDef protocol buffer.
Raises:
ValueError: if out_names is specified and has the wrong length.
"""
func = function_pb2.FunctionDef()
func.signature.name = "_"
used_names = set()
func.signature.input_arg.extend(
[_tensor_to_argdef(i, used_names=used_names) for i in inputs])
if out_names is None:
used_names = set()
func.signature.output_arg.extend(
[_tensor_to_argdef(o, used_names=used_names) for o in outputs])
elif len(outputs) != len(out_names):
raise ValueError(
"Length of out_names (%d) does not match number of outputs (%d): %s" %
(len(out_names), len(outputs), ", ".join(out_names)))
elif len(out_names) != len(set(out_names)):
raise ValueError(
"Must not have duplicates in out_names: %s" % ", ".join(out_names))
else:
func.signature.output_arg.extend(
[_tensor_to_argdef(o, name=n) for o, n in zip(outputs, out_names)])
func_arg_placeholders = set([i.name for i in inputs])
input_dict = _create_input_dict(graph, func_arg_placeholders)
for op in operations:
if _is_in_placeholders(op, func_arg_placeholders):
continue
_add_op_node(op, func, input_dict)
if out_names is None:
for index, o in enumerate(outputs):
k = func.signature.output_arg[index].name
func.ret[k] = input_dict[o.name]
else:
for o, n in zip(outputs, out_names):
func.ret[n] = input_dict[o.name]
return func
def _parse_kwargs_as_attrs(func_name, **kwargs):
"""Parses **kwargs into a node's attributes."""
attrs = {}
noinline = kwargs.pop("noinline", None)
if noinline is not None:
attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))
compiled = kwargs.pop("compiled", None)
separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None)
if compiled is not None:
attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
b=bool(separate_compiled_gradients))
attrs["_XlaScope"] = attr_value_pb2.AttrValue(
s=("function_%s" % func_name).encode())
if kwargs:
raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
return attrs
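# For illustration (the function name is hypothetical):
#   _parse_kwargs_as_attrs("MyFunc", noinline=True)
# returns {"_noinline": attr_value_pb2.AttrValue(b=True)}; passing
# compiled=True would instead set the _XlaCompile, _XlaSeparateCompiledGradients
# and _XlaScope attrs built above.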
def _call(sig, *inputs, **kwargs):
"""Adds a node calling a function.
This adds a `call` op to the default graph that calls the function
of signature `sig`, passing the tensors in `inputs` as arguments.
It returns the outputs of the call, which are one or more tensors.
`sig` is the `OpDef` signature of the function to call, e.g. the `signature`
field of a `_DefinedFunction`'s definition.
You can pass an optional keyword parameter `name=string` to name the
added operation.
You can pass an optional keyword parameter `noinline=True|False` to
instruct the runtime not to inline the function body into the call
site.
Args:
sig: OpDefArg. The signature of the function.
*inputs: arguments to the function.
**kwargs: Optional keyword arguments. Can only contain 'name' or
'noinline'.
Returns:
A 2-element tuple. First element: a Tensor if the function returns a single
value; a list of Tensors if the function returns multiple values; the
Operation if the function returns no values. Second element: the Operation.
Raises:
ValueError: if the arguments are invalid.
"""
if len(inputs) != len(sig.input_arg):
raise ValueError("Expected number of arguments: %d, received: %d" %
(len(sig.input_arg), len(inputs)))
name = kwargs.pop("name", None)
g = ops.get_default_graph()
func_name = sig.name
attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
output_types = [dtypes.DType(x.type) for x in sig.output_arg]
with ops.name_scope(name, func_name, inputs) as name:
op = g.create_op(
func_name,
list(inputs),
output_types,
name=name,
attrs=attrs,
op_def=sig,
compute_shapes=False)
if op.outputs:
if len(op.outputs) == 1:
ret = op.outputs[0]
else:
ret = tuple(op.outputs)
else:
ret = op
return ret, op
def _get_func_name(func):
_, func = tf_decorator.unwrap(func)
if callable(func):
if tf_inspect.isfunction(func):
return func.__name__
elif tf_inspect.ismethod(func):
return "%s.%s" % (func.__self__.__name__, func.__name__)
else: # Probably a class instance with __call__
return type(func)
else:
raise ValueError("Argument must be callable")
class _FuncGraph(ops.Graph):
"""A helper for construction a function.
_FuncGraph overrides ops.Graph's create_op() so that we can keep
track of every inputs into every op created inside the function. If
any input is from other graphs, we keep track of it in self.capture
and substitute the input with a place holder.
Each captured input's corresponding place holder is converted into a
function argument and the caller passes in the captured tensor.
"""
def __init__(self, *args, **kwargs):
super(_FuncGraph, self).__init__(*args, **kwargs)
self._building_function = True
self._outer_graph = ops.get_default_graph()
self._vscope = vs.get_variable_scope()
self._old_custom_getter = self._vscope.custom_getter
self._captured = {}
self.extra_inputs = []
self.extra_args = []
self.extra_vars = []
def getvar(
self,
getter,
name,
shape=None,
dtype=None,
initializer=None,
reuse=None,
trainable=True,
collections=None, # pylint: disable=redefined-outer-name
use_resource=None,
**kwargs):
"""A custom variable getter."""
# Here, we switch the default graph to the outer graph and ask the
# variable scope in which the function is defined to give us the
# variable. The variable is stashed in extra_vars and returned to
# the caller.
#
# We capture these variables so that the variable definition is
# hoisted upward to the outer most graph.
with self._outer_graph.as_default():
# pylint: disable=protected-access
var = self._vscope.get_variable(
vs._get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
reuse=reuse,
trainable=trainable,
collections=collections,
use_resource=use_resource)
self.extra_vars.append(var)
if isinstance(var, resource_variable_ops.ResourceVariable):
# For resource-based variables read the variable outside the function
# and pass in the value. This ensures that the function is pure and
# differentiable. TODO(apassos) this may have performance problems if
# the function will only do embedding lookups on the variable.
return var.value()
return var
def create_op(self, op_type, inputs, data_types, **kwargs):
for i, x in enumerate(inputs):
if x.graph is not self:
# Referring to a tensor from other graph.
if x in self._captured:
# Captured already.
inputs[i] = self._captured[x]
else:
# Substitute with a placeholder.
self.extra_inputs.append(x)
ph = array_ops.placeholder(x.dtype, shape=x.get_shape())
# pylint: disable=protected-access
ph._handle_data = x._handle_data
# pylint: enable=protected-access
inputs[i] = ph
self._captured[x] = ph
self.extra_args.append(ph)
return super(_FuncGraph, self).create_op(op_type, inputs, data_types,
**kwargs)
def get_extra_vars():
"""Returns the captured variables by the function.
Returns:
If the default graph is being used to define a function, the
returned list of variables are those created inside the function
body so far. Otherwise, returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_vars
else:
return []
def get_extra_inputs():
"""Returns the captured input tensors by the function.
Returns:
If the default graph is being used to define a function, the
returned list of tensors are those accessed inside the function body
but defined outside the function body so far. Otherwise, returns an
empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_inputs
else:
return []
def get_extra_args():
"""Returns the corresponding function arguments for the captured inputs.
Returns:
If the default graph is being used to define a function, the
returned list of placeholders are those used inside the function
body corresponding to those returned by get_extra_inputs(). Otherwise,
returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_args
else:
return []
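# Putting the three helpers together (a sketch; `t` and `ph` are hypothetical):
# if a function body refers to a tensor `t` that lives in the outer graph,
# _FuncGraph.create_op() records `t` in extra_inputs and feeds the op a new
# placeholder `ph` recorded in extra_args, so inside the body
# get_extra_inputs() == [t] and get_extra_args() == [ph].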
class _DefinedFunction(object):
"""_DefinedFunction encapsulates a function definition and its properties.
Attributes:
name: The function name.
definition: The definition of this function. A FunctionDef proto.
grad_func_name: If not None, the name of this function's gradient function.
python_grad_func: A python callable implementing the gradient of
the function python-side.
"""
def __init__(self,
func,
argnames,
input_types,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
shape_func=None,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
input_types: The function's argument types. Can be a tuple or a list of
tf data types.
func_name: The function name. Defaults to None, in which case the name is
derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: An optional list of strings for the function return value
names.
shape_func: An optional function mapping an op to a list of static
output shapes.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._input_types = input_types
self._func_name = func_name
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._shape_func = shape_func
self._extra_kwargs = kwargs
self._definition = None # Constructed lazily.
self._sub_functions = dict() # Constructed with definition.
self._args = []
assert isinstance(input_types, (list, tuple))
for i in range(len(input_types)):
argname = argnames[i] if i < len(argnames) else ("arg%d" % i)
argtype = input_types[i]
self._args.append((argname, argtype))
@property
def name(self):
"""Function name."""
self._create_definition_if_needed()
return self._func_name
@property
def definition(self):
"""Function definition proto."""
self._create_definition_if_needed()
return self._definition
def set_grad_func(self, grad_func):
"""Specifies the gradient function of this function."""
assert not self._grad_func
assert isinstance(grad_func, _DefinedFunction)
self._grad_func = grad_func
@property
def grad_func_name(self):
"""Its gradient function's name."""
return self._grad_func.name if self._grad_func else None
@property
def python_grad_func(self):
"""Python gradient function callable."""
return self._python_grad_func
@property
def declared_input_types(self):
"""Returns the list of data types of explicit declared inputs."""
return self._input_types
@property
def captured_inputs(self):
"""Returns the list of implicitly captured inputs."""
self._create_definition_if_needed()
return self._extra_inputs
def _create_definition_if_needed(self):
"""Creates the function definition if it's not created yet."""
if self._definition is not None:
return
# Create the func_def object.
temp_graph = _FuncGraph()
with temp_graph.as_default():
# List of placeholders for the function_def.
inputs = []
for (argname, argtype) in self._args:
argholder = array_ops.placeholder(argtype, name=argname)
inputs.append(argholder)
# Call func and gather the output tensors.
with vs.variable_scope("", custom_getter=temp_graph.getvar):
outputs = self._func(*inputs)
# If func only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
if any([_ is None for _ in outputs]):
raise ValueError("Function can not return None.")
# Ensures each output is a Tensor.
outputs = [ops.convert_to_tensor(_) for _ in outputs]
self._extra_inputs = temp_graph.extra_inputs
inputs.extend(temp_graph.extra_args)
# pylint: disable=protected-access
self._sub_functions = temp_graph._functions
# pylint: enable=protected-access
# Build the FunctionDef
self._definition = _graph_to_function_def(
temp_graph,
temp_graph.get_operations(),
inputs,
outputs,
out_names=self._out_names)
# Extra kwargs are treated as attrs on the function def.
sig_pre_func_name = self._func_name or _get_func_name(self._func)
kwargs_attr = _parse_kwargs_as_attrs(sig_pre_func_name,
**self._extra_kwargs)
for k in kwargs_attr:
self._definition.attr[k].CopyFrom(kwargs_attr[k])
# Hash the definition and its dependencies.
self._hash_str = self._create_hash_str(
self._definition.signature.input_arg,
self._definition.signature.output_arg, self._definition.node_def)
# Finally, we decide the function name to use. If not specified,
# make up something which is almost certainly unique (but deterministic).
if not self._func_name:
self._func_name = "_".join([_get_func_name(self._func), self._hash_str])
self._definition.signature.name = self._func_name
if self._func.__doc__:
self._definition.signature.description = self._func.__doc__
def _create_hash_str(self, input_arg, output_arg, node_def):
"""Creates an 8-character string unique to this input.
Args:
input_arg: the input_arg field of an OpDef
(e.g. self._definition.signature.input_arg)
output_arg: the output_arg field of an OpDef
(e.g. self._definition.signature.output_arg)
node_def: the node_def field of a FunctionDef
(e.g. self._definition.node_def)
Returns:
The unique string for this input
"""
hasher = hashlib.sha1()
def update_num(n):
hasher.update(compat.as_bytes("%x" % n))
def update_str(s):
update_num(len(s))
hasher.update(compat.as_bytes(s))
def update_strs(slist):
update_num(len(slist))
for s in slist:
update_str(s)
for adef in input_arg:
update_str(adef.SerializeToString())
for adef in output_arg:
update_str(adef.SerializeToString())
for n in sorted(node_def, key=lambda n: n.name):
update_str(n.name)
update_str(n.op)
update_strs(n.input)
update_num(len(n.attr))
# NOTE: protobuf map serialization does not guarantee ordering.
for k in sorted(n.attr):
update_str(k)
update_str(n.attr[k].SerializeToString())
return hasher.hexdigest()[:8]
def add_to_graph(self, g):
"""Adds this function into the graph g."""
self._create_definition_if_needed()
# pylint: disable=protected-access
# If 'g' has an identical function already, do nothing.
prev = g._get_function(self.name)
if prev and (prev._hash_str == self._hash_str):
return
# Adds this function into 'g'.
g._add_function(self)
# pylint: enable=protected-access
# Ensures related sub-routines are defined in 'g', too.
for f in self._sub_functions.values():
f.add_to_graph(g)
# Adds its gradient function, too.
if self._grad_func:
self._grad_func.add_to_graph(g)
def __call__(self, *args, **kwargs):
self.add_to_graph(ops.get_default_graph())
args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
ret, op = _call(self._definition.signature, *args, **kwargs)
if self._shape_func is not None:
shapes = self._shape_func(op)
if len(shapes) != len(op.outputs):
raise ValueError("shape_func produced %d shapes for %d outputs" %
(len(shapes), len(op.outputs)))
for (t, shape) in zip(op.outputs, shapes):
t.set_shape(shape)
return ret
def _from_definition(fdef, grad_func=None):
"""Creates a _DefinedFunction initialized from a FunctionDef proto.
Args:
fdef: a FunctionDef
grad_func: a _DefinedFunction or None
Returns:
A _DefinedFunction representing fdef
"""
# The Python callable is only needed to create a FunctionDef. Since we have
# the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we
# have access to such a callable here).
func = None
argnames = [arg.name for arg in fdef.signature.input_arg]
input_types = tuple(
dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg)
func_name = fdef.signature.name
# Note: FunctionDefs do not include python gradient functions, so if the
# original _DefinedFunction included one it will not be reflected here.
python_grad_func = None
out_names = [arg.name for arg in fdef.signature.output_arg]
result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,
python_grad_func, out_names)
# pylint: disable=protected-access
result._definition = fdef
# Captured inputs are added as regular inputs to a function when it's
# serialized, i.e. any extra inputs from the original function are now
# included in `result`._args
result._extra_inputs = []
result._hash_str = result._create_hash_str(
result._definition.signature.input_arg,
result._definition.signature.output_arg, result._definition.node_def)
# pylint: enable=protected-access
return result
def _from_library(lib):
"""Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.
This method handles assigning the correct gradient functions to each
function.
Args:
lib: a FunctionDefLibrary
Returns:
A list of _DefinedFunctions
Raises:
ValueError: `lib` is invalid
"""
if not lib.function and not lib.gradient:
return []
# function name -> FunctionDef proto
funcs = {fdef.signature.name: fdef for fdef in lib.function}
# Validate that all referenced function names have function defs
for g in lib.gradient:
if g.function_name not in funcs:
raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
(g.function_name, str(lib)))
if g.gradient_func not in funcs:
raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
(g.gradient_func, str(lib)))
# function name -> gradient function name
func_to_grad = collections.defaultdict(lambda: None)
# gradient function name -> names of functions having that grad function
grad_to_funcs = collections.defaultdict(list)
for gdef in lib.gradient:
func_to_grad[gdef.function_name] = gdef.gradient_func
grad_to_funcs[gdef.gradient_func].append(gdef.function_name)
# Start with functions without gradients
ready = [
fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None
]
if not ready:
raise ValueError("FunctionDefLibrary contains cyclic gradient functions!\n"
+ str(lib))
# function name -> _DefinedFunction
initialized = {}
while ready:
fdef = ready.pop()
name = fdef.signature.name
grad = initialized.get(func_to_grad[name])
if func_to_grad[name]:
assert grad
defined_func = _from_definition(fdef, grad_func=grad)
initialized[name] = defined_func
ready.extend(funcs[f] for f in grad_to_funcs[name])
return initialized.values()
# NOTE: The list needs to be extended when more data types are added.
_DTYPE_TO_STR = {
dtypes.float16: "f16",
dtypes.float32: "f32",
dtypes.float64: "f64",
dtypes.int32: "i32",
dtypes.uint8: "i8",
dtypes.uint16: "u16",
dtypes.int16: "i16",
dtypes.int8: "i8",
dtypes.string: "s",
dtypes.complex64: "c64",
dtypes.complex128: "c128",
dtypes.int64: "i64",
dtypes.bool: "b",
dtypes.qint8: "qi8",
dtypes.quint8: "qu8",
dtypes.qint16: "qi16",
dtypes.quint16: "qu16",
dtypes.qint32: "qi32",
dtypes.bfloat16: "b16"
}
def _type_list_to_str(types):
if any([_ not in _DTYPE_TO_STR for _ in types]):
raise ValueError("Unsupported dtypes: %s" % types)
return "".join([_DTYPE_TO_STR[_] for _ in types])
class _OverloadedFunction(object):
"""_OverloadedFunction encapsulates an overloaded function.
_OverloadedFunction maintains a mapping from input types to
instantiated _DefinedFunction in self._overload.
"""
def __init__(self,
func,
argnames,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
func_name: The function name. Defaults to None, in which case the name is
derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: A list of strings for the function return value names.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._argnames = argnames
self._func_name = func_name
assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._extra_kwargs = kwargs
self._overload = {}
def instantiate(self, input_types):
"""Instantiate this function given input argument types.
Args:
input_types: A list of data types for the inputs.
Returns:
_DefinedFunction for the given input types.
"""
# Stringify the type list.
key = _type_list_to_str(input_types)
defined = self._overload.get(key)
if not defined:
# If not defined yet, define the function given the input types.
name = self._func_name
if name is not None:
name = "_".join([name, key])
defined = _DefinedFunction(
self._func,
self._argnames,
input_types,
name,
None,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
_ = defined.name # Fully instantiate the function definition.
if self._grad_func:
# If _grad_func is given, it is another
# _OverloadedFunction. We need to instantiate it with the
# right input types.
output_types = [
dtypes.DType(_.type)
for _ in defined.definition.signature.output_arg
]
# pylint: disable=protected-access
defined._grad_func = self._grad_func.instantiate(
input_types + output_types)
# pylint: enable=protected-access
self._overload[key] = defined
return defined
def __call__(self, *args, **kwargs):
input_types = []
args = list(args)
for (i, x) in enumerate(args):
x = ops.convert_to_tensor(x)
if not isinstance(x, ops.Tensor):
raise ValueError("Expect a Tensor but get ", x)
input_types.append(x.dtype)
args[i] = x
return self.instantiate(input_types)(*args, **kwargs)
class Defun(object):
"""Decorator used to define TensorFlow functions.
Use this decorator to make a Python function usable directly as a TensorFlow
function.
The decorated function must add ops to the default graph and return zero or
more `Tensor` objects. Call the decorator with named arguments, one for each
argument of the function to decorate, with the expected type of the argument
as value.
For example if the function to decorate accepts two `tf.float32` arguments
named `x` and `y`, call the decorator with:
@Defun(tf.float32, tf.float32)
def foo(x, y):
...
When you call the decorated function, it will add `call` ops to the
default graph and add the definition of the function into the
default graph. Because the addition of the function into the graph
is deferred, the decorator can be used anywhere in the program.
Any variables created inside of the function are hoisted into the outer graph.
Note that the variables are created in the variable scope that was active
during the first call to the function. Subsequent function calls will refer to
the same set of variables.
Definitions of functions are frozen in a graph as soon as the graph is used to
create a session. Therefore, nodes using the function must be created in the
graph before the corresponding session is created.
Example, but also see the [How To on functions](link_needed).
```python
# Defining the function.
@tf.Defun(tf.float32, tf.float32)
def MyFunc(x, y):
return x + y, x - y
# Building the graph.
a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = MyFunc(a, b, name='mycall')
```
"""
def __init__(self, *input_types, **kwargs):
"""Create a `Defun` decorator.
Args:
*input_types: A list of `tf.DType`
**kwargs: Optional keyword arguments, including
func_name - (optional). A python string, the name to use to
declare this `Function` in the graph.
grad_func - (optional). A function implementing the gradient
of the function-to-register. This is either a
`_DefinedFunction` or a `Declare` object. The gradient
function must satisfy the criterion defined in
function.proto:GradientDef.
python_grad_func - (optional). A function implementing the
gradient of the function python-side. This function must
take the current op and the gradients w.r.t. its outputs,
and return the gradients w.r.t. the inputs. That is, it must
implement the interface expected by `tf.RegisterGradient`.
This will be called by tf.gradients to add the gradient ops
to the graph. At most one of grad_func and python_grad_func
can be specified.
out_names - (optional). A list of strings, one per output
tensor.
shape_func - (optional). A function taking the op and returning a list
of static shapes to set for the function's outputs.
"""
self._input_types = input_types
self._func_name = kwargs.pop("func_name", None)
self._grad_func = kwargs.pop("grad_func", None)
self._python_grad_func = kwargs.pop("python_grad_func", None)
self._out_names = kwargs.pop("out_names", None)
self._extra_kwargs = kwargs
def __call__(self, func):
# Various sanity checks on the callable func.
if not callable(func):
raise ValueError("func %s must be callable" % func)
# Func should not use kwargs and defaults.
argspec = tf_inspect.getargspec(func)
if argspec.keywords or argspec.defaults:
raise ValueError("Functions with argument defaults or keyword "
"arguments are not supported.")
# Computes how many arguments 'func' has.
min_args = len(argspec.args)
max_args = min_args
if argspec.varargs:
max_args = 1000000
argnames = argspec.args
if tf_inspect.ismethod(func):
# 1st argument is the "class" type.
min_args -= 1
argnames = argnames[1:]
if self._input_types:
# If Defun is given a list of types for the inputs, the number
# of input types should be compatible with 'func'.
num = len(self._input_types)
if num < min_args or num > max_args:
raise ValueError(
"The function has fewer arguments than the number of specified "
"input types.")
return _DefinedFunction(
func,
argnames,
self._input_types,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# 'func' expects no arguments and input types is an empty list.
if min_args == 0 and max_args == 0:
return _DefinedFunction(
func, [], [],
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# Input types are unknown. It's an overloaded function and hence
# its definition needs to be deferred until it's called.
return _OverloadedFunction(
func,
argnames,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
class Declare(object):
"""Declares a TensorFlow function.
The object represents a TensorFlow function which will be defined
later during a graph construction.
For example,
# Declares a function Foo, which takes a tf.int32 named "n" and a
# tf.float32 named "x" as inputs and returns a tf.float32 named "z"
# as its output.
foo = Declare("Foo", [("n", tf.int32), ("x", tf.float32)],
[("z", tf.float32)])
# Defines a function Bar that calls Foo.
@tf.Defun(tf.float32)
def Bar(x):
return foo(6, x)
# Defines Foo, with output named "z".
@tf.Defun(tf.int32, tf.float32, out_names=["z"])
def Foo(n, x):
... # Calculation.
return result
"""
def __init__(self, func_name, inputs, outputs):
"""Creates a `Declare` object.
Args:
func_name: The name of the function.
inputs: A list of (name, data type) pairs of function arguments.
outputs: A list of (name, data type) pairs of function return values.
"""
self._sig = op_def_pb2.OpDef()
self._sig.name = func_name
def _to_argdef_list(args):
names = [n for n, t in args]
if len(names) != len(set(names)):
raise ValueError("Expected names to all be unique: %s" % str(names))
return [
op_def_pb2.OpDef.ArgDef(type=t.as_datatype_enum, name=n)
for n, t in args
]
self._sig.input_arg.extend(_to_argdef_list(inputs))
self._sig.output_arg.extend(_to_argdef_list(outputs))
def __call__(self, *inputs, **kwargs):
inputs = [ops.convert_to_tensor(_) for _ in inputs]
return _call(self._sig, *inputs, **kwargs)[0]
|
|
#!/usr/bin/python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# This test simulates the first time a database has to be split:
# - we start with a keyspace with a single shard and a single table
# - we add and populate the sharding key
# - we set the sharding key in the topology
# - we backup / restore into 2 instances
# - we enable filtered replication
# - we move all serving types
# - we scrap the source tablets
# - we remove the original shard
import base64
import logging
import threading
import struct
import time
import unittest
from vtdb import keyrange_constants
import environment
import utils
import tablet
keyspace_id_type = keyrange_constants.KIT_UINT64
pack_keyspace_id = struct.Struct('!Q').pack
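# pack_keyspace_id turns a 64-bit integer keyspace id into its 8-byte
# big-endian representation, e.g. pack_keyspace_id(0x1000000000000000) ==
# '\x10\x00\x00\x00\x00\x00\x00\x00' (used below when keyspace_id_type is
# KIT_BYTES).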
# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly = tablet.Tablet()
# split shards
# range "" - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
# range 80 - ""
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()
def setUpModule():
try:
environment.topo_server_setup()
setup_procs = [
shard_master.init_mysql(),
shard_replica.init_mysql(),
shard_rdonly.init_mysql(),
shard_0_master.init_mysql(),
shard_0_replica.init_mysql(),
shard_0_rdonly.init_mysql(),
shard_1_master.init_mysql(),
shard_1_replica.init_mysql(),
shard_1_rdonly.init_mysql(),
]
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
if utils.options.skip_teardown:
return
teardown_procs = [
shard_master.teardown_mysql(),
shard_replica.teardown_mysql(),
shard_rdonly.teardown_mysql(),
shard_0_master.teardown_mysql(),
shard_0_replica.teardown_mysql(),
shard_0_rdonly.teardown_mysql(),
shard_1_master.teardown_mysql(),
shard_1_replica.teardown_mysql(),
shard_1_rdonly.teardown_mysql(),
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server_teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
shard_master.remove_tree()
shard_replica.remove_tree()
shard_rdonly.remove_tree()
shard_0_master.remove_tree()
shard_0_replica.remove_tree()
shard_0_rdonly.remove_tree()
shard_1_master.remove_tree()
shard_1_replica.remove_tree()
shard_1_rdonly.remove_tree()
class TestInitialSharding(unittest.TestCase):
# create_schema will create the same schema on the keyspace
def _create_schema(self):
create_table_template = '''create table %s(
id bigint auto_increment,
msg varchar(64),
primary key (id),
index by_msg (msg)
) Engine=InnoDB'''
utils.run_vtctl(['ApplySchemaKeyspace',
'-simple',
'-sql=' + create_table_template % ("resharding1"),
'test_keyspace'],
auto_log=True)
def _add_sharding_key_to_schema(self):
if keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
sql = 'alter table %s add keyspace_id ' + t
utils.run_vtctl(['ApplySchemaKeyspace',
'-simple',
'-sql=' + sql % ("resharding1"),
'test_keyspace'],
auto_log=True)
def _mark_sharding_key_not_null(self):
if keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
sql = 'alter table %s modify keyspace_id ' + t + ' not null'
utils.run_vtctl(['ApplySchemaKeyspace',
'-simple',
'-sql=' + sql % ("resharding1"),
'test_keyspace'],
auto_log=True)
# _insert_startup_value inserts a value in the MySQL database before it
# is sharded
def _insert_startup_value(self, tablet, table, id, msg):
tablet.mquery('vt_test_keyspace', [
'begin',
'insert into %s(id, msg) values(%u, "%s")' % (table, id, msg),
'commit'
], write=True)
def _insert_startup_values(self):
self._insert_startup_value(shard_master, 'resharding1', 1, 'msg1')
self._insert_startup_value(shard_master, 'resharding1', 2, 'msg2')
self._insert_startup_value(shard_master, 'resharding1', 3, 'msg3')
def _backfill_keyspace_id(self, tablet):
tablet.mquery('vt_test_keyspace', [
'begin',
'update resharding1 set keyspace_id=0x1000000000000000 where id=1',
'update resharding1 set keyspace_id=0x9000000000000000 where id=2',
'update resharding1 set keyspace_id=0xD000000000000000 where id=3',
'commit'
], write=True)
# _insert_value inserts a value in the MySQL database along with the comments
# required for routing.
def _insert_value(self, tablet, table, id, msg, keyspace_id):
if keyspace_id_type == keyrange_constants.KIT_BYTES:
k = base64.b64encode(pack_keyspace_id(keyspace_id))
else:
k = "%u" % keyspace_id
tablet.mquery('vt_test_keyspace', [
'begin',
'insert into %s(id, msg, keyspace_id) values(%u, "%s", 0x%x) /* EMD keyspace_id:%s user_id:%u */' % (table, id, msg, keyspace_id, k, id),
'commit'
], write=True)
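# For example, with keyspace_id_type == KIT_UINT64, the call
# _insert_value(shard_master, 'resharding1', 1, 'msg1', 0x1000000000000000)
# issues roughly:
#   insert into resharding1(id, msg, keyspace_id)
#   values(1, "msg1", 0x1000000000000000)
#   /* EMD keyspace_id:1152921504606846976 user_id:1 */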
def _get_value(self, tablet, table, id):
return tablet.mquery('vt_test_keyspace', 'select id, msg, keyspace_id from %s where id=%u' % (table, id))
def _check_value(self, tablet, table, id, msg, keyspace_id,
should_be_here=True):
result = self._get_value(tablet, table, id)
if keyspace_id_type == keyrange_constants.KIT_BYTES:
fmt = "%s"
keyspace_id = pack_keyspace_id(keyspace_id)
else:
fmt = "%x"
if should_be_here:
self.assertEqual(result, ((id, msg, keyspace_id),),
("Bad row in tablet %s for id=%u, keyspace_id=" +
fmt + ", row=%s") % (tablet.tablet_alias, id,
keyspace_id, str(result)))
else:
self.assertEqual(len(result), 0,
("Extra row in tablet %s for id=%u, keyspace_id=" +
fmt + ": %s") % (tablet.tablet_alias, id, keyspace_id,
str(result)))
# _is_value_present_and_correct tries to read a value.
# if it is there, it will check it is correct and return True if it is.
# if not correct, it will self.fail.
# if not there, it will return False.
def _is_value_present_and_correct(self, tablet, table, id, msg, keyspace_id):
result = self._get_value(tablet, table, id)
if len(result) == 0:
return False
if keyspace_id_type == keyrange_constants.KIT_BYTES:
fmt = "%s"
keyspace_id = pack_keyspace_id(keyspace_id)
else:
fmt = "%x"
self.assertEqual(result, ((id, msg, keyspace_id),),
("Bad row in tablet %s for id=%u, keyspace_id=" + fmt) % (
tablet.tablet_alias, id, keyspace_id))
return True
def _check_startup_values(self):
# check first value is in the right shard
for t in [shard_0_master, shard_0_replica, shard_0_rdonly]:
self._check_value(t, 'resharding1', 1, 'msg1', 0x1000000000000000)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly]:
self._check_value(t, 'resharding1', 1, 'msg1',
0x1000000000000000, should_be_here=False)
# check second value is in the right shard
for t in [shard_0_master, shard_0_replica, shard_0_rdonly]:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000,
should_be_here=False)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly]:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000)
# check third value is in the right shard too
for t in [shard_0_master, shard_0_replica, shard_0_rdonly]:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000,
should_be_here=False)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly]:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000)
def _insert_lots(self, count, base=0):
for i in xrange(count):
self._insert_value(shard_master, 'resharding1', 10000 + base + i,
'msg-range1-%u' % i, 0xA000000000000000 + base + i)
self._insert_value(shard_master, 'resharding1', 20000 + base + i,
'msg-range2-%u' % i, 0xE000000000000000 + base + i)
# _check_lots returns how many of the values we have, as a percentage.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
10000 + base + i, 'msg-range1-%u' %
i, 0xA000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
20000 + base + i, 'msg-range2-%u' %
i, 0xE000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug("I have %u%% of the data", percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return
timeout = utils.wait_step('enough data went through', timeout)
# _check_lots_not_present makes sure no data is in the wrong shard
def _check_lots_not_present(self, count, base=0):
found = 0
for i in xrange(count):
self._check_value(shard_0_replica, 'resharding1', 10000 + base + i,
'msg-range1-%u' % i, 0xA000000000000000 + base + i,
should_be_here=False)
self._check_value(shard_0_replica, 'resharding1', 20000 + base + i,
'msg-range2-%u' % i, 0xE000000000000000 + base + i,
should_be_here=False)
def test_resharding(self):
# create the keyspace with just one shard
utils.run_vtctl(['CreateKeyspace',
'test_keyspace'])
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', 'test_keyspace',
'keyspace_id', keyspace_id_type])
shard_master.init_tablet( 'master', 'test_keyspace', '0')
shard_replica.init_tablet('replica', 'test_keyspace', '0')
shard_rdonly.init_tablet( 'rdonly', 'test_keyspace', '0')
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# create databases so vttablet can start behaving normally
for t in [shard_master, shard_replica, shard_rdonly]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None)
# wait for the tablets
shard_master.wait_for_vttablet_state('SERVING')
shard_replica.wait_for_vttablet_state('SERVING')
shard_rdonly.wait_for_vttablet_state('SERVING')
# reparent to make the tablets work
utils.run_vtctl(['ReparentShard', '-force', 'test_keyspace/0',
shard_master.tablet_alias], auto_log=True)
# create the tables and add startup values
self._create_schema()
self._insert_startup_values()
# change the schema, backfill keyspace_id, and change schema again
self._add_sharding_key_to_schema()
self._backfill_keyspace_id(shard_master)
self._mark_sharding_key_not_null()
# create the split shards
shard_0_master.init_tablet( 'master', 'test_keyspace', '-80')
shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
shard_0_rdonly.init_tablet( 'rdonly', 'test_keyspace', '-80')
shard_1_master.init_tablet( 'master', 'test_keyspace', '80-')
shard_1_replica.init_tablet('replica', 'test_keyspace', '80-')
shard_1_rdonly.init_tablet( 'rdonly', 'test_keyspace', '80-')
# start vttablet on the split shards (no db created,
# so they're all not serving)
for t in [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly]:
t.start_vttablet(wait_for_state=None)
for t in [shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['ReparentShard', '-force', 'test_keyspace/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['ReparentShard', '-force', 'test_keyspace/80-',
shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n' +
'Partitions(rdonly): -\n' +
'Partitions(replica): -\n' +
'TabletTypes: master,rdonly,replica',
keyspace_id_type=keyspace_id_type)
# take the snapshot for the split
utils.run_vtctl(['MultiSnapshot', '--spec=-80-',
shard_replica.tablet_alias], auto_log=True)
# wait for tablet's binlog server service to be enabled after snapshot
shard_replica.wait_for_binlog_server_state("Enabled")
# perform the restore.
utils.run_vtctl(['ShardMultiRestore', '-strategy=populateBlpCheckpoint',
'test_keyspace/-80', shard_replica.tablet_alias],
auto_log=True)
utils.run_vtctl(['ShardMultiRestore', '-strategy=populateBlpCheckpoint',
'test_keyspace/80-', shard_replica.tablet_alias],
auto_log=True)
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], auto_log=True)
# check the binlog players are running
shard_0_master.wait_for_binlog_player_count(1)
shard_1_master.wait_for_binlog_player_count(1)
# testing filtered replication: insert a bunch of data on the source shard,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug("Inserting lots of data on source shard")
self._insert_lots(1000)
logging.debug("Checking 80 percent of data is sent quickly")
self._check_lots_timeout(1000, 80, 5)
logging.debug("Checking all data goes through eventually")
self._check_lots_timeout(1000, 100, 20)
logging.debug("Checking no data was sent the wrong way")
self._check_lots_not_present(1000)
# use the vtworker checker to compare the data
logging.debug("Running vtworker SplitDiff for -80")
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', 'test_keyspace/-80'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_rdonly.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_0_rdonly.tablet_alias, 'rdonly'],
auto_log=True)
logging.debug("Running vtworker SplitDiff for 80-")
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', 'test_keyspace/80-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_rdonly.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly.tablet_alias, 'rdonly'],
auto_log=True)
utils.pause("Good time to test vtworker for diffs")
# check we can't migrate the master just yet
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
expect_fail=True)
# now serve rdonly from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n' +
'Partitions(rdonly): -80 80-\n' +
'Partitions(replica): -\n' +
'TabletTypes: master,rdonly,replica',
keyspace_id_type=keyspace_id_type)
# then serve replica from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n' +
'Partitions(rdonly): -80 80-\n' +
'Partitions(replica): -80 80-\n' +
'TabletTypes: master,rdonly,replica',
keyspace_id_type=keyspace_id_type)
# move replica back and forth
utils.run_vtctl(['MigrateServedTypes', '-reverse', 'test_keyspace/0', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n' +
'Partitions(rdonly): -80 80-\n' +
'Partitions(replica): -\n' +
'TabletTypes: master,rdonly,replica',
keyspace_id_type=keyspace_id_type)
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n' +
'Partitions(rdonly): -80 80-\n' +
'Partitions(replica): -80 80-\n' +
'TabletTypes: master,rdonly,replica',
keyspace_id_type=keyspace_id_type)
# then serve master from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-\n' +
'Partitions(replica): -80 80-\n' +
'TabletTypes: master,rdonly,replica',
keyspace_id_type=keyspace_id_type)
# check the binlog players are gone now
shard_0_master.wait_for_binlog_player_count(0)
shard_1_master.wait_for_binlog_player_count(0)
# make sure we can't delete a shard with tablets
utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], expect_fail=True)
# scrap the original tablets in the original shard
for t in [shard_master, shard_replica, shard_rdonly]:
utils.run_vtctl(['ScrapTablet', t.tablet_alias], auto_log=True)
tablet.kill_tablets([shard_master, shard_replica, shard_rdonly])
for t in [shard_master, shard_replica, shard_rdonly]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
# rebuild the serving graph, all mentions of the old shards should be gone
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# delete the original shard
utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], auto_log=True)
# kill everything else
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly,
shard_1_master, shard_1_replica, shard_1_rdonly])
if __name__ == '__main__':
utils.main()
|
|
import numpy as np
from .base import HomogFamilyAlignment
from .affine import DiscreteAffine, Affine
from .similarity import Similarity
def Scale(scale_factor, n_dims=None):
r"""
Factory function for producing Scale transforms. Zero scale factors are not
permitted.
A :class:`UniformScale` will be produced if:
- A float ``scale_factor`` and a ``n_dims`` kwarg are provided
- A ndarray scale_factor with shape (``n_dims``, ) is provided with all
elements being the same
A :class:`NonUniformScale` will be produced if:
- A ndarray ``scale_factor`` with shape (``n_dims``, ) is provided with
at least two differing scale factors.
Parameters
----------
scale_factor: double or (D,) ndarray
Scale for each axis.
n_dims: int
The dimensionality of the output transform.
Returns
-------
scale : :class:`UniformScale` or :class:`NonUniformScale`
The correct type of scale
Raises
-------
ValueError
If any of the scale factors is zero
"""
from numbers import Number
if not isinstance(scale_factor, Number):
# some array like thing - make it a numpy array for sure
scale_factor = np.asarray(scale_factor)
if not np.all(scale_factor):
raise ValueError('Having a zero in one of the scales is invalid')
if n_dims is None:
# scale_factor better be a numpy array then
if np.allclose(scale_factor, scale_factor[0]):
return UniformScale(scale_factor[0], scale_factor.shape[0])
else:
return NonUniformScale(scale_factor)
else:
# interpret as a scalar then
return UniformScale(scale_factor, n_dims)
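# A short usage sketch of the factory above (relying only on the classes
# defined below in this module):
#   Scale(2.0, n_dims=3)         # -> UniformScale(2.0, 3)
#   Scale(np.array([2.0, 2.0]))  # -> UniformScale(2.0, 2), all factors equal
#   Scale(np.array([2.0, 3.0]))  # -> NonUniformScale([2.0, 3.0])
#   Scale(np.array([1.0, 0.0]))  # -> raises ValueError (zero scale factor)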
class NonUniformScale(DiscreteAffine, Affine):
r"""
An ``n_dims`` scale transform, with a scale component for each dimension.
Parameters
----------
scale : (D,) ndarray
A scale for each axis.
"""
def __init__(self, scale):
scale = np.asarray(scale)
h_matrix = np.eye(scale.size + 1)
np.fill_diagonal(h_matrix, scale)
h_matrix[-1, -1] = 1
Affine.__init__(self, h_matrix)
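# For example, NonUniformScale([2, 3]) builds the homogeneous matrix
#   [[2, 0, 0],
#    [0, 3, 0],
#    [0, 0, 1]]
# i.e. a per-axis scale with the usual trailing homogeneous 1.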
@classmethod
def identity(cls, n_dims):
return NonUniformScale(np.ones(n_dims))
def set_h_matrix(self, value):
raise NotImplementedError("The h_matrix cannot "
"be set on a NonUniformScale.")
@property
def scale(self):
r"""
The scale vector.
:type: (D,) ndarray
"""
return self.h_matrix.diagonal()[:-1]
def _transform_str(self):
message = 'NonUniformScale by %s ' % self.scale
return message
@property
def n_parameters(self):
"""
The number of parameters: ``n_dims``.
:type: int
``n_dims`` parameters - ``[scale_x, scale_y, ....]`` - The scalar values
representing the scale across each axis.
"""
return self.scale.size
def as_vector(self):
r"""
Return the parameters of the transform as a 1D array. These parameters
are parametrised as deltas from the identity warp. The parameters
are output in the order [s0, s1, ...].
+----------+--------------------------------------------+
|parameter | definition |
+==========+============================================+
|s0 | The scale across the first axis |
+----------+--------------------------------------------+
|s1 | The scale across the second axis |
+----------+--------------------------------------------+
|... | ... |
+----------+--------------------------------------------+
|sn | The scale across the nth axis |
+----------+--------------------------------------------+
Returns
-------
s : (D,) ndarray
The scale across each axis.
"""
return self.scale
def from_vector_inplace(self, vector):
r"""
Updates the NonUniformScale inplace.
Parameters
----------
vector : (D,) ndarray
The array of parameters.
"""
np.fill_diagonal(self.h_matrix, vector)
self.h_matrix[-1, -1] = 1
@property
def composes_inplace_with(self):
return (NonUniformScale, UniformScale)
def _build_pseudoinverse(self):
"""
The inverse scale.
:type: :class:`NonUniformScale`
"""
return NonUniformScale(1.0 / self.scale)
class UniformScale(DiscreteAffine, Similarity):
r"""
An abstract similarity scale transform, with a single scale component
applied to all dimensions. This is abstracted out to remove unnecessary
code duplication.
"""
def __init__(self, scale, n_dims):
h_matrix = np.eye(n_dims + 1)
np.fill_diagonal(h_matrix, scale)
h_matrix[-1, -1] = 1
Similarity.__init__(self, h_matrix)
@classmethod
def identity(cls, n_dims):
return UniformScale(1, n_dims)
@property
def scale(self):
r"""
The single scale value.
:type: double
"""
return self.h_matrix[0, 0]
def _transform_str(self):
message = 'UniformScale by %f ' % self.scale
return message
@property
def n_parameters(self):
r"""
The number of parameters: 1
:type: int
"""
return 1
def as_vector(self):
r"""
Return the parameters of the transform as a 1D array. These parameters
are parametrised as deltas from the identity warp. The parameters
are output in the order [s].
+----------+--------------------------------+
|parameter | definition |
+==========+================================+
|s | The scale across each axis |
+----------+--------------------------------+
Returns
-------
s : double
The scale across each axis.
"""
return self.scale
def from_vector_inplace(self, p):
np.fill_diagonal(self.h_matrix, p)
self.h_matrix[-1, -1] = 1
@property
def composes_inplace_with(self):
return UniformScale
def _build_pseudoinverse(self):
r"""
The inverse scale.
:type: type(self)
"""
return type(self)(1.0 / self.scale, self.n_dims)
class AlignmentUniformScale(HomogFamilyAlignment, UniformScale):
def __init__(self, source, target):
HomogFamilyAlignment.__init__(self, source, target)
UniformScale.__init__(self, target.norm() / source.norm(),
source.n_dims)
def from_vector_inplace(self, p):
UniformScale.from_vector_inplace(self, p)
self._sync_target_from_state()
def _sync_state_from_target(self):
new_scale = self.target.norm() / self.source.norm()
np.fill_diagonal(self.h_matrix, new_scale)
self.h_matrix[-1, -1] = 1
def copy_without_alignment(self):
return UniformScale(self.scale, self.n_dims)
|
|
from hippy import consts
from hippy.klass import def_class
from hippy.objects.base import W_Root
from hippy.objects.intobject import W_IntObject
from hippy.objects.instanceobject import W_InstanceObject
from hippy.builtin import Optional
from hippy.builtin_klass import GetterSetterWrapper
from hippy.module.reflections.exception import k_ReflectionException
IS_STATIC = 1
IS_PUBLIC = 256
IS_PROTECTED = 512
IS_PRIVATE = 1024
class W_ReflectionProperty(W_InstanceObject):
class_name = ''
name = ''
def get_str(self):
prop = self.ref_prop
if prop is None:
inner = '<dynamic> public $%s' % self.name
else:
access = ''
if not prop.is_static():
access += '<default> '
if prop.is_public():
access += 'public'
elif prop.is_protected():
access += 'protected'
elif prop.is_private():
access += 'private'
else:
assert False, 'should not happen'
if prop.is_static():
access += ' static'
inner = '%s $%s' % (access, prop.name)
return 'Property [ %s ]\n' % inner
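# For instance, a plain (non-static, default) public property $foo yields
# 'Property [ <default> public $foo ]\n', while a dynamically added property
# yields 'Property [ <dynamic> public $foo ]\n'.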
def _get_class(interp, this):
return interp.space.newstr(this.class_name)
def _set_class(interp, this, w_value):
pass
def _get_name(interp, this):
return interp.space.newstr(this.name)
def _set_name(interp, this, w_value):
pass
k_ReflectionProperty = def_class(
'ReflectionProperty',
['export', '__construct', 'getName', 'getValue', 'setValue',
'getDeclaringClass', "isPublic", "isPrivate", "isProtected", "isStatic",
"isDefault", "getModifiers", "__toString"],
[GetterSetterWrapper(_get_name, _set_name, 'name', consts.ACC_PUBLIC),
GetterSetterWrapper(_get_class, _set_class, 'class', consts.ACC_PUBLIC)],
[('IS_STATIC', W_IntObject(IS_STATIC)),
('IS_PUBLIC', W_IntObject(IS_PUBLIC)),
('IS_PROTECTED', W_IntObject(IS_PROTECTED)),
('IS_PRIVATE', W_IntObject(IS_PRIVATE))],
instance_class=W_ReflectionProperty)
@k_ReflectionProperty.def_method(['interp', W_Root, str, Optional(bool)],
flags=consts.ACC_STATIC)
def export(interp, w_klass, name, return_string=False):
refl = k_ReflectionProperty.call_args(interp,
[w_klass, interp.space.wrap(name)])
result = refl.get_str()
if return_string:
return interp.space.wrap(result)
else:
interp.writestr(result)
interp.writestr('\n')
return interp.space.w_Null
@k_ReflectionProperty.def_method(['interp', 'this', W_Root, str])
def __construct(interp, this, w_class, property_name):
space = interp.space
if space.is_str(w_class):
class_name = space.str_w(w_class)
klass = interp.lookup_class_or_intf(class_name)
if klass is None:
msg = "Class %s does not exist" % class_name
interp.throw(msg, klass=k_ReflectionException)
elif isinstance(w_class, W_InstanceObject):
klass = w_class.getclass()
class_name = klass.name
else:
msg = ("The parameter class is expected to be either a string "
"or an object")
raise interp.throw(msg, klass=k_ReflectionException)
this.class_name = class_name
this.name = property_name
this.ref_klass = klass
this.flags = 0
try:
this.ref_prop = klass.properties[property_name]
if this.ref_prop.is_static():
this.flags |= IS_STATIC
if this.ref_prop.is_public():
this.flags |= IS_PUBLIC
elif this.ref_prop.is_private():
this.flags |= IS_PRIVATE
elif this.ref_prop.is_protected():
this.flags |= IS_PROTECTED
except KeyError:
if (isinstance(w_class, W_InstanceObject) and
w_class.map.lookup(property_name) is not None):
this.ref_prop = None
this.flags = consts.ACC_IMPLICIT_PUBLIC
return
msg = "Property %s::$%s does not exist" % (class_name, property_name)
interp.throw(msg, klass=k_ReflectionException)
@k_ReflectionProperty.def_method(['interp', 'this'])
def getName(interp, this):
return _get_name(interp, this)
# XXX: getValue & setValue don't work in case of accessible private & protected
# properties
@k_ReflectionProperty.def_method(['interp', 'this', Optional(W_Root)])
def getValue(interp, this, w_obj=None):
property = this.ref_prop
if property is None:
return w_obj.getattr(interp, this.name, w_obj.getclass(),
give_notice=False)
if not property.is_public():
msg = "Cannot access non-public member %s::%s" % (this.class_name,
this.name)
raise interp.throw(msg, klass=k_ReflectionException)
if not property.is_static():
w_value = w_obj.getattr(interp, this.name, w_obj.getclass(),
give_notice=False)
else:
w_value = property.getvalue(interp.space).deref()
return w_value
@k_ReflectionProperty.def_method(['interp', 'this', W_Root, Optional(W_Root)])
def setValue(interp, this, w_arg_1, w_arg_2=None):
if not this.ref_prop.is_public():
msg = "Cannot access non-public member %s::%s" % (this.class_name,
this.name)
raise interp.throw(msg, klass=k_ReflectionException)
if not this.ref_prop.is_static():
w_obj = w_arg_1
w_value = w_arg_2
w_obj.setattr(interp, this.name, w_value, None)
else:
if w_arg_2 is None:
w_value = w_arg_1
else:
w_value = w_arg_2
this.ref_prop.r_value.store(w_value)
@k_ReflectionProperty.def_method(['interp', 'this'])
def getDeclaringClass(interp, this):
name = this.ref_prop.klass.name
k_ReflClass = interp.lookup_class_or_intf('ReflectionClass')
return k_ReflClass.call_args(interp, [interp.space.newstr(name)])
@k_ReflectionProperty.def_method(['interp', 'this'])
def isPublic(interp, this):
return interp.space.newbool(this.ref_prop.is_public())
@k_ReflectionProperty.def_method(['interp', 'this'])
def isPrivate(interp, this):
return interp.space.newbool(this.ref_prop.is_private())
@k_ReflectionProperty.def_method(['interp', 'this'])
def isProtected(interp, this):
return interp.space.newbool(this.ref_prop.is_protected())
@k_ReflectionProperty.def_method(['interp', 'this'])
def isStatic(interp, this):
return interp.space.newbool(this.ref_prop.is_static())
@k_ReflectionProperty.def_method(['interp', 'this'])
def isDefault(interp, this):
return interp.space.newbool(True) # XXX
@k_ReflectionProperty.def_method(['interp', 'this'])
def getModifiers(interp, this):
return interp.space.newint(this.ref_prop.access_flags)
@k_ReflectionProperty.def_method(['interp', 'this'])
def __toString(interp, this):
return interp.space.newstr(this.get_str())
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe, os, re
from frappe.utils import touch_file, encode, cstr
def make_boilerplate(dest, app_name):
if not os.path.exists(dest):
print("Destination directory does not exist")
return
# app_name should be in snake_case
app_name = frappe.scrub(app_name)
hooks = frappe._dict()
hooks.app_name = app_name
app_title = hooks.app_name.replace("_", " ").title()
for key in ("App Title (default: {0})".format(app_title),
"App Description", "App Publisher", "App Email",
"App Icon (default 'octicon octicon-file-directory')",
"App Color (default 'grey')",
"App License (default 'MIT')"):
hook_key = key.split(" (")[0].lower().replace(" ", "_")
hook_val = None
while not hook_val:
hook_val = cstr(raw_input(key + ": "))
if not hook_val:
defaults = {
"app_title": app_title,
"app_icon": "octicon octicon-file-directory",
"app_color": "grey",
"app_license": "MIT"
}
if hook_key in defaults:
hook_val = defaults[hook_key]
if hook_key=="app_name" and hook_val.lower().replace(" ", "_") != hook_val:
print("App Name must be all lowercase and without spaces")
hook_val = ""
elif hook_key=="app_title" and not re.match("^(?![\W])[^\d_\s][\w -]+$", hook_val, re.UNICODE):
print("App Title should start with a letter and it can only consist of letters, numbers, spaces and underscores")
hook_val = ""
hooks[hook_key] = hook_val
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, frappe.scrub(hooks.app_title)),
with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "www"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
"pages"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "templates",
"includes"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "config"), with_init=True)
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "public",
"css"))
frappe.create_folder(os.path.join(dest, hooks.app_name, hooks.app_name, "public",
"js"))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "__init__.py"), "w") as f:
f.write(encode(init_template))
with open(os.path.join(dest, hooks.app_name, "MANIFEST.in"), "w") as f:
f.write(encode(manifest_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, ".gitignore"), "w") as f:
f.write(encode(gitignore_template.format(app_name = hooks.app_name)))
with open(os.path.join(dest, hooks.app_name, "setup.py"), "w") as f:
f.write(encode(setup_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, "requirements.txt"), "w") as f:
f.write("frappe")
with open(os.path.join(dest, hooks.app_name, "README.md"), "w") as f:
f.write(encode("## {0}\n\n{1}\n\n#### License\n\n{2}".format(hooks.app_title,
hooks.app_description, hooks.app_license)))
with open(os.path.join(dest, hooks.app_name, "license.txt"), "w") as f:
f.write(encode("License: " + hooks.app_license))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "modules.txt"), "w") as f:
f.write(encode(hooks.app_title))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "hooks.py"), "w") as f:
f.write(encode(hooks_template.format(**hooks)))
touch_file(os.path.join(dest, hooks.app_name, hooks.app_name, "patches.txt"))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "desktop.py"), "w") as f:
f.write(encode(desktop_template.format(**hooks)))
with open(os.path.join(dest, hooks.app_name, hooks.app_name, "config", "docs.py"), "w") as f:
f.write(encode(docs_template.format(**hooks)))
print("'{app}' created at {path}".format(app=app_name, path=os.path.join(dest, app_name)))
manifest_template = """include MANIFEST.in
include requirements.txt
include *.json
include *.md
include *.py
include *.txt
recursive-include {app_name} *.css
recursive-include {app_name} *.csv
recursive-include {app_name} *.html
recursive-include {app_name} *.ico
recursive-include {app_name} *.js
recursive-include {app_name} *.json
recursive-include {app_name} *.md
recursive-include {app_name} *.png
recursive-include {app_name} *.py
recursive-include {app_name} *.svg
recursive-include {app_name} *.txt
recursive-exclude {app_name} *.pyc"""
init_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__version__ = '0.0.1'
"""
hooks_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "{app_name}"
app_title = "{app_title}"
app_publisher = "{app_publisher}"
app_description = "{app_description}"
app_icon = "{app_icon}"
app_color = "{app_color}"
app_email = "{app_email}"
app_license = "{app_license}"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/{app_name}/css/{app_name}.css"
# app_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js, css files in header of web template
# web_include_css = "/assets/{app_name}/css/{app_name}.css"
# web_include_js = "/assets/{app_name}/js/{app_name}.js"
# include js in page
# page_js = {{"page" : "public/js/file.js"}}
# include js in doctype views
# doctype_js = {{"doctype" : "public/js/doctype.js"}}
# doctype_list_js = {{"doctype" : "public/js/doctype_list.js"}}
# doctype_tree_js = {{"doctype" : "public/js/doctype_tree.js"}}
# doctype_calendar_js = {{"doctype" : "public/js/doctype_calendar.js"}}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {{
# "Role": "home_page"
# }}
# Website user home page (by function)
# get_website_user_home_page = "{app_name}.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "{app_name}.install.before_install"
# after_install = "{app_name}.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "{app_name}.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {{
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }}
#
# has_permission = {{
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }}
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {{
# "*": {{
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }}
# }}
# Scheduled Tasks
# ---------------
# scheduler_events = {{
# "all": [
# "{app_name}.tasks.all"
# ],
# "daily": [
# "{app_name}.tasks.daily"
# ],
# "hourly": [
# "{app_name}.tasks.hourly"
# ],
# "weekly": [
# "{app_name}.tasks.weekly"
# ]
# "monthly": [
# "{app_name}.tasks.monthly"
# ]
# }}
# Testing
# -------
# before_tests = "{app_name}.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {{
# "frappe.desk.doctype.event.event.get_events": "{app_name}.event.get_events"
# }}
"""
desktop_template = """# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{{
"module_name": "{app_title}",
"color": "{app_color}",
"icon": "{app_icon}",
"type": "module",
"label": _("{app_title}")
}}
]
"""
setup_template = """# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in {app_name}/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('{app_name}/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
requirements = parse_requirements("requirements.txt", session="")
setup(
name='{app_name}',
version=version,
description='{app_description}',
author='{app_publisher}',
author_email='{app_email}',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=[str(ir.req) for ir in requirements],
dependency_links=[str(ir._link) for ir in requirements if ir._link]
)
"""
gitignore_template = """.DS_Store
*.pyc
*.egg-info
*.swp
tags
{app_name}/docs/current"""
docs_template = '''"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/{app_name}"
# docs_base_url = "https://[org_name].github.io/{app_name}"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "{app_title}"
'''
|
|
import sys
sys.path.append('./../../..')
from mtSet.pycore.utils.utils import smartSum
import mtSet.pycore.covariance as covariance
import pdb
import numpy as NP
import scipy as SP
import scipy.linalg as LA
import sys
import time as TIME
from gp_base import GP
class gp3kronSumLR(GP):
def __init__(self,Y,Cg,Cn,X,rank=1,Xr=None,lazy=False,offset=1e-4):
"""
Y: Phenotype matrix
Cg: LIMIX trait-to-trait covariance for genetic contribution
Cn: LIMIX trait-to-trait covariance for noise
X: matrix for the population-structure row covariance
rank: rank of the low-rank region trait covariance Cr
Xr: SNPs in the region (optional)
lazy: if True, parameters are only re-set when they actually change
offset: small offset added to the trait covariance matrices
"""
# init cache
self.cache = {}
# pheno
self.setY(Y)
# colCovariances
self.setColCovars(rank,Cg,Cn)
# row covars
self.set_X(X)
if Xr is not None: self.set_Xr(Xr)
#offset for trait covariance matrices
self.setOffset(offset)
self.params = None
#lazy
self.lazy = lazy
# time
self.time = {}
self.count = {}
def get_time(self):
""" returns time dictionary """
return self.time
def get_count(self):
""" return count dictionary """
return self.count
def restart(self):
""" set all times to 0 """
for key in self.time.keys():
self.time[key] = 0
self.count[key] = 0
def setColCovars(self,rank,Cg,Cn):
"""
set column covariances
"""
self.rank = rank
# col covars
self.Cr = covariance.lowrank(self.P,self.rank)
self.Cr.setParams(1e-3*SP.randn(self.P*self.rank))
self.Cg = Cg
self.Cn = Cn
def setY(self,Y):
"""
set phenotype
"""
self.N,self.P = Y.shape
self.Y = Y
self.Y_has_changed = True
def setOffset(self,offset):
"""
set offset
"""
self.offset = offset
def set_X(self,X):
"""
set pop struct row covariance
"""
self.X = X
self.X_has_changed = True
def set_Xr(self,Xr):
"""
set SNPs in the region
"""
self.Xr = Xr
self.S = Xr.shape[1]
self.Xr_has_changed = True
def getParams(self):
"""
get hyperparameters
"""
params = {}
params['Cr'] = self.Cr.getParams()
params['Cg'] = self.Cg.getParams()
params['Cn'] = self.Cn.getParams()
return params
def setParams(self,params):
"""
set hyperparameters
"""
if self.lazy:
run_update = False
if self.params is None:
run_update = True
else:
if not(SP.allclose(self.params['Cr'],params['Cr'])):
run_update = True
if not(SP.allclose(self.params['Cn'],params['Cn'])):
run_update = True
if not(SP.allclose(self.params['Cg'],params['Cg'])):
run_update = True
else:
run_update = True
if run_update:
self.params = params
self.updateParams()
def updateParams(self):
"""
update parameters
"""
keys = self.params.keys()
if 'Cr' in keys:
self.Cr.setParams(self.params['Cr'])
if 'Cg' in keys:
self.Cg.setParams(self.params['Cg'])
if 'Cn' in keys:
self.Cn.setParams(self.params['Cn'])
def _update_cache(self):
"""
Update cache
"""
cov_params_have_changed = self.Cr.params_have_changed or self.Cg.params_have_changed or self.Cn.params_have_changed
if self.X_has_changed:
self.cache['trXX'] = SP.sum(self.X**2)
self.cache['XX'] = SP.dot(self.X.T,self.X)
self.cache['XY'] = SP.dot(self.X.T,self.Y)
self.cache['XXY'] = SP.dot(self.X,self.cache['XY'])
self.cache['XXXY'] = SP.dot(self.cache['XX'],self.cache['XY'])
self.cache['XXXX'] = SP.dot(self.cache['XX'],self.cache['XX'])
if self.Xr_has_changed:
self.cache['trXrXr'] = SP.sum(self.Xr**2)
self.cache['XrXr'] = SP.dot(self.Xr.T,self.Xr)
self.cache['XrY'] = SP.dot(self.Xr.T,self.Y)
self.cache['XrXrY'] = SP.dot(self.Xr,self.cache['XrY'])
self.cache['XrXrXrY'] = SP.dot(self.cache['XrXr'],self.cache['XrY'])
self.cache['XrXrXrXr'] = SP.dot(self.cache['XrXr'],self.cache['XrXr'])
if self.X_has_changed or self.Xr_has_changed:
self.cache['XXr'] = SP.dot(self.X.T,self.Xr)
self.cache['XXrXrY'] = SP.dot(self.cache['XXr'],self.cache['XrY'])
self.cache['XXrXrX'] = SP.dot(self.cache['XXr'],self.cache['XXr'].T)
self.cache['XrXXY'] = SP.dot(self.cache['XXr'].T,self.cache['XY'])
self.cache['XrXXXr'] = SP.dot(self.cache['XXr'].T,self.cache['XXr'])
self.cache['XrXrXrX'] = SP.dot(self.cache['XrXr'],self.cache['XXr'].T)
self.cache['XrXXX'] = SP.dot(self.cache['XXr'].T,self.cache['XX'])
if cov_params_have_changed:
start = TIME.time()
""" Col SVD Bg + Noise """
S2,U2 = LA.eigh(self.Cn.K()+self.offset*SP.eye(self.P))
self.cache['Sc2'] = S2
US2 = SP.dot(U2,SP.diag(SP.sqrt(S2)))
USi2 = SP.dot(U2,SP.diag(SP.sqrt(1./S2)))
self.cache['Lc'] = USi2.T
self.cache['Cstar'] = SP.dot(USi2.T,SP.dot(self.Cg.K(),USi2))
self.cache['Scstar'],Ucstar = LA.eigh(self.cache['Cstar'])
self.cache['CstarH'] = Ucstar*((self.cache['Scstar']**(0.5))[SP.newaxis,:])
E = SP.reshape(self.Cr.getParams(),(self.P,self.rank),order='F')
self.cache['Estar'] = SP.dot(USi2.T,E)
self.cache['CE'] = SP.dot(self.cache['CstarH'].T,self.cache['Estar'])
self.cache['EE'] = SP.dot(self.cache['Estar'].T,self.cache['Estar'])
if cov_params_have_changed or self.Y_has_changed:
self.cache['LY'] = SP.dot(self.Y,self.cache['Lc'].T)
if cov_params_have_changed or self.Xr_has_changed or self.Y_has_changed:
self.cache['XrLY'] = SP.dot(self.cache['XrY'],self.cache['Lc'].T)
self.cache['WLY1'] = SP.dot(self.cache['XrLY'],self.cache['Estar'])
self.cache['XrXrLY'] = SP.dot(self.cache['XrXrY'],self.cache['Lc'].T)
self.cache['XrXrXrLY'] = SP.dot(self.cache['XrXrXrY'],self.cache['Lc'].T)
if cov_params_have_changed or self.X_has_changed or self.Y_has_changed:
self.cache['XLY'] = SP.dot(self.cache['XY'],self.cache['Lc'].T)
self.cache['WLY2'] = SP.dot(self.cache['XLY'],self.cache['CstarH'])
self.cache['XXLY'] = SP.dot(self.cache['XXY'],self.cache['Lc'].T)
self.cache['XXXLY'] = SP.dot(self.cache['XXXY'],self.cache['Lc'].T)
if cov_params_have_changed or self.X_has_changed or self.Xr_has_changed:
""" calculate B """
B11 = SP.kron(self.cache['EE'],self.cache['XrXr'])
B11+= SP.eye(B11.shape[0])
B21 = SP.kron(self.cache['CE'],self.cache['XXr'])
B22 = SP.kron(SP.diag(self.cache['Scstar']),self.cache['XX'])
B22+= SP.eye(B22.shape[0])
B = SP.bmat([[B11,B21.T],[B21,B22]])
self.cache['cholB'] = LA.cholesky(B).T
self.cache['Bi'] = LA.cho_solve((self.cache['cholB'],True),SP.eye(B.shape[0]))
if cov_params_have_changed or self.X_has_changed or self.Xr_has_changed or self.Y_has_changed:
self.cache['WLY'] = SP.concatenate([SP.reshape(self.cache['WLY1'],(self.cache['WLY1'].size,1),order='F'),
SP.reshape(self.cache['WLY2'],(self.cache['WLY2'].size,1),order='F')])
self.cache['BiWLY'] = SP.dot(self.cache['Bi'],self.cache['WLY'])
self.cache['XXrXrLY'] = SP.dot(self.cache['XXrXrY'],self.cache['Lc'].T)
self.cache['XrXXLY'] = SP.dot(self.cache['XrXXY'],self.cache['Lc'].T)
self.Xr_has_changed = False
self.X_has_changed = False
self.Y_has_changed = False
self.Cr.params_have_changed = False
self.Cg.params_have_changed = False
self.Cn.params_have_changed = False
def LML(self,params=None,*kw_args):
"""
calculate LML
"""
if params is not None:
self.setParams(params)
self._update_cache()
start = TIME.time()
#1. const term
lml = self.N*self.P*SP.log(2*SP.pi)
#2. logdet term
lml += SP.sum(SP.log(self.cache['Sc2']))*self.N
lml += 2*SP.log(SP.diag(self.cache['cholB'])).sum()
#3. quadratic term
lml += SP.sum(self.cache['LY']*self.cache['LY'])
lml -= SP.sum(self.cache['WLY']*self.cache['BiWLY'])
lml *= 0.5
smartSum(self.time,'lml',TIME.time()-start)
smartSum(self.count,'lml',1)
return lml
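# A sketch of the quantity returned above: for the implied covariance
#   K = kron(Cr, Xr*Xr') + kron(Cg, X*X') + kron(Cn, I_N)
# (up to the small diagonal offset on Cn), it is
#   LML = 0.5 * (N*P*log(2*pi) + log|K| + y' * inv(K) * y),
# with the log-determinant and the quadratic form evaluated through the
# cached low-rank decomposition; LMLdebug below computes the same value densely.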
def LMLdebug(self):
"""
LML function for debug
"""
assert self.N*self.P<5000, 'gp3kronSum:: N*P>=5000'
Rr = SP.dot(self.Xr,self.Xr.T)
XX = SP.dot(self.X,self.X.T)
y = SP.reshape(self.Y,(self.N*self.P), order='F')
K = SP.kron(self.Cr.K(),Rr)
K += SP.kron(self.Cg.K(),XX)
K += SP.kron(self.Cn.K()+1e-4*SP.eye(self.P),SP.eye(self.N))
cholK = LA.cholesky(K)
Kiy = LA.cho_solve((cholK,False),y)
lml = y.shape[0]*SP.log(2*SP.pi)
lml += 2*SP.log(SP.diag(cholK)).sum()
lml += SP.dot(y,Kiy)
lml *= 0.5
return lml
def LMLgrad(self,params=None,**kw_args):
"""
LML gradient
"""
if params is not None:
self.setParams(params)
self._update_cache()
RV = {}
covars = ['Cr','Cg','Cn']
for covar in covars:
RV[covar] = self._LMLgrad_covar(covar)
return RV
def _LMLgrad_covar(self,covar,**kw_args):
"""
calculates LMLgrad for covariance parameters
"""
start = TIME.time()
# precompute some stuff
if covar=='Cr': n_params = self.Cr.getNumberParams()
elif covar=='Cg': n_params = self.Cg.getNumberParams()
elif covar=='Cn': n_params = self.Cn.getNumberParams()
if covar=='Cr':
trR = self.cache['trXrXr']
RY = self.cache['XrXrY']
RLY = self.cache['XrXrLY']
WrRY1 = self.cache['XrXrXrY']
WrRY2 = self.cache['XXrXrY']
WrRLY1 = self.cache['XrXrXrLY']
WrRLY2 = self.cache['XXrXrLY']
XrRXr = self.cache['XrXrXrXr']
XrRX = self.cache['XrXrXrX']
XRX = self.cache['XXrXrX']
elif covar=='Cg':
trR = self.cache['trXX']
RY = self.cache['XXY']
RLY = self.cache['XXLY']
WrRY1 = self.cache['XrXXY']
WrRY2 = self.cache['XXXY']
WrRLY1 = self.cache['XrXXLY']
WrRLY2 = self.cache['XXXLY']
XrRXr = self.cache['XrXXXr']
XrRX = self.cache['XrXXX']
XRX = self.cache['XXXX']
else:
trR = self.N
RY = self.Y
RLY = self.cache['LY']
WrRY1 = self.cache['XrY']
WrRY2 = self.cache['XY']
WrRLY1 = self.cache['XrLY']
WrRLY2 = self.cache['XLY']
XrRXr = self.cache['XrXr']
XrRX = self.cache['XXr'].T
XRX = self.cache['XX']
smartSum(self.time,'lmlgrad_trace2_rKDW_%s'%covar,TIME.time()-start)
smartSum(self.count,'lmlgrad_trace2_rKDW_%s'%covar,1)
# fill gradient vector
RV = SP.zeros(n_params)
for i in range(n_params):
#0. calc LCL
if covar=='Cr': C = self.Cr.Kgrad_param(i)
elif covar=='Cg': C = self.Cg.Kgrad_param(i)
elif covar=='Cn': C = self.Cn.Kgrad_param(i)
LCL = SP.dot(self.cache['Lc'],SP.dot(C,self.cache['Lc'].T))
ELCL = SP.dot(self.cache['Estar'].T,LCL)
ELCLE = SP.dot(ELCL,self.cache['Estar'])
ELCLCsh = SP.dot(ELCL,self.cache['CstarH'])
CshLCL = SP.dot(self.cache['CstarH'].T,LCL)
CshLCLCsh = SP.dot(CshLCL,self.cache['CstarH'])
# WCoRW
WCoRW11 = SP.kron(ELCLE,XrRXr)
WCoRW12 = SP.kron(ELCLCsh,XrRX)
WCoRW22 = SP.kron(CshLCLCsh,XRX)
WCoRW = SP.array(SP.bmat([[WCoRW11,WCoRW12],[WCoRW12.T,WCoRW22]]))
# WCoRLY
WCoRLY1 = SP.dot(WrRLY1,ELCL.T)
WCoRLY2 = SP.dot(WrRLY2,CshLCL.T)
WCoRLY = SP.concatenate([SP.reshape(WCoRLY1,(WCoRLY1.size,1),order='F'),
SP.reshape(WCoRLY2,(WCoRLY2.size,1),order='F')])
# CoRLY
CoRLY = SP.dot(RLY,LCL.T)
#1. der of log det
start = TIME.time()
trC = LCL.diagonal().sum()
RV[i] = trC*trR
RV[i]-= SP.sum(self.cache['Bi']*WCoRW)
smartSum(self.time,'lmlgrad_trace2_WDKDW_%s'%covar,TIME.time()-start)
smartSum(self.count,'lmlgrad_trace2_WDKDW_%s'%covar,1)
#2. der of quad form
start = TIME.time()
RV[i] -= SP.sum(self.cache['LY']*CoRLY)
RV[i] -= SP.sum(self.cache['BiWLY']*SP.dot(WCoRW,self.cache['BiWLY']))
RV[i] += 2*SP.sum(self.cache['BiWLY']*WCoRLY)
smartSum(self.time,'lmlgrad_quadForm_%s'%covar,TIME.time()-start)
smartSum(self.count,'lmlgrad_quadForm_%s'%covar,1)
RV[i] *= 0.5
return RV
def LMLgrad_debug(self,params=None,**kw_args):
"""
LML gradient
"""
if params is not None:
self.setParams(params)
RV = {}
covars = ['Cr','Cg','Cn']
for covar in covars:
RV[covar] = self._LMLgrad_covar_debug(covar)
return RV
def _LMLgrad_covar_debug(self,covar):
assert self.N*self.P<5000, 'gp3kronSum:: N*P>=5000'
Rr = SP.dot(self.Xr,self.Xr.T)
XX = SP.dot(self.X,self.X.T)
y = SP.reshape(self.Y,(self.N*self.P), order='F')
K = SP.kron(self.Cr.K(),Rr)
K += SP.kron(self.Cg.K(),XX)
K += SP.kron(self.Cn.K()+1e-4*SP.eye(self.P),SP.eye(self.N))
cholK = LA.cholesky(K).T
Ki = LA.cho_solve((cholK,True),SP.eye(y.shape[0]))
Kiy = LA.cho_solve((cholK,True),y)
if covar=='Cr': n_params = self.Cr.getNumberParams()
elif covar=='Cg': n_params = self.Cg.getNumberParams()
elif covar=='Cn': n_params = self.Cn.getNumberParams()
RV = SP.zeros(n_params)
for i in range(n_params):
#0. calc grad_i
if covar=='Cr':
C = self.Cr.Kgrad_param(i)
Kgrad = SP.kron(C,Rr)
elif covar=='Cg':
C = self.Cg.Kgrad_param(i)
Kgrad = SP.kron(C,XX)
elif covar=='Cn':
C = self.Cn.Kgrad_param(i)
Kgrad = SP.kron(C,SP.eye(self.N))
#1. der of log det
RV[i] = 0.5*(Ki*Kgrad).sum()
#2. der of quad form
RV[i] -= 0.5*(Kiy*SP.dot(Kgrad,Kiy)).sum()
return RV
if 0:
def LMLgrad(self,hyperparams,**kw_args):
"""
evaluates the gradient of the log marginal likelihood
Input:
hyperparams: dictionary
priors: prior beliefs for the hyperparameter
"""
self._update_inputs(hyperparams)
RV = {}
# gradient with respect to hyperparameters
RV.update(self._LMLgrad_covar(hyperparams))
# gradient with respect to noise parameters
if self.likelihood is not None:
RV.update(self._LMLgrad_lik(hyperparams))
# gradient with respect to X
RV.update(self._LMLgrad_x(hyperparams))
return RV
def _LMLgrad_x(self,hyperparams):
"""
evaluates the gradient of the log marginal likelihood with
respect to the latent variables
"""
try:
KV = self.get_covariances(hyperparams)
except LA.LinAlgError:
LG.error('linalg exception in _LML_grad_x')
return {'X': SP.zeros(hyperparams['X'].shape)}
W = KV['W']
LMLgrad = SP.zeros((self.n,self.d))
for d in xrange(self.d):
Kd_grad = self.covar.Kgrad_x(hyperparams['covar'],self.X,None,d)
LMLgrad[:,d] = SP.sum(W*Kd_grad,axis=0)
if self.debugging:
# compare to explicit solution
LMLgrad2 = SP.zeros((self.n,self.d))
for n in xrange(self.n):
for d in xrange(self.d):
Knd_grad = self.covar.Kgrad_x(hyperparams['covar'],self.X,n,d)
LMLgrad2[n,d] = 0.5*(W*Knd_grad).sum()
assert SP.allclose(LMLgrad,LMLgrad2), 'ouch, something is wrong'
return {'X':LMLgrad}
def _update_inputs(self,hyperparams):
""" update the inputs from gplvm model """
if 'X' in hyperparams:
self.X = hyperparams['X']
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Quotas for instances, volumes, and floating ips."""
import datetime
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
quota_opts = [
cfg.IntOpt('quota_instances',
default=10,
help='number of instances allowed per project'),
cfg.IntOpt('quota_cores',
default=20,
help='number of instance cores allowed per project'),
cfg.IntOpt('quota_ram',
default=50 * 1024,
help='megabytes of instance ram allowed per project'),
cfg.IntOpt('quota_volumes',
default=10,
help='number of volumes allowed per project'),
cfg.IntOpt('quota_gigabytes',
default=1000,
help='number of volume gigabytes allowed per project'),
cfg.IntOpt('quota_floating_ips',
default=10,
help='number of floating ips allowed per project'),
cfg.IntOpt('quota_fixed_ips',
default=-1,
help=('number of fixed ips allowed per project (this should be '
'at least the number of instances allowed)')),
cfg.IntOpt('quota_metadata_items',
default=128,
help='number of metadata items allowed per instance'),
cfg.IntOpt('quota_injected_files',
default=5,
help='number of injected files allowed'),
cfg.IntOpt('quota_injected_file_content_bytes',
default=10 * 1024,
help='number of bytes allowed per injected file'),
cfg.IntOpt('quota_injected_file_path_bytes',
default=255,
help='number of bytes allowed per injected file path'),
cfg.IntOpt('quota_security_groups',
default=10,
help='number of security groups per project'),
cfg.IntOpt('quota_security_group_rules',
default=20,
help='number of security rules per security group'),
cfg.IntOpt('quota_key_pairs',
default=100,
help='number of key pairs per user'),
cfg.IntOpt('reservation_expire',
default=86400,
help='number of seconds until a reservation expires'),
cfg.IntOpt('until_refresh',
default=0,
help='count of reservations until usage is refreshed'),
cfg.IntOpt('max_age',
default=0,
help='number of seconds between subsequent usage refreshes'),
cfg.StrOpt('quota_driver',
default='nova.quota.DbQuotaDriver',
help='default driver to use for quota checks'),
]
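# These options may be overridden in nova.conf; a hypothetical sketch:
#   [DEFAULT]
#   quota_instances = 20
#   quota_cores = 40
#   quota_driver = nova.quota.DbQuotaDriver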
FLAGS = flags.FLAGS
FLAGS.register_opts(quota_opts)
class DbQuotaDriver(object):
"""
Driver to perform necessary checks to enforce quotas and obtain
quota information. The default driver utilizes the local
database.
"""
def get_by_project(self, context, project_id, resource):
"""Get a specific quota by project."""
return db.quota_get(context, project_id, resource)
def get_by_class(self, context, quota_class, resource):
"""Get a specific quota by quota class."""
return db.quota_class_get(context, quota_class, resource)
def get_defaults(self, context, resources):
"""Given a list of resources, retrieve the default quotas.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
"""
quotas = {}
for resource in resources.values():
quotas[resource.name] = resource.default
return quotas
def get_class_quotas(self, context, resources, quota_class,
defaults=True):
"""
Given a list of resources, retrieve the quotas for the given
quota class.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param quota_class: The name of the quota class to return
quotas for.
:param defaults: If True, the default value will be reported
if there is no specific value for the
resource.
"""
quotas = {}
class_quotas = db.quota_class_get_all_by_name(context, quota_class)
for resource in resources.values():
if defaults or resource.name in class_quotas:
quotas[resource.name] = class_quotas.get(resource.name,
resource.default)
return quotas
def get_project_quotas(self, context, resources, project_id,
quota_class=None, defaults=True,
usages=True):
"""
Given a list of resources, retrieve the quotas for the given
project.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param project_id: The ID of the project to return quotas for.
:param quota_class: If project_id != context.project_id, the
quota class cannot be determined. This
parameter allows it to be specified. It
will be ignored if project_id ==
context.project_id.
:param defaults: If True, the quota class value (or the
default value, if there is no value from the
quota class) will be reported if there is no
specific value for the resource.
:param usages: If True, the current in_use and reserved counts
will also be returned.
"""
quotas = {}
project_quotas = db.quota_get_all_by_project(context, project_id)
if usages:
project_usages = db.quota_usage_get_all_by_project(context,
project_id)
# Get the quotas for the appropriate class. If the project ID
# matches the one in the context, we use the quota_class from
# the context, otherwise, we use the provided quota_class (if
# any)
if project_id == context.project_id:
quota_class = context.quota_class
if quota_class:
class_quotas = db.quota_class_get_all_by_name(context, quota_class)
else:
class_quotas = {}
for resource in resources.values():
# Omit default/quota class values
if not defaults and resource.name not in project_quotas:
continue
quotas[resource.name] = dict(
limit=project_quotas.get(resource.name, class_quotas.get(
resource.name, resource.default)),
)
# Include usages if desired. This is optional because one
# internal consumer of this interface wants to access the
# usages directly from inside a transaction.
if usages:
usage = project_usages.get(resource.name, {})
quotas[resource.name].update(
in_use=usage.get('in_use', 0),
reserved=usage.get('reserved', 0),
)
return quotas
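# A sketch of the structure returned above (usages=True, hypothetical numbers):
#   {'instances': {'limit': 10, 'in_use': 2, 'reserved': 0},
#    'cores': {'limit': 20, 'in_use': 4, 'reserved': 0}, ...}
# With usages=False each entry only carries the 'limit' key.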
def _get_quotas(self, context, resources, keys, has_sync):
"""
A helper method which retrieves the quotas for the specific
resources identified by keys, and which apply to the current
context.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param keys: A list of the desired quotas to retrieve.
:param has_sync: If True, indicates that the resource must
have a sync attribute; if False, indicates
that the resource must NOT have a sync
attribute.
"""
# Filter resources
if has_sync:
sync_filt = lambda x: hasattr(x, 'sync')
else:
sync_filt = lambda x: not hasattr(x, 'sync')
desired = set(keys)
sub_resources = dict((k, v) for k, v in resources.items()
if k in desired and sync_filt(v))
# Make sure we accounted for all of them...
if len(keys) != len(sub_resources):
unknown = desired - set(sub_resources.keys())
raise exception.QuotaResourceUnknown(unknown=sorted(unknown))
# Grab and return the quotas (without usages)
quotas = self.get_project_quotas(context, sub_resources,
context.project_id,
context.quota_class, usages=False)
return dict((k, v['limit']) for k, v in quotas.items())
def limit_check(self, context, resources, values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
synchronization function--this method checks that a set of
proposed values are permitted by the limit restriction.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it is not a simple limit
resource.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns
nothing.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param values: A dictionary of the values to check against the
quota.
"""
# Ensure no value is less than zero
unders = [key for key, val in values.items() if val < 0]
if unders:
raise exception.InvalidQuotaValue(unders=sorted(unders))
# Get the applicable quotas
quotas = self._get_quotas(context, resources, values.keys(),
has_sync=False)
# Check the quotas and construct a list of the resources that
# would be put over limit by the desired values
overs = [key for key, val in values.items()
if quotas[key] >= 0 and quotas[key] < val]
if overs:
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages={})
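# A usage sketch (hypothetical values) for the simple-limit check above:
#   driver.limit_check(context, resources,
#                      {'metadata_items': 128, 'injected_files': 5})
# raises OverQuota listing the offending resources if any value exceeds its
# limit, and InvalidQuotaValue if any proposed value is negative.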
def reserve(self, context, resources, deltas, expire=None):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
synchronization function--this method checks quotas against
current usage and the desired deltas.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it does not have a usage
synchronization function.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns a
list of reservation UUIDs which were created.
:param context: The request context, for access checks.
:param resources: A dictionary of the registered resources.
:param deltas: A dictionary of the proposed delta changes.
:param expire: An optional parameter specifying an expiration
time for the reservations. If it is a simple
number, it is interpreted as a number of
seconds and added to the current time; if it is
a datetime.timedelta object, it will also be
added to the current time. A datetime.datetime
object will be interpreted as the absolute
expiration time. If None is specified, the
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
"""
# Set up the reservation expiration
if expire is None:
expire = FLAGS.reservation_expire
if isinstance(expire, (int, long)):
expire = datetime.timedelta(seconds=expire)
if isinstance(expire, datetime.timedelta):
expire = timeutils.utcnow() + expire
if not isinstance(expire, datetime.datetime):
raise exception.InvalidReservationExpiration(expire=expire)
# Get the applicable quotas.
# NOTE(Vek): We're not worried about races at this point.
# Yes, the admin may be in the process of reducing
# quotas, but that's a pretty rare thing.
quotas = self._get_quotas(context, resources, deltas.keys(),
has_sync=True)
# NOTE(Vek): Most of the work here has to be done in the DB
# API, because we have to do it in a transaction,
# which means access to the session. Since the
# session isn't available outside the DBAPI, we
# have to do the work there.
return db.quota_reserve(context, resources, quotas, deltas, expire,
FLAGS.until_refresh, FLAGS.max_age)
def commit(self, context, reservations):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
"""
db.reservation_commit(context, reservations)
def rollback(self, context, reservations):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
"""
db.reservation_rollback(context, reservations)
def destroy_all_by_project(self, context, project_id):
"""
Destroy all quotas, usages, and reservations associated with a
project.
:param context: The request context, for access checks.
:param project_id: The ID of the project being deleted.
"""
db.quota_destroy_all_by_project(context, project_id)
def expire(self, context):
"""Expire reservations.
Explores all currently existing reservations and rolls back
any that have expired.
:param context: The request context, for access checks.
"""
db.reservation_expire(context)
class BaseResource(object):
"""Describe a single resource for quota checking."""
def __init__(self, name, flag=None):
"""
Initializes a Resource.
:param name: The name of the resource, i.e., "instances".
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
"""
self.name = name
self.flag = flag
def quota(self, driver, context, **kwargs):
"""
Given a driver and context, obtain the quota for this
resource.
:param driver: A quota driver.
:param context: The request context.
:param project_id: The project to obtain the quota value for.
If not provided, it is taken from the
context. If it is given as None, no
project-specific quota will be searched
for.
:param quota_class: The quota class corresponding to the
project, or for which the quota is to be
looked up. If not provided, it is taken
from the context. If it is given as None,
no quota class-specific quota will be
searched for. Note that the quota class
defaults to the value in the context,
which may not correspond to the project if
project_id is not the same as the one in
the context.
"""
# Get the project ID
project_id = kwargs.get('project_id', context.project_id)
# Ditto for the quota class
quota_class = kwargs.get('quota_class', context.quota_class)
# Look up the quota for the project
if project_id:
try:
return driver.get_by_project(context, project_id, self.name)
except exception.ProjectQuotaNotFound:
pass
# Try for the quota class
if quota_class:
try:
return driver.get_by_class(context, quota_class, self.name)
except exception.QuotaClassNotFound:
pass
# OK, return the default
return self.default
@property
def default(self):
"""Return the default value of the quota."""
return FLAGS[self.flag] if self.flag else -1
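# Editorial sketch (not part of the original module): how a BaseResource
# resolves a quota value, per the quota() docstring above -- a project-specific
# value first, then the quota class, then the flag-backed default. The resource
# and flag names are taken from the resource list further down in this module;
# the helper itself is illustrative and never called.
def _example_resource_lookup(driver, context):
    instances = BaseResource('instances', flag='quota_instances')
    # Falls through ProjectQuotaNotFound and QuotaClassNotFound to
    # instances.default, which reads FLAGS['quota_instances'] (or returns -1
    # for a resource constructed without a flag).
    return instances.quota(driver, context)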
class ReservableResource(BaseResource):
"""Describe a reservable resource."""
def __init__(self, name, sync, flag=None):
"""
Initializes a ReservableResource.
Reservable resources are those resources which directly
correspond to objects in the database, i.e., instances, cores,
etc. A ReservableResource must be constructed with a usage
synchronization function, which will be called to determine the
current counts of one or more resources.
The usage synchronization function will be passed three
arguments: an admin context, the project ID, and an opaque
session object, which should in turn be passed to the
underlying database function. Synchronization functions
should return a dictionary mapping resource names to the
current in_use count for those resources; more than one
resource and resource count may be returned. Note that
synchronization functions may be associated with more than one
ReservableResource.
:param name: The name of the resource, i.e., "instances".
:param sync: A callable which returns a dictionary to
resynchronize the in_use count for one or more
resources, as described above.
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
"""
super(ReservableResource, self).__init__(name, flag=flag)
self.sync = sync
class AbsoluteResource(BaseResource):
"""Describe a non-reservable resource."""
pass
class CountableResource(AbsoluteResource):
"""
Describe a resource where the counts aren't based solely on the
project ID.
"""
def __init__(self, name, count, flag=None):
"""
Initializes a CountableResource.
Countable resources are those resources which directly
correspond to objects in the database, i.e., instances, cores,
etc., but for which a count by project ID is inappropriate. A
CountableResource must be constructed with a counting
function, which will be called to determine the current counts
of the resource.
The counting function will be passed the context, along with
the extra positional and keyword arguments that are passed to
Quota.count(). It should return an integer specifying the
count.
Note that this counting is not performed in a transaction-safe
manner. This resource class is a temporary measure to provide
        required functionality, until a better approach to solving
        this problem is developed.
:param name: The name of the resource, i.e., "instances".
:param count: A callable which returns the count of the
resource. The arguments passed are as described
above.
:param flag: The name of the flag or configuration option
which specifies the default value of the quota
for this resource.
"""
super(CountableResource, self).__init__(name, flag=flag)
self.count = count
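# Editorial sketch: constructing a CountableResource whose count is not keyed
# by project ID alone, as the docstring above describes. The resource name and
# the stand-in counting lambda are illustrative; real registrations pass a
# db-level counter such as db.security_group_rule_count_by_group (see the
# `resources` list at the bottom of this module).
def _example_countable_resource():
    return CountableResource(
        'example_widgets',                   # illustrative resource name
        lambda context, *args, **kwargs: 0,  # stand-in counter, always 0
        flag=None,                           # no flag, so the default limit is -1
    )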
class QuotaEngine(object):
"""Represent the set of recognized quotas."""
def __init__(self, quota_driver_class=None):
"""Initialize a Quota object."""
if not quota_driver_class:
quota_driver_class = FLAGS.quota_driver
if isinstance(quota_driver_class, basestring):
quota_driver_class = importutils.import_object(quota_driver_class)
self._resources = {}
self._driver = quota_driver_class
def __contains__(self, resource):
return resource in self._resources
def register_resource(self, resource):
"""Register a resource."""
self._resources[resource.name] = resource
def register_resources(self, resources):
"""Register a list of resources."""
for resource in resources:
self.register_resource(resource)
def get_by_project(self, context, project_id, resource):
"""Get a specific quota by project."""
return self._driver.get_by_project(context, project_id, resource)
def get_by_class(self, context, quota_class, resource):
"""Get a specific quota by quota class."""
return self._driver.get_by_class(context, quota_class, resource)
def get_defaults(self, context):
"""Retrieve the default quotas.
:param context: The request context, for access checks.
"""
return self._driver.get_defaults(context, self._resources)
def get_class_quotas(self, context, quota_class, defaults=True):
"""Retrieve the quotas for the given quota class.
:param context: The request context, for access checks.
:param quota_class: The name of the quota class to return
quotas for.
:param defaults: If True, the default value will be reported
if there is no specific value for the
resource.
"""
return self._driver.get_class_quotas(context, self._resources,
quota_class, defaults=defaults)
def get_project_quotas(self, context, project_id, quota_class=None,
defaults=True, usages=True):
"""Retrieve the quotas for the given project.
:param context: The request context, for access checks.
:param project_id: The ID of the project to return quotas for.
:param quota_class: If project_id != context.project_id, the
quota class cannot be determined. This
parameter allows it to be specified.
:param defaults: If True, the quota class value (or the
default value, if there is no value from the
quota class) will be reported if there is no
specific value for the resource.
:param usages: If True, the current in_use and reserved counts
will also be returned.
"""
return self._driver.get_project_quotas(context, self._resources,
project_id,
quota_class=quota_class,
defaults=defaults,
usages=usages)
def count(self, context, resource, *args, **kwargs):
"""Count a resource.
For countable resources, invokes the count() function and
returns its result. Arguments following the context and
resource are passed directly to the count function declared by
the resource.
:param context: The request context, for access checks.
:param resource: The name of the resource, as a string.
"""
# Get the resource
res = self._resources.get(resource)
if not res or not hasattr(res, 'count'):
raise exception.QuotaResourceUnknown(unknown=[resource])
return res.count(context, *args, **kwargs)
def limit_check(self, context, **values):
"""Check simple quota limits.
For limits--those quotas for which there is no usage
synchronization function--this method checks that a set of
proposed values are permitted by the limit restriction. The
values to check are given as keyword arguments, where the key
identifies the specific quota limit to check, and the value is
the proposed value.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it is not a simple limit
resource.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns
nothing.
:param context: The request context, for access checks.
"""
return self._driver.limit_check(context, self._resources, values)
def reserve(self, context, expire=None, **deltas):
"""Check quotas and reserve resources.
For counting quotas--those quotas for which there is a usage
synchronization function--this method checks quotas against
current usage and the desired deltas. The deltas are given as
keyword arguments, and current usage and other reservations
are factored into the quota check.
This method will raise a QuotaResourceUnknown exception if a
given resource is unknown or if it does not have a usage
synchronization function.
If any of the proposed values is over the defined quota, an
OverQuota exception will be raised with the sorted list of the
resources which are too high. Otherwise, the method returns a
list of reservation UUIDs which were created.
:param context: The request context, for access checks.
:param expire: An optional parameter specifying an expiration
time for the reservations. If it is a simple
number, it is interpreted as a number of
seconds and added to the current time; if it is
a datetime.timedelta object, it will also be
added to the current time. A datetime.datetime
object will be interpreted as the absolute
expiration time. If None is specified, the
default expiration time set by
--default-reservation-expire will be used (this
value will be treated as a number of seconds).
"""
reservations = self._driver.reserve(context, self._resources, deltas,
expire=expire)
LOG.debug(_("Created reservations %(reservations)s") % locals())
return reservations
def commit(self, context, reservations):
"""Commit reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
"""
try:
self._driver.commit(context, reservations)
except Exception:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to commit reservations "
"%(reservations)s") % locals())
def rollback(self, context, reservations):
"""Roll back reservations.
:param context: The request context, for access checks.
:param reservations: A list of the reservation UUIDs, as
returned by the reserve() method.
"""
try:
self._driver.rollback(context, reservations)
except Exception:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to roll back reservations "
"%(reservations)s") % locals())
def destroy_all_by_project(self, context, project_id):
"""
Destroy all quotas, usages, and reservations associated with a
project.
:param context: The request context, for access checks.
:param project_id: The ID of the project being deleted.
"""
self._driver.destroy_all_by_project(context, project_id)
def expire(self, context):
"""Expire reservations.
Explores all currently existing reservations and rolls back
any that have expired.
:param context: The request context, for access checks.
"""
self._driver.expire(context)
@property
def resources(self):
return sorted(self._resources.keys())
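# Editorial sketch: checking simple (non-reservable) limits through a
# QuotaEngine, per limit_check() above. The keyword must name a registered
# AbsoluteResource; 'metadata_items' comes from the resource list below, while
# the value 5 is illustrative.
def _example_limit_check(context, quota_engine):
    # Raises OverQuota if 5 exceeds the applicable metadata_items limit, or
    # QuotaResourceUnknown if the name is not a simple limit resource.
    quota_engine.limit_check(context, metadata_items=5)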
def _sync_instances(context, project_id, session):
return dict(zip(('instances', 'cores', 'ram'),
db.instance_data_get_for_project(
context, project_id, session=session)))
def _sync_volumes(context, project_id, session):
return dict(zip(('volumes', 'gigabytes'),
db.volume_data_get_for_project(
context, project_id, session=session)))
def _sync_floating_ips(context, project_id, session):
return dict(floating_ips=db.floating_ip_count_by_project(
context, project_id, session=session))
def _sync_fixed_ips(context, project_id, session):
return dict(fixed_ips=db.fixed_ip_count_by_project(
context, project_id, session=session))
def _sync_security_groups(context, project_id, session):
return dict(security_groups=db.security_group_count_by_project(
context, project_id, session=session))
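# Editorial sketch of the usage-synchronization contract described in
# ReservableResource above: a sync callable receives (context, project_id,
# session) and returns a dict mapping resource names to in_use counts.
# `_count_example_widgets` is a hypothetical DB helper used only for
# illustration; it is not part of the real db API.
def _example_sync_widgets(context, project_id, session):
    in_use = _count_example_widgets(context, project_id, session=session)  # hypothetical helper
    return dict(example_widgets=in_use)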
QUOTAS = QuotaEngine()
resources = [
ReservableResource('instances', _sync_instances, 'quota_instances'),
ReservableResource('cores', _sync_instances, 'quota_cores'),
ReservableResource('ram', _sync_instances, 'quota_ram'),
ReservableResource('volumes', _sync_volumes, 'quota_volumes'),
ReservableResource('gigabytes', _sync_volumes, 'quota_gigabytes'),
ReservableResource('floating_ips', _sync_floating_ips,
'quota_floating_ips'),
ReservableResource('fixed_ips', _sync_fixed_ips, 'quota_fixed_ips'),
AbsoluteResource('metadata_items', 'quota_metadata_items'),
AbsoluteResource('injected_files', 'quota_injected_files'),
AbsoluteResource('injected_file_content_bytes',
'quota_injected_file_content_bytes'),
AbsoluteResource('injected_file_path_bytes',
'quota_injected_file_path_bytes'),
ReservableResource('security_groups', _sync_security_groups,
'quota_security_groups'),
CountableResource('security_group_rules',
db.security_group_rule_count_by_group,
'quota_security_group_rules'),
CountableResource('key_pairs', db.key_pair_count_by_user,
'quota_key_pairs'),
]
QUOTAS.register_resources(resources)
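# Editorial sketch: the reserve -> commit / rollback pattern a caller would use
# against the module-level QUOTAS engine. The delta values (one instance, one
# core, 512 MB of RAM) are illustrative only.
def _example_reserve_and_commit(context):
    reservations = QUOTAS.reserve(context, instances=1, cores=1, ram=512)
    try:
        # ... create the actual resources here ...
        QUOTAS.commit(context, reservations)
    except Exception:
        QUOTAS.rollback(context, reservations)
        raise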
|
|
"""Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.test_support
import test.string_tests
import test.buffer_tests
if sys.flags.bytes_warning:
def check_bytes_warnings(func):
@functools.wraps(func)
def wrapper(*args, **kw):
with test.test_support.check_warnings(('', BytesWarning)):
return func(*args, **kw)
return wrapper
else:
# no-op
def check_bytes_warnings(func):
return func
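# Editorial note: BytesWarning is only emitted when the interpreter runs with
# -b (sys.flags.bytes_warning), which is why the decorator above only wraps
# tests in check_warnings in that mode. Illustrative use (mirrors
# test_compare_to_str below; the comparison may warn under `python -b`):
#
#     @check_bytes_warnings
#     def test_something(self):
#         self.assertFalse(bytearray(b"abc") == u"abc")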
class Indexable:
def __init__(self, value=0):
self.value = value
def __index__(self):
return self.value
class BaseBytesTest(unittest.TestCase):
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxint])
self.assertRaises(IndexError, lambda: b[sys.maxint+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxint])
self.assertRaises(IndexError, lambda: b[-sys.maxint-1])
self.assertRaises(IndexError, lambda: b[-sys.maxint-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
# allowed in 2.x
#self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxint])
self.assertRaises(ValueError, self.type2test, [sys.maxint+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
@check_bytes_warnings
def test_compare_to_str(self):
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character sizes
self.assertEqual(self.type2test(b"\0a\0b\0c") == u"abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == u"abc", False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == u"abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == u"abc", False)
self.assertEqual(self.type2test() == unicode(), False)
self.assertEqual(self.type2test() != unicode(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
def test_encoding(self):
sample = u"Hello world\n\u1234\u5678\u9abc\udef0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b, self.type2test(sample.encode(enc)))
self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin1")
b = self.type2test(sample, "latin1", "ignore")
self.assertEqual(b, self.type2test(sample[:-4], "utf-8"))
def test_decode(self):
sample = u"Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = u"Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
self.assertEqual(b.decode(errors="ignore", encoding="utf8"),
"Hello world\n")
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + u"def")
self.assertRaises(TypeError, lambda: u"abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
self.assertRaises((OverflowError, MemoryError),
lambda: b * sys.maxsize)
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: u"a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEqual(self.type2test.fromhex(u''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30])
self.assertEqual(self.type2test.fromhex(u'1a2B30'), b)
self.assertEqual(self.type2test.fromhex(u' 1A 2B 30 '), b)
self.assertEqual(self.type2test.fromhex(u'0000'), b'\0\0')
self.assertRaises(ValueError, self.type2test.fromhex, u'a')
self.assertRaises(ValueError, self.type2test.fromhex, u'rt')
self.assertRaises(ValueError, self.type2test.fromhex, u'1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, u'\x00')
self.assertRaises(ValueError, self.type2test.fromhex, u'12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
# XXX more...
def test_count(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
def test_find(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
def test_rfind(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
def test_index(self):
b = self.type2test(b'world')
self.assertEqual(b.index(b'w'), 0)
self.assertEqual(b.index(b'orl'), 1)
self.assertRaises(ValueError, b.index, b'worm')
self.assertRaises(ValueError, b.index, b'ldo')
def test_rindex(self):
# XXX could be more rigorous
b = self.type2test(b'world')
self.assertEqual(b.rindex(b'w'), 0)
self.assertEqual(b.rindex(b'orl'), 1)
self.assertRaises(ValueError, b.rindex, b'worm')
self.assertRaises(ValueError, b.rindex, b'ldo')
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
self.assertEqual(b.split(), [b])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, u' ')
def test_split_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, u' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
b = self.type2test(b)
ps = pickle.dumps(b, proto)
q = pickle.loads(ps)
self.assertEqual(b, q)
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, u'b')
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_none_arguments(self):
# issue 11828
b = self.type2test(b'hello')
l = self.type2test(b'l')
h = self.type2test(b'h')
x = self.type2test(b'x')
o = self.type2test(b'o')
self.assertEqual(2, b.find(l, None))
self.assertEqual(3, b.find(l, -2, None))
self.assertEqual(2, b.find(l, None, -2))
self.assertEqual(0, b.find(h, None, None))
self.assertEqual(3, b.rfind(l, None))
self.assertEqual(3, b.rfind(l, -2, None))
self.assertEqual(2, b.rfind(l, None, -2))
self.assertEqual(0, b.rfind(h, None, None))
self.assertEqual(2, b.index(l, None))
self.assertEqual(3, b.index(l, -2, None))
self.assertEqual(2, b.index(l, None, -2))
self.assertEqual(0, b.index(h, None, None))
self.assertEqual(3, b.rindex(l, None))
self.assertEqual(3, b.rindex(l, -2, None))
self.assertEqual(2, b.rindex(l, None, -2))
self.assertEqual(0, b.rindex(h, None, None))
self.assertEqual(2, b.count(l, None))
self.assertEqual(1, b.count(l, -2, None))
self.assertEqual(1, b.count(l, None, -2))
self.assertEqual(0, b.count(x, None, None))
self.assertEqual(True, b.endswith(o, None))
self.assertEqual(True, b.endswith(o, -2, None))
self.assertEqual(True, b.endswith(l, None, -2))
self.assertEqual(False, b.endswith(x, None, None))
self.assertEqual(True, b.startswith(h, None))
self.assertEqual(True, b.startswith(l, -2, None))
self.assertEqual(True, b.startswith(h, None, -2))
self.assertEqual(False, b.startswith(x, None, None))
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
b = self.type2test(b'hello')
x = self.type2test(b'x')
self.assertRaisesRegexp(TypeError, r'\bfind\b', b.find,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brfind\b', b.rfind,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bindex\b', b.index,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brindex\b', b.rindex,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bcount\b', b.count,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bstartswith\b', b.startswith,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bendswith\b', b.endswith,
x, None, None, None)
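# Editorial note: BaseBytesTest is parameterized through the `type2test`
# class attribute; the concrete classes below bind it (ByteArrayTest uses
# bytearray). A hypothetical additional binding would follow the same shape:
#
#     class MyBytesLikeTest(BaseBytesTest):
#         type2test = MyBytesLike  # illustrative type, not defined here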
class ByteArrayTest(BaseBytesTest):
type2test = bytearray
def test_nohash(self):
self.assertRaises(TypeError, hash, bytearray())
def test_bytearray_api(self):
short_sample = b"Hello world\n"
sample = short_sample + b"\0"*(20 - len(short_sample))
tfn = tempfile.mktemp()
try:
# Prepare
with open(tfn, "wb") as f:
f.write(short_sample)
# Test readinto
with open(tfn, "rb") as f:
b = bytearray(20)
n = f.readinto(b)
self.assertEqual(n, len(short_sample))
# Python 2.x
b_sample = (ord(s) for s in sample)
self.assertEqual(list(b), list(b_sample))
# Test writing in binary mode
with open(tfn, "wb") as f:
f.write(b)
with open(tfn, "rb") as f:
self.assertEqual(f.read(), sample)
# Text mode is ambiguous; don't test
finally:
try:
os.remove(tfn)
except os.error:
pass
def test_reverse(self):
b = bytearray(b'hello')
self.assertEqual(b.reverse(), None)
self.assertEqual(b, b'olleh')
b = bytearray(b'hello1') # test even number of items
b.reverse()
self.assertEqual(b, b'1olleh')
b = bytearray()
b.reverse()
self.assertFalse(b)
def test_regexps(self):
def by(s):
return bytearray(map(ord, s))
b = by("Hello, world")
self.assertEqual(re.findall(r"\w+", b), [by("Hello"), by("world")])
def test_setitem(self):
b = bytearray([1, 2, 3])
b[1] = 100
self.assertEqual(b, bytearray([1, 100, 3]))
b[-1] = 200
self.assertEqual(b, bytearray([1, 100, 200]))
b[0] = Indexable(10)
self.assertEqual(b, bytearray([10, 100, 200]))
try:
b[3] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[-10] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[0] = 256
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = Indexable(-1)
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = None
self.fail("Didn't raise TypeError")
except TypeError:
pass
def test_delitem(self):
b = bytearray(range(10))
del b[0]
self.assertEqual(b, bytearray(range(1, 10)))
del b[-1]
self.assertEqual(b, bytearray(range(1, 9)))
del b[4]
self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))
def test_setslice(self):
b = bytearray(range(10))
self.assertEqual(list(b), list(range(10)))
b[0:5] = bytearray([1, 1, 1, 1, 1])
self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
del b[0:-5]
self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
b[0:0] = bytearray([0, 1, 2, 3, 4])
self.assertEqual(b, bytearray(range(10)))
b[-7:-3] = bytearray([100, 101])
self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
b[3:5] = [3, 4, 5, 6]
self.assertEqual(b, bytearray(range(10)))
b[3:0] = [42, 42, 42]
self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
b[3:] = b'foo'
self.assertEqual(b, bytearray([0, 1, 2, 102, 111, 111]))
b[:3] = memoryview(b'foo')
self.assertEqual(b, bytearray([102, 111, 111, 102, 111, 111]))
b[3:4] = []
self.assertEqual(b, bytearray([102, 111, 111, 111, 111]))
b[1:] = list(b'uuuu') # this works only on Python2
self.assertEqual(b, bytearray([102, 117, 117, 117, 117]))
for elem in [5, -5, 0, long(10e20), u'str', 2.3, [u'a', u'b'], [[]]]:
with self.assertRaises(TypeError):
b[3:4] = elem
for elem in [[254, 255, 256], [-256, 9000]]:
with self.assertRaises(ValueError):
b[3:4] = elem
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
L = list(range(255))
b = bytearray(L)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
b[start:stop:step] = data
self.assertEqual(b, bytearray(L))
del L[start:stop:step]
del b[start:stop:step]
self.assertEqual(b, bytearray(L))
def test_setslice_trap(self):
# This test verifies that we correctly handle assigning self
# to a slice of self (the old Lambert Meertens trap).
b = bytearray(range(256))
b[8:] = b
self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))
def test_iconcat(self):
b = bytearray(b"abc")
b1 = b
b += b"def"
self.assertEqual(b, b"abcdef")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
b += b"xyz"
self.assertEqual(b, b"abcdefxyz")
try:
b += u""
except TypeError:
pass
else:
self.fail("bytes += unicode didn't raise TypeError")
def test_irepeat(self):
b = bytearray(b"abc")
b1 = b
b *= 3
self.assertEqual(b, b"abcabcabc")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_irepeat_1char(self):
b = bytearray(b"x")
b1 = b
b *= 100
self.assertEqual(b, b"x"*100)
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_alloc(self):
b = bytearray()
alloc = b.__alloc__()
self.assertTrue(alloc >= 0)
seq = [alloc]
for i in range(100):
b += b"x"
alloc = b.__alloc__()
self.assertGreater(alloc, len(b)) # including trailing null byte
if alloc not in seq:
seq.append(alloc)
def test_init_alloc(self):
b = bytearray()
def g():
for i in range(1, 100):
yield i
a = list(b)
self.assertEqual(a, list(range(1, len(a)+1)))
self.assertEqual(len(b), len(a))
self.assertLessEqual(len(b), i)
alloc = b.__alloc__()
self.assertGreater(alloc, len(b)) # including trailing null byte
b.__init__(g())
self.assertEqual(list(b), list(range(1, 100)))
self.assertEqual(len(b), 99)
alloc = b.__alloc__()
self.assertGreater(alloc, len(b))
def test_extend(self):
orig = b'hello'
a = bytearray(orig)
a.extend(a)
self.assertEqual(a, orig + orig)
self.assertEqual(a[5:], orig)
a = bytearray(b'')
# Test iterators that don't have a __length_hint__
a.extend(map(ord, orig * 25))
a.extend(ord(x) for x in orig * 25)
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(iter(map(ord, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(list(map(ord, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
self.assertEqual(len(a), 0)
a = bytearray(b'')
a.extend([Indexable(ord('a'))])
self.assertEqual(a, b'a')
def test_remove(self):
b = bytearray(b'hello')
b.remove(ord('l'))
self.assertEqual(b, b'helo')
b.remove(ord('l'))
self.assertEqual(b, b'heo')
self.assertRaises(ValueError, lambda: b.remove(ord('l')))
self.assertRaises(ValueError, lambda: b.remove(400))
self.assertRaises(TypeError, lambda: b.remove(u'e'))
# remove first and last
b.remove(ord('o'))
b.remove(ord('h'))
self.assertEqual(b, b'e')
self.assertRaises(TypeError, lambda: b.remove(u'e'))
b.remove(Indexable(ord('e')))
self.assertEqual(b, b'')
def test_pop(self):
b = bytearray(b'world')
self.assertEqual(b.pop(), ord('d'))
self.assertEqual(b.pop(0), ord('w'))
self.assertEqual(b.pop(-2), ord('r'))
self.assertRaises(IndexError, lambda: b.pop(10))
self.assertRaises(IndexError, lambda: bytearray().pop())
# test for issue #6846
self.assertEqual(bytearray(b'\xff').pop(), 0xff)
def test_nosort(self):
self.assertRaises(AttributeError, lambda: bytearray().sort())
def test_append(self):
b = bytearray(b'hell')
b.append(ord('o'))
self.assertEqual(b, b'hello')
self.assertEqual(b.append(100), None)
b = bytearray()
b.append(ord('A'))
self.assertEqual(len(b), 1)
self.assertRaises(TypeError, lambda: b.append(u'o'))
b = bytearray()
b.append(Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_insert(self):
b = bytearray(b'msssspp')
b.insert(1, ord('i'))
b.insert(4, ord('i'))
b.insert(-2, ord('i'))
b.insert(1000, ord('i'))
self.assertEqual(b, b'mississippi')
# allowed in 2.x
#self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
b = bytearray()
b.insert(0, Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_copied(self):
# Issue 4348. Make sure that operations that don't mutate the array
# copy the bytes.
b = bytearray(b'abc')
self.assertFalse(b is b.replace(b'abc', b'cde', 0))
t = bytearray([i for i in range(256)])
x = bytearray(b'')
self.assertFalse(x is x.translate(t))
def test_partition_bytearray_doesnt_share_nullstring(self):
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
# Same for rpartition
b, c, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
c, b, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
def test_resize_forbidden(self):
# #4509: can't resize a bytearray when there are buffer exports, even
# if it wouldn't reallocate the underlying buffer.
# Furthermore, no destructive changes to the buffer may be applied
# before raising the error.
b = bytearray(range(10))
v = memoryview(b)
def resize(n):
b[1:-1] = range(n + 1, 2*n - 1)
resize(10)
orig = b[:]
self.assertRaises(BufferError, resize, 11)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 9)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 0)
self.assertEqual(b, orig)
# Other operations implying resize
self.assertRaises(BufferError, b.pop, 0)
self.assertEqual(b, orig)
self.assertRaises(BufferError, b.remove, b[1])
self.assertEqual(b, orig)
def delitem():
del b[1]
self.assertRaises(BufferError, delitem)
self.assertEqual(b, orig)
# deleting a non-contiguous slice
def delslice():
b[1:-1:2] = b""
self.assertRaises(BufferError, delslice)
self.assertEqual(b, orig)
def test_empty_bytearray(self):
# Issue #7561: operations on empty bytearrays could crash in many
# situations, due to a fragile implementation of the
# PyByteArray_AS_STRING() C macro.
self.assertRaises(ValueError, int, bytearray(b''))
class AssortedBytesTest(unittest.TestCase):
#
# Test various combinations of bytes and bytearray
#
@check_bytes_warnings
def test_repr_str(self):
for f in str, repr:
self.assertEqual(f(bytearray()), "bytearray(b'')")
self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
self.assertEqual(f(bytearray([0, 1, 254, 255])),
"bytearray(b'\\x00\\x01\\xfe\\xff')")
self.assertEqual(f(b"abc"), "b'abc'")
self.assertEqual(f(b"'"), '''b"'"''') # '''
self.assertEqual(f(b"'\""), r"""b'\'"'""") # '
def test_compare_bytes_to_bytearray(self):
self.assertEqual(b"abc" == bytes(b"abc"), True)
self.assertEqual(b"ab" != bytes(b"abc"), True)
self.assertEqual(b"ab" <= bytes(b"abc"), True)
self.assertEqual(b"ab" < bytes(b"abc"), True)
self.assertEqual(b"abc" >= bytes(b"ab"), True)
self.assertEqual(b"abc" > bytes(b"ab"), True)
self.assertEqual(b"abc" != bytes(b"abc"), False)
self.assertEqual(b"ab" == bytes(b"abc"), False)
self.assertEqual(b"ab" > bytes(b"abc"), False)
self.assertEqual(b"ab" >= bytes(b"abc"), False)
self.assertEqual(b"abc" < bytes(b"ab"), False)
self.assertEqual(b"abc" <= bytes(b"ab"), False)
self.assertEqual(bytes(b"abc") == b"abc", True)
self.assertEqual(bytes(b"ab") != b"abc", True)
self.assertEqual(bytes(b"ab") <= b"abc", True)
self.assertEqual(bytes(b"ab") < b"abc", True)
self.assertEqual(bytes(b"abc") >= b"ab", True)
self.assertEqual(bytes(b"abc") > b"ab", True)
self.assertEqual(bytes(b"abc") != b"abc", False)
self.assertEqual(bytes(b"ab") == b"abc", False)
self.assertEqual(bytes(b"ab") > b"abc", False)
self.assertEqual(bytes(b"ab") >= b"abc", False)
self.assertEqual(bytes(b"abc") < b"ab", False)
self.assertEqual(bytes(b"abc") <= b"ab", False)
@test.test_support.requires_docstrings
def test_doc(self):
self.assertIsNotNone(bytearray.__doc__)
self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
self.assertIsNotNone(bytes.__doc__)
self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)
def test_from_bytearray(self):
sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
buf = memoryview(sample)
b = bytearray(buf)
self.assertEqual(b, bytearray(sample))
@check_bytes_warnings
def test_to_str(self):
self.assertEqual(str(b''), "b''")
self.assertEqual(str(b'x'), "b'x'")
self.assertEqual(str(b'\x80'), "b'\\x80'")
self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")
def test_literal(self):
tests = [
(b"Wonderful spam", "Wonderful spam"),
(br"Wonderful spam too", "Wonderful spam too"),
(b"\xaa\x00\000\200", "\xaa\x00\000\200"),
(br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
]
for b, s in tests:
self.assertEqual(b, bytearray(s, 'latin-1'))
for c in range(128, 256):
self.assertRaises(SyntaxError, eval,
'b"%s"' % chr(c))
def test_translate(self):
b = b'hello'
ba = bytearray(b)
rosetta = bytearray(range(0, 256))
rosetta[ord('o')] = ord('e')
c = b.translate(rosetta, b'l')
self.assertEqual(b, b'hello')
self.assertEqual(c, b'hee')
c = ba.translate(rosetta, b'l')
self.assertEqual(ba, b'hello')
self.assertEqual(c, b'hee')
c = b.translate(None, b'e')
self.assertEqual(c, b'hllo')
c = ba.translate(None, b'e')
self.assertEqual(c, b'hllo')
self.assertRaises(TypeError, b.translate, None, None)
self.assertRaises(TypeError, ba.translate, None, None)
def test_split_bytearray(self):
self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])
def test_rsplit_bytearray(self):
self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py; we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
test.buffer_tests.MixinBytesBufferCommonTests):
def marshal(self, x):
return bytearray(x)
def test_returns_new_copy(self):
val = self.marshal(b'1234')
# On immutable types these MAY return a reference to themselves
# but on mutable types like bytearray they MUST return a new copy.
for methname in ('zfill', 'rjust', 'ljust', 'center'):
method = getattr(val, methname)
newval = method(3)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
methname+' returned self on a mutable object')
for expr in ('val.split()[0]', 'val.rsplit()[0]',
'val.partition(".")[0]', 'val.rpartition(".")[2]',
'val.splitlines()[0]', 'val.replace("", "")'):
newval = eval(expr)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
expr+' returned val on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
def fixtype(self, obj):
if isinstance(obj, str):
return obj.encode("utf-8")
return super(FixedStringTest, self).fixtype(obj)
# Currently the bytes containment testing uses a single integer
# value. This may not be the final design, but until then the
    # inherited str-style containment tests are not valid for bytes.
def test_contains(self):
pass
def test_expandtabs(self):
pass
def test_upper(self):
pass
def test_lower(self):
pass
def test_hash(self):
# XXX check this out
pass
class ByteArrayAsStringTest(FixedStringTest):
type2test = bytearray
class ByteArraySubclass(bytearray):
pass
class ByteArraySubclassTest(unittest.TestCase):
def test_basic(self):
self.assertTrue(issubclass(ByteArraySubclass, bytearray))
self.assertIsInstance(ByteArraySubclass(), bytearray)
a, b = b"abcd", b"efgh"
_a, _b = ByteArraySubclass(a), ByteArraySubclass(b)
# test comparison operators with subclass instances
self.assertTrue(_a == _a)
self.assertTrue(_a != _b)
self.assertTrue(_a < _b)
self.assertTrue(_a <= _b)
self.assertTrue(_b >= _a)
self.assertTrue(_b > _a)
self.assertTrue(_a is not a)
# test concat of subclass instances
self.assertEqual(a + b, _a + _b)
self.assertEqual(a + b, a + _b)
self.assertEqual(a + b, _a + b)
# test repeat
self.assertTrue(a*5 == _a*5)
def test_join(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
s1 = ByteArraySubclass(b"abcd")
s2 = bytearray().join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is bytearray, type(s2))
# Test reverse, calling join on subclass
s3 = s1.join([b"abcd"])
self.assertTrue(type(s3) is bytearray)
def test_pickle(self):
a = ByteArraySubclass(b"abcd")
a.x = 10
a.y = ByteArraySubclass(b"efgh")
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
b = pickle.loads(pickle.dumps(a, proto))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_copy(self):
a = ByteArraySubclass(b"abcd")
a.x = 10
a.y = ByteArraySubclass(b"efgh")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_init_override(self):
class subclass(bytearray):
def __init__(self, newarg=1, *args, **kwargs):
bytearray.__init__(self, *args, **kwargs)
x = subclass(4, source=b"abcd")
self.assertEqual(x, b"abcd")
x = subclass(newarg=4, source=b"abcd")
self.assertEqual(x, b"abcd")
def test_main():
#test.test_support.run_unittest(BytesTest)
#test.test_support.run_unittest(AssortedBytesTest)
#test.test_support.run_unittest(BytesAsStringTest)
test.test_support.run_unittest(
ByteArrayTest,
ByteArrayAsStringTest,
ByteArraySubclassTest,
BytearrayPEP3137Test)
if __name__ == "__main__":
test_main()
|
|
"""Platform for the KEF Wireless Speakers."""
from datetime import timedelta
from functools import partial
import ipaddress
import logging
from aiokef import AsyncKefSpeaker
from aiokef.aiokef import DSP_OPTION_MAPPING
from getmac import get_mac_address
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
MediaPlayerEntity,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_TYPE,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "KEF"
DEFAULT_PORT = 50001
DEFAULT_MAX_VOLUME = 0.5
DEFAULT_VOLUME_STEP = 0.05
DEFAULT_INVERSE_SPEAKER_MODE = False
DEFAULT_SUPPORTS_ON = True
DOMAIN = "kef"
SCAN_INTERVAL = timedelta(seconds=30)
SOURCES = {"LSX": ["Wifi", "Bluetooth", "Aux", "Opt"]}
SOURCES["LS50"] = SOURCES["LSX"] + ["Usb"]
CONF_MAX_VOLUME = "maximum_volume"
CONF_VOLUME_STEP = "volume_step"
CONF_INVERSE_SPEAKER_MODE = "inverse_speaker_mode"
CONF_SUPPORTS_ON = "supports_on"
CONF_STANDBY_TIME = "standby_time"
SERVICE_MODE = "set_mode"
SERVICE_DESK_DB = "set_desk_db"
SERVICE_WALL_DB = "set_wall_db"
SERVICE_TREBLE_DB = "set_treble_db"
SERVICE_HIGH_HZ = "set_high_hz"
SERVICE_LOW_HZ = "set_low_hz"
SERVICE_SUB_DB = "set_sub_db"
SERVICE_UPDATE_DSP = "update_dsp"
DSP_SCAN_INTERVAL = timedelta(seconds=3600)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TYPE): vol.In(["LS50", "LSX"]),
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MAX_VOLUME, default=DEFAULT_MAX_VOLUME): cv.small_float,
vol.Optional(CONF_VOLUME_STEP, default=DEFAULT_VOLUME_STEP): cv.small_float,
vol.Optional(
CONF_INVERSE_SPEAKER_MODE, default=DEFAULT_INVERSE_SPEAKER_MODE
): cv.boolean,
vol.Optional(CONF_SUPPORTS_ON, default=DEFAULT_SUPPORTS_ON): cv.boolean,
vol.Optional(CONF_STANDBY_TIME): vol.In([20, 60]),
}
)
def get_ip_mode(host):
"""Get the 'mode' used to retrieve the MAC address."""
try:
if ipaddress.ip_address(host).version == 6:
return "ip6"
return "ip"
except ValueError:
return "hostname"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the KEF platform."""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
host = config[CONF_HOST]
speaker_type = config[CONF_TYPE]
port = config[CONF_PORT]
name = config[CONF_NAME]
maximum_volume = config[CONF_MAX_VOLUME]
volume_step = config[CONF_VOLUME_STEP]
inverse_speaker_mode = config[CONF_INVERSE_SPEAKER_MODE]
supports_on = config[CONF_SUPPORTS_ON]
standby_time = config.get(CONF_STANDBY_TIME)
sources = SOURCES[speaker_type]
_LOGGER.debug(
"Setting up %s with host: %s, port: %s, name: %s, sources: %s",
DOMAIN,
host,
port,
name,
sources,
)
mode = get_ip_mode(host)
mac = await hass.async_add_executor_job(partial(get_mac_address, **{mode: host}))
unique_id = f"kef-{mac}" if mac is not None else None
media_player = KefMediaPlayer(
name,
host,
port,
maximum_volume,
volume_step,
standby_time,
inverse_speaker_mode,
supports_on,
sources,
speaker_type,
loop=hass.loop,
unique_id=unique_id,
)
if host in hass.data[DOMAIN]:
_LOGGER.debug("%s is already configured", host)
else:
hass.data[DOMAIN][host] = media_player
async_add_entities([media_player], update_before_add=True)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_MODE,
{
vol.Optional("desk_mode"): cv.boolean,
vol.Optional("wall_mode"): cv.boolean,
vol.Optional("phase_correction"): cv.boolean,
vol.Optional("high_pass"): cv.boolean,
vol.Optional("sub_polarity"): vol.In(["-", "+"]),
vol.Optional("bass_extension"): vol.In(["Less", "Standard", "Extra"]),
},
"set_mode",
)
platform.async_register_entity_service(SERVICE_UPDATE_DSP, {}, "update_dsp")
def add_service(name, which, option):
options = DSP_OPTION_MAPPING[which]
dtype = type(options[0]) # int or float
platform.async_register_entity_service(
name,
{
vol.Required(option): vol.All(
vol.Coerce(float), vol.Coerce(dtype), vol.In(options)
)
},
f"set_{which}",
)
add_service(SERVICE_DESK_DB, "desk_db", "db_value")
add_service(SERVICE_WALL_DB, "wall_db", "db_value")
add_service(SERVICE_TREBLE_DB, "treble_db", "db_value")
add_service(SERVICE_HIGH_HZ, "high_hz", "hz_value")
add_service(SERVICE_LOW_HZ, "low_hz", "hz_value")
add_service(SERVICE_SUB_DB, "sub_db", "db_value")
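# Editorial note on the service schemas above: each value is coerced to float
# first (so "4" and 4 both validate), then to the option list's own type
# (int or float), so that vol.In() compares like with like against aiokef's
# DSP_OPTION_MAPPING. Each service dispatches to the entity method named
# f"set_{which}" (e.g. set_desk_db), which the entity is expected to define
# outside this excerpt.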
class KefMediaPlayer(MediaPlayerEntity):
"""Kef Player Object."""
def __init__(
self,
name,
host,
port,
maximum_volume,
volume_step,
standby_time,
inverse_speaker_mode,
supports_on,
sources,
speaker_type,
loop,
unique_id,
):
"""Initialize the media player."""
self._name = name
self._sources = sources
self._speaker = AsyncKefSpeaker(
host,
port,
volume_step,
maximum_volume,
standby_time,
inverse_speaker_mode,
loop=loop,
)
self._unique_id = unique_id
self._supports_on = supports_on
self._speaker_type = speaker_type
self._state = None
self._muted = None
self._source = None
self._volume = None
self._is_online = None
self._dsp = None
self._update_dsp_task_remover = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
async def async_update(self):
"""Update latest state."""
_LOGGER.debug("Running async_update")
try:
self._is_online = await self._speaker.is_online()
if self._is_online:
(
self._volume,
self._muted,
) = await self._speaker.get_volume_and_is_muted()
state = await self._speaker.get_state()
self._source = state.source
self._state = STATE_ON if state.is_on else STATE_OFF
if self._dsp is None:
# Only do this when necessary because it is a slow operation
await self.update_dsp()
else:
self._muted = None
self._source = None
self._volume = None
self._state = STATE_OFF
except (ConnectionRefusedError, ConnectionError, TimeoutError) as err:
_LOGGER.debug("Error in `update`: %s", err)
self._state = None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
support_kef = (
SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_SELECT_SOURCE
| SUPPORT_TURN_OFF
| SUPPORT_NEXT_TRACK # only in Bluetooth and Wifi
| SUPPORT_PAUSE # only in Bluetooth and Wifi
| SUPPORT_PLAY # only in Bluetooth and Wifi
| SUPPORT_PREVIOUS_TRACK # only in Bluetooth and Wifi
)
if self._supports_on:
support_kef |= SUPPORT_TURN_ON
return support_kef
@property
def source(self):
"""Name of the current input source."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return self._sources
@property
def available(self):
"""Return if the speaker is reachable online."""
return self._is_online
@property
def unique_id(self):
"""Return the device unique id."""
return self._unique_id
@property
def icon(self):
"""Return the device's icon."""
return "mdi:speaker-wireless"
async def async_turn_off(self):
"""Turn the media player off."""
await self._speaker.turn_off()
async def async_turn_on(self):
"""Turn the media player on."""
if not self._supports_on:
raise NotImplementedError()
await self._speaker.turn_on()
async def async_volume_up(self):
"""Volume up the media player."""
await self._speaker.increase_volume()
async def async_volume_down(self):
"""Volume down the media player."""
await self._speaker.decrease_volume()
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self._speaker.set_volume(volume)
async def async_mute_volume(self, mute):
"""Mute (True) or unmute (False) media player."""
if mute:
await self._speaker.mute()
else:
await self._speaker.unmute()
async def async_select_source(self, source: str):
"""Select input source."""
if source in self.source_list:
await self._speaker.set_source(source)
else:
raise ValueError(f"Unknown input source: {source}.")
async def async_media_play(self):
"""Send play command."""
await self._speaker.set_play_pause()
async def async_media_pause(self):
"""Send pause command."""
await self._speaker.set_play_pause()
async def async_media_previous_track(self):
"""Send previous track command."""
await self._speaker.prev_track()
async def async_media_next_track(self):
"""Send next track command."""
await self._speaker.next_track()
async def update_dsp(self, _=None) -> None:
"""Update the DSP settings."""
if self._speaker_type == "LS50" and self._state == STATE_OFF:
            # The LSX is able to respond when off; the LS50 has to be on.
return
mode = await self._speaker.get_mode()
self._dsp = dict(
desk_db=await self._speaker.get_desk_db(),
wall_db=await self._speaker.get_wall_db(),
treble_db=await self._speaker.get_treble_db(),
high_hz=await self._speaker.get_high_hz(),
low_hz=await self._speaker.get_low_hz(),
sub_db=await self._speaker.get_sub_db(),
**mode._asdict(),
)
async def async_added_to_hass(self):
"""Subscribe to DSP updates."""
self._update_dsp_task_remover = async_track_time_interval(
self.hass, self.update_dsp, DSP_SCAN_INTERVAL
)
async def async_will_remove_from_hass(self):
"""Unsubscribe to DSP updates."""
self._update_dsp_task_remover()
self._update_dsp_task_remover = None
@property
def extra_state_attributes(self):
"""Return the DSP settings of the KEF device."""
return self._dsp or {}
async def set_mode(
self,
desk_mode=None,
wall_mode=None,
phase_correction=None,
high_pass=None,
sub_polarity=None,
bass_extension=None,
):
"""Set the speaker mode."""
await self._speaker.set_mode(
desk_mode=desk_mode,
wall_mode=wall_mode,
phase_correction=phase_correction,
high_pass=high_pass,
sub_polarity=sub_polarity,
bass_extension=bass_extension,
)
self._dsp = None
async def set_desk_db(self, db_value):
"""Set desk_db of the KEF speakers."""
await self._speaker.set_desk_db(db_value)
self._dsp = None
async def set_wall_db(self, db_value):
"""Set wall_db of the KEF speakers."""
await self._speaker.set_wall_db(db_value)
self._dsp = None
async def set_treble_db(self, db_value):
"""Set treble_db of the KEF speakers."""
await self._speaker.set_treble_db(db_value)
self._dsp = None
async def set_high_hz(self, hz_value):
"""Set high_hz of the KEF speakers."""
await self._speaker.set_high_hz(hz_value)
self._dsp = None
async def set_low_hz(self, hz_value):
"""Set low_hz of the KEF speakers."""
await self._speaker.set_low_hz(hz_value)
self._dsp = None
async def set_sub_db(self, db_value):
"""Set sub_db of the KEF speakers."""
await self._speaker.set_sub_db(db_value)
self._dsp = None
|
|
from __future__ import absolute_import
from pychron.entry.loaders.mass_spec_binary_extractor import MassSpecBinaryExtractor
__author__ = "ross"
import unittest
class Expected(object):
runid = "59273-01A"
project = "Zimmerer"
sample = "ORGAN-8"
material = "Biotite"
investigator = "Zimmerer"
fits = "LLLLLL"
locality = ""
rundate = "3/3/2010"
irradlabel = "NM-227L"
runhour = 2.0
version = 7.875
emv = [1.4503029584884644, 0.0]
optemp = 0
history = (
"Multiplier Baseline = L @ --- @ ---; Ar40 = L @ --- @ ---; Ar39 = L @ --- @ ---; "
"Ar38 = L @ --- @ ---; Ar36 = L @ --- @ ---; Ar37 = L @ --- @ ---; "
"argonlab - 3/11/2010 @ 10:08:42 AM @ --- @ ---; Ar40 bk val,er = Bracketing blanks @ --- @ ---; "
"Ar39 bk val,er = Bracketing blanks @ --- @ ---; Ar38 bk val,er = Bracketing blanks @ --- @ ---; "
"Ar36 bk val,er = Bracketing blanks @ --- @ ---; Ar37 bk val,er = Bracketing blanks @ --- @ ---"
)
scalefactor = 1000000000.0, 1000000000.0
extract_device = "CO2"
extract_value = 0.0
final_set_power = 0.5
totdur_heating = 0
totdur_atsetpoint = 0
gain = [166124.90625, 0.0]
calc_with_ratio = False
system_number = 3
mol_ref_iso = 8.004109832880028e-16
disc = 1.00600004196167
disc_err = 0.0010000000474974513
j = 0.002384300110861659
j_err = 2.048499936790904e-06
resistor_values = [1.0, 0, 0]
isogrp = "Ar"
niso = 5
nratio = 0
detectors = [1, 1, 1, 1, 1]
ndet = 1
refdetnum = 1
signormfactor = 1
ncyc = 0
isokeys = ["Ar40", "Ar39", "Ar38", "Ar36", "Ar37"]
runday = 8462.0
ncnts = [48, 72, 24, 120, 24]
isotopes = {
"Ar40": {
"background": 0.016390634700655937,
"background_err": 0.0001810772664612159,
"intercept": 1,
"intercept_err": 1,
"counts_per_cycle": 0,
},
"Ar39": {
"background": 0.00033566050115041435,
"background_err": 1.5215453458949924e-05,
"intercept": 1,
"intercept_err": 1,
"counts_per_cycle": 0,
},
"Ar36": {
"background": 7.502062362618744e-05,
"background_err": 4.699999863078119e-06,
"intercept": 1,
"intercept_err": 1,
"counts_per_cycle": 0,
},
}
baselines = [{"ncnts": 42}]
class MassSpecBinaryExtractorTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.extractor = MassSpecBinaryExtractor()
cls.specs = cls.extractor.import_file("./data/MSDataFile_7875")
cls.expected = Expected()
def test_runid(self):
self._test_spec_attr("runid")
def test_sample(self):
self._test_spec_attr("sample")
def test_material(self):
self._test_spec_attr("material")
def test_investigator(self):
self._test_spec_attr("investigator")
def test_project(self):
self._test_spec_attr("project")
def test_locality(self):
self._test_spec_attr("locality")
def test_rundate(self):
self._test_spec_attr("rundate")
def test_irradlabel(self):
self._test_spec_attr("irradlabel")
def test_fits(self):
self._test_spec_attr("fits")
# def test_comment(self):
# self._test_spec_attr('comment', '1350; 1800/20')
def test_runhour(self):
self._test_spec_attr("runhour")
def test_version(self):
self._test_spec_attr("version")
def test_optemp(self):
self._test_spec_attr("optemp")
def test_emv(self):
self._test_spec_attr("emv")
def test_history(self):
self._test_spec_attr("history")
def test_scalefactor(self):
self._test_spec_attr("scalefactor")
def test_extract_device(self):
self._test_spec_attr("extract_device")
def test_extract_value(self):
self._test_spec_attr("extract_value")
def test_final_set_power(self):
self._test_spec_attr("final_set_power")
def test_totdur_heating(self):
self._test_spec_attr("totdur_heating")
def test_totdur_atsetpoint(self):
self._test_spec_attr("totdur_atsetpoint")
def test_gain(self):
self._test_spec_attr("gain")
def test_calc_with_ratio(self):
self._test_spec_attr("calc_with_ratio")
def test_system_number(self):
self._test_spec_attr("system_number")
def test_mol_ref_iso(self):
self._test_spec_attr("mol_ref_iso")
def test_disc(self):
self._test_spec_attr("disc")
def test_disc_err(self):
self._test_spec_attr("disc_err")
def test_j(self):
self._test_spec_attr("j")
def test_j_err(self):
self._test_spec_attr("j_err")
def test_resistor_values(self):
self._test_spec_attr("resistor_values")
def test_isogrp(self):
self._test_spec_attr("isogrp")
def test_niso(self):
self._test_spec_attr("niso")
def test_nratio(self):
self._test_spec_attr("nratio")
def test_detectors(self):
self._test_spec_attr("detectors")
def test_ndet(self):
self._test_spec_attr("ndet")
def test_refdet_num(self):
self._test_spec_attr("refdetnum")
def test_signormfactor(self):
self._test_spec_attr("signormfactor")
def test_ncyc(self):
self._test_spec_attr("ncyc")
def test_isokeys(self):
self._test_spec_attr("isokeys")
def test_runday(self):
self._test_spec_attr("runday")
def test_ncnts(self):
self._test_spec_attr("ncnts")
# =================Ar40====================
def test_ar40_intercept(self):
self._test_intercept("Ar40")
def test_ar40_intercept_err(self):
self._test_intercept("Ar40", True)
def test_ar40_background(self):
self._test_background("Ar40")
def test_ar40_background_err(self):
self._test_background("Ar40", True)
def test_ar40_counts_per_cycle(self):
self._test_counts_per_cycle("Ar40")
# =================Ar39====================
def test_ar39_intercept(self):
self._test_intercept("Ar39")
def test_ar39_intercept_err(self):
self._test_intercept("Ar39", True)
def test_ar39_background(self):
self._test_background("Ar39")
def test_ar39_background_err(self):
self._test_background("Ar39", True)
def test_ar39_counts_per_cycle(self):
self._test_counts_per_cycle("Ar39")
# =================Ar36====================
def test_ar36_background(self):
self._test_background("Ar36")
def test_ar36_background_err(self):
self._test_background("Ar36", True)
def test_ar36_counts_per_cycle(self):
self._test_counts_per_cycle("Ar36")
def test_ar36_intercept(self):
self._test_intercept("Ar36")
def test_ar36_intercept_err(self):
self._test_intercept("Ar36", True)
def test_baseline_ncnts(self):
spec = self.specs[0]
baseline = spec.baselines[0]
self.assertEqual(baseline.ncnts, self.expected.baselines[0]["ncnts"])
def _test_counts_per_cycle(self, iso, idx=0):
spec = self.specs[idx]
iidx = spec.isokeys.index(iso)
isotope = spec.isotopes[iidx]
self.assertEqual(
isotope["counts_per_cycle"], self.expected.isotopes[iso]["counts_per_cycle"]
)
def _test_background(self, iso, is_err=False, idx=0):
attr = "background_err" if is_err else "background"
self._test_isotope_attr(iso, attr, idx)
def _test_intercept(self, iso, is_err=False, idx=0):
attr = "intercept_err" if is_err else "intercept"
self._test_isotope_attr(iso, attr, idx)
def _test_isotope_attr(self, iso, attr, idx):
spec = self.specs[idx]
iidx = spec.isokeys.index(iso)
isotope = spec.isotopes[iidx]
self.assertEqual(isotope[attr], self.expected.isotopes[iso][attr])
def _test_spec_attr(self, attr, idx=0):
self.assertEqual(getattr(self.specs[idx], attr), getattr(self.expected, attr))
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
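# Illustrative expansion of $(KEY) tokens against the xcconfig dict (hypothetical
# values; unknown keys leave the token untouched):
#   replace_vars({'TITANIUM_SDK': '/opt/titanium/5.0.0.GA'}, '$(TITANIUM_SDK)/iphone')
#     -> '/opt/titanium/5.0.0.GA/iphone'
#   replace_vars({}, '$(MISSING)/iphone') -> '$(MISSING)/iphone'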
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
docdir = os.path.join(cwd,'..','documentation')
if not os.path.exists(docdir):
print "Couldn't find documentation file at: %s" % docdir
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','rebel.Parse.js')
if not os.path.exists(js_file):
js_file = os.path.join(cwd,'..','assets','rebel.Parse.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','RebelParseModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
c = open(license_file).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	path = os.path.join(cwd,'manifest')
	if not os.path.exists(path): die("missing %s" % path)
	f = open(path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
		key,value = line.split(':',1)
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[],includeJSFiles=False):
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] == '.pyc': continue
if not includeJSFiles and len(e) == 2 and e[1] == '.js': continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, basepath, 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
p = os.path.join(cwd, 'assets')
if not os.path.exists(p):
p = os.path.join(cwd, '..', 'assets')
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
for dn in ('example','platform'):
p = os.path.join(cwd, dn)
if not os.path.exists(p):
p = os.path.join(cwd, '..', dn)
if os.path.exists(p):
zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
license_file = os.path.join(cwd,'LICENSE')
if not os.path.exists(license_file):
license_file = os.path.join(cwd,'..','LICENSE')
if os.path.exists(license_file):
zf.write(license_file,'%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
|
|
from __future__ import absolute_import
from django.contrib.auth.models import AnonymousUser
from sentry.models import (
ApiKey,
AuditLogEntryEvent,
DeletedOrganization,
DeletedTeam,
DeletedProject,
Organization,
OrganizationStatus,
)
from sentry.testutils import TestCase
from sentry.utils.audit import create_audit_entry
username = "hello" * 20
class FakeHttpRequest(object):
def __init__(self, user):
self.user = user
self.META = {"REMOTE_ADDR": "127.0.0.1"}
class CreateAuditEntryTest(TestCase):
def setUp(self):
self.user = self.create_user(username=username)
self.req = FakeHttpRequest(self.user)
self.org = self.create_organization(owner=self.user)
self.team = self.create_team(organization=self.org)
self.project = self.create_project(teams=[self.team], platform="java")
def assert_no_delete_log_created(self):
assert not DeletedOrganization.objects.filter(slug=self.org.slug).exists()
assert not DeletedTeam.objects.filter(slug=self.team.slug).exists()
assert not DeletedProject.objects.filter(slug=self.project.slug).exists()
def test_audit_entry_api(self):
org = self.create_organization()
apikey = ApiKey.objects.create(organization=org, allowed_origins="*")
req = FakeHttpRequest(AnonymousUser())
req.auth = apikey
entry = create_audit_entry(req)
assert entry.actor_key == apikey
assert entry.actor is None
assert entry.ip_address == req.META["REMOTE_ADDR"]
self.assert_no_delete_log_created()
def test_audit_entry_frontend(self):
req = FakeHttpRequest(self.create_user())
entry = create_audit_entry(req)
assert entry.actor == req.user
assert entry.actor_key is None
assert entry.ip_address == req.META["REMOTE_ADDR"]
self.assert_no_delete_log_created()
def test_audit_entry_org_delete_log(self):
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=AuditLogEntryEvent.ORG_REMOVE,
data=self.org.get_audit_log_data(),
)
assert entry.actor == self.user
assert entry.actor_label == username[:64] # needs trimming
assert entry.target_object == self.org.id
assert entry.event == AuditLogEntryEvent.ORG_REMOVE
deleted_org = DeletedOrganization.objects.get(slug=self.org.slug)
self.assert_valid_deleted_log(deleted_org, self.org)
def test_audit_entry_org_restore_log(self):
Organization.objects.filter(id=self.organization.id).update(
status=OrganizationStatus.PENDING_DELETION
)
org = Organization.objects.get(id=self.organization.id)
Organization.objects.filter(id=self.organization.id).update(
status=OrganizationStatus.DELETION_IN_PROGRESS
)
org2 = Organization.objects.get(id=self.organization.id)
Organization.objects.filter(id=self.organization.id).update(
status=OrganizationStatus.VISIBLE
)
org3 = Organization.objects.get(id=self.organization.id)
orgs = [org, org2, org3]
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=AuditLogEntryEvent.ORG_RESTORE,
data=self.org.get_audit_log_data(),
)
entry2 = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.org.id,
event=AuditLogEntryEvent.ORG_EDIT,
data=self.org.get_audit_log_data(),
)
for i in orgs:
if (
i.status == OrganizationStatus.PENDING_DELETION
or i.status == OrganizationStatus.DELETION_IN_PROGRESS
):
assert i.status != OrganizationStatus.VISIBLE
assert ("restored") in entry.get_note()
assert entry.actor == self.user
assert entry.target_object == self.org.id
assert entry.event == AuditLogEntryEvent.ORG_RESTORE
else:
assert i.status == OrganizationStatus.VISIBLE
assert ("edited") in entry2.get_note()
assert entry2.actor == self.user
assert entry2.target_object == self.org.id
assert entry2.event == AuditLogEntryEvent.ORG_EDIT
def test_audit_entry_team_delete_log(self):
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.team.id,
event=AuditLogEntryEvent.TEAM_REMOVE,
data=self.team.get_audit_log_data(),
)
assert entry.actor == self.user
assert entry.target_object == self.team.id
assert entry.event == AuditLogEntryEvent.TEAM_REMOVE
deleted_team = DeletedTeam.objects.get(slug=self.team.slug)
self.assert_valid_deleted_log(deleted_team, self.team)
def test_audit_entry_project_delete_log(self):
entry = create_audit_entry(
request=self.req,
organization=self.org,
target_object=self.project.id,
event=AuditLogEntryEvent.PROJECT_REMOVE,
data=self.project.get_audit_log_data(),
)
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == AuditLogEntryEvent.PROJECT_REMOVE
deleted_project = DeletedProject.objects.get(slug=self.project.slug)
self.assert_valid_deleted_log(deleted_project, self.project)
assert deleted_project.platform == self.project.platform
def test_audit_entry_integration_log(self):
project = self.create_project()
self.login_as(user=self.user)
entry = create_audit_entry(
request=self.req,
organization=self.project.organization,
target_object=self.project.id,
event=AuditLogEntryEvent.INTEGRATION_ADD,
data={"integration": "webhooks", "project": project.slug},
)
assert ("enabled") in entry.get_note()
assert entry.actor == self.user
assert entry.target_object == self.project.id
assert entry.event == AuditLogEntryEvent.INTEGRATION_ADD
entry2 = create_audit_entry(
request=self.req,
organization=self.project.organization,
target_object=self.project.id,
event=AuditLogEntryEvent.INTEGRATION_EDIT,
data={"integration": "webhooks", "project": project.slug},
)
assert ("edited") in entry2.get_note()
assert entry2.actor == self.user
assert entry2.target_object == self.project.id
assert entry2.event == AuditLogEntryEvent.INTEGRATION_EDIT
entry3 = create_audit_entry(
request=self.req,
organization=self.project.organization,
target_object=self.project.id,
event=AuditLogEntryEvent.INTEGRATION_REMOVE,
data={"integration": "webhooks", "project": project.slug},
)
assert ("disable") in entry3.get_note()
assert entry3.actor == self.user
assert entry3.target_object == self.project.id
assert entry3.event == AuditLogEntryEvent.INTEGRATION_REMOVE
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, too-many-locals, fixme
# pylint: disable=too-many-branches, too-many-statements
# pylint: disable=too-many-arguments
# pylint: disable=dangerous-default-value
"""Visualization module"""
from __future__ import absolute_import
import re
import copy
import json
import warnings
from .symbol import Symbol
def _str2tuple(string):
"""Convert shape string to list, internal use only.
Parameters
----------
string: str
Shape string.
Returns
-------
list of str
Represents shape.
"""
return re.findall(r"\d+", string)
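# Illustrative only: the regex keeps the digit groups of a shape string, e.g.
#   _str2tuple("(3, 3)")      -> ['3', '3']
#   _str2tuple("(1, 64, 28)") -> ['1', '64', '28']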
def print_summary(symbol, shape=None, line_length=120, positions=[.44, .64, .74, 1.]):
"""Convert symbol for detail information.
Parameters
----------
symbol: Symbol
Symbol to be visualized.
shape: dict
A dict of shapes, str->shape (tuple), given input shapes.
line_length: int
        Total length of the printed lines.
positions: list
Relative or absolute positions of log elements in each line.
Returns
------
None
Notes
-----
If ``mxnet`` is imported, the visualization module can be used in its short-form.
For example, if we ``import mxnet`` as follows::
import mxnet
this method in visualization module can be used in its short-form as::
mxnet.viz.print_summary(...)
"""
if not isinstance(symbol, Symbol):
raise TypeError("symbol must be Symbol")
show_shape = False
if shape is not None:
show_shape = True
        internals = symbol.get_internals()
        _, out_shapes, _ = internals.infer_shape(**shape)
        if out_shapes is None:
            raise ValueError("Input shape is incomplete")
        shape_dict = dict(zip(internals.list_outputs(), out_shapes))
conf = json.loads(symbol.tojson())
nodes = conf["nodes"]
heads = set(conf["heads"][0])
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
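    # With the defaults this turns relative positions into absolute column edges,
    # e.g. line_length=120 and positions=[.44, .64, .74, 1.] give [52, 76, 88, 120]
    # (int() truncates 52.8, 76.8 and 88.8).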
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Previous Layer']
def print_row(fields, positions):
"""Print format row.
Parameters
----------
fields: list
Information field.
positions: list
Field length ratio.
Returns
------
None
"""
line = ''
for i, field in enumerate(fields):
line += str(field)
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print(line)
print('_' * line_length)
print_row(to_display, positions)
print('=' * line_length)
def print_layer_summary(node, out_shape):
"""print layer information
Parameters
----------
node: dict
Node information.
out_shape: dict
Node shape information.
Returns
------
Node total parameters.
"""
op = node["op"]
pre_node = []
pre_filter = 0
if op != "null":
inputs = node["inputs"]
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node["name"]
if input_node["op"] != "null" or item[0] in heads:
# add precede
pre_node.append(input_name)
if show_shape:
if input_node["op"] != "null":
key = input_name + "_output"
else:
key = input_name
if key in shape_dict:
shape = shape_dict[key][1:]
pre_filter = pre_filter + int(shape[0])
cur_param = 0
if op == 'Convolution':
if "no_bias" in node["attrs"] and node["attrs"]["no_bias"] == 'True':
num_group = int(node['attrs'].get('num_group', '1'))
cur_param = pre_filter * int(node["attrs"]["num_filter"]) \
// num_group
for k in _str2tuple(node["attrs"]["kernel"]):
cur_param *= int(k)
else:
num_group = int(node['attrs'].get('num_group', '1'))
cur_param = pre_filter * int(node["attrs"]["num_filter"]) \
// num_group
for k in _str2tuple(node["attrs"]["kernel"]):
cur_param *= int(k)
cur_param += int(node["attrs"]["num_filter"])
elif op == 'FullyConnected':
if "no_bias" in node["attrs"] and node["attrs"]["no_bias"] == 'True':
cur_param = pre_filter * int(node["attrs"]["num_hidden"])
else:
cur_param = (pre_filter+1) * int(node["attrs"]["num_hidden"])
elif op == 'BatchNorm':
key = node["name"] + "_output"
if show_shape:
num_filter = shape_dict[key][1]
cur_param = int(num_filter) * 2
elif op == 'Embedding':
cur_param = int(node["attrs"]['input_dim']) * int(node["attrs"]['output_dim'])
if not pre_node:
first_connection = ''
else:
first_connection = pre_node[0]
fields = [node['name'] + '(' + op + ')',
"x".join([str(x) for x in out_shape]),
cur_param,
first_connection]
print_row(fields, positions)
if len(pre_node) > 1:
for i in range(1, len(pre_node)):
fields = ['', '', '', pre_node[i]]
print_row(fields, positions)
return cur_param
total_params = 0
for i, node in enumerate(nodes):
out_shape = []
op = node["op"]
if op == "null" and i > 0:
continue
if op != "null" or i in heads:
if show_shape:
if op != "null":
key = node["name"] + "_output"
else:
key = node["name"]
if key in shape_dict:
out_shape = shape_dict[key][1:]
total_params += print_layer_summary(nodes[i], out_shape)
if i == len(nodes) - 1:
print('=' * line_length)
else:
print('_' * line_length)
print("Total params: {params}".format(params=total_params))
print('_' * line_length)
def plot_network(symbol, title="plot", save_format='pdf', shape=None, dtype=None, node_attrs={},
hide_weights=True):
"""Creates a visualization (Graphviz digraph object) of the given computation graph.
Graphviz must be installed for this function to work.
Parameters
----------
title: str, optional
Title of the generated visualization.
symbol: Symbol
A symbol from the computation graph. The generated digraph will visualize the part
of the computation graph required to compute `symbol`.
shape: dict, optional
Specifies the shape of the input tensors. If specified, the visualization will include
the shape of the tensors between the nodes. `shape` is a dictionary mapping
input symbol names (str) to the corresponding tensor shape (tuple).
dtype: dict, optional
Specifies the type of the input tensors. If specified, the visualization will include
the type of the tensors between the nodes. `dtype` is a dictionary mapping
input symbol names (str) to the corresponding tensor type (e.g. `numpy.float32`).
node_attrs: dict, optional
Specifies the attributes for nodes in the generated visualization. `node_attrs` is
a dictionary of Graphviz attribute names and values. For example::
node_attrs={"shape":"oval","fixedsize":"false"}
will use oval shape for nodes and allow variable sized nodes in the visualization.
hide_weights: bool, optional
If True (default), then inputs with names of form *_weight* (corresponding to weight
tensors) or *_bias* (corresponding to bias vectors) will be hidden for a cleaner
visualization.
Returns
-------
dot: Digraph
A Graphviz digraph object visualizing the computation graph to compute `symbol`.
Example
-------
>>> net = mx.sym.Variable('data')
>>> net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=128)
>>> net = mx.sym.Activation(data=net, name='relu1', act_type="relu")
>>> net = mx.sym.FullyConnected(data=net, name='fc2', num_hidden=10)
>>> net = mx.sym.SoftmaxOutput(data=net, name='out')
>>> digraph = mx.viz.plot_network(net, shape={'data':(100,200)},
... node_attrs={"fixedsize":"false"})
>>> digraph.view()
Notes
-----
If ``mxnet`` is imported, the visualization module can be used in its short-form.
For example, if we ``import mxnet`` as follows::
import mxnet
this method in visualization module can be used in its short-form as::
mxnet.viz.plot_network(...)
"""
# todo add shape support
try:
from graphviz import Digraph
    except ImportError:
raise ImportError("Draw network requires graphviz library")
if not isinstance(symbol, Symbol):
raise TypeError("symbol must be a Symbol")
internals = symbol.get_internals()
draw_shape = shape is not None
if draw_shape:
_, out_shapes, _ = internals.infer_shape(**shape)
if out_shapes is None:
raise ValueError("Input shape is incomplete")
shape_dict = dict(zip(internals.list_outputs(), out_shapes))
draw_type = dtype is not None
if draw_type:
_, out_types, _ = internals.infer_type(**dtype)
if out_types is None:
raise ValueError("Input type is incomplete")
type_dict = dict(zip(internals.list_outputs(), out_types))
conf = json.loads(symbol.tojson())
nodes = conf["nodes"]
# check if multiple nodes have the same name
if len(nodes) != len(set([node["name"] for node in nodes])):
seen_nodes = set()
# find all repeated names
repeated = set(node['name'] for node in nodes if node['name'] in seen_nodes
or seen_nodes.add(node['name']))
warning_message = "There are multiple variables with the same name in your graph, " \
"this may result in cyclic graph. Repeated names: " + ','.join(repeated)
warnings.warn(warning_message, RuntimeWarning)
# default attributes of node
node_attr = {"shape": "box", "fixedsize": "true",
"width": "1.3", "height": "0.8034", "style": "filled"}
# merge the dict provided by user and the default one
node_attr.update(node_attrs)
dot = Digraph(name=title, format=save_format)
# color map
cm = ("#8dd3c7", "#fb8072", "#ffffb3", "#bebada", "#80b1d3",
"#fdb462", "#b3de69", "#fccde5")
def looks_like_weight(name):
"""Internal helper to figure out if node should be hidden with `hide_weights`.
"""
weight_like = ('_weight', '_bias', '_beta', '_gamma',
'_moving_var', '_moving_mean', '_running_var', '_running_mean')
return name.endswith(weight_like)
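    # Illustrative matches (hypothetical node names): "fc1_weight", "conv0_bias" and
    # "bn0_moving_mean" are weight-like and get hidden when hide_weights=True, while
    # "data" or "fc1_output" are kept.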
# make nodes
hidden_nodes = set()
for node in nodes:
op = node["op"]
name = node["name"]
# input data
attr = copy.deepcopy(node_attr)
label = name
if op == "null":
if looks_like_weight(node["name"]):
if hide_weights:
hidden_nodes.add(node["name"])
# else we don't render a node, but
# don't add it to the hidden_nodes set
# so it gets rendered as an empty oval
continue
attr["shape"] = "oval" # inputs get their own shape
label = node["name"]
attr["fillcolor"] = cm[0]
elif op == "Convolution":
label = "Convolution\n{kernel}/{stride}, {filter}".format(
kernel="x".join(_str2tuple(node["attrs"]["kernel"])),
stride="x".join(_str2tuple(node["attrs"]["stride"]))
if "stride" in node["attrs"] else "1",
filter=node["attrs"]["num_filter"]
)
attr["fillcolor"] = cm[1]
elif op == "FullyConnected":
label = "FullyConnected\n{hidden}".format(hidden=node["attrs"]["num_hidden"])
attr["fillcolor"] = cm[1]
elif op == "BatchNorm":
attr["fillcolor"] = cm[3]
elif op == 'Activation':
act_type = node["attrs"]["act_type"]
label = 'Activation\n{activation}'.format(activation=act_type)
attr["fillcolor"] = cm[2]
elif op == 'LeakyReLU':
attrs = node.get("attrs")
act_type = attrs.get("act_type", "Leaky") if attrs else "Leaky"
label = 'LeakyReLU\n{activation}'.format(activation=act_type)
attr["fillcolor"] = cm[2]
elif op == "Pooling":
label = "Pooling\n{pooltype}, {kernel}/{stride}".format(pooltype=node["attrs"]["pool_type"],
kernel="x".join(_str2tuple(node["attrs"]["kernel"]))
if "kernel" in node["attrs"] else "[]",
stride="x".join(_str2tuple(node["attrs"]["stride"]))
if "stride" in node["attrs"] else "1")
attr["fillcolor"] = cm[4]
elif op in ("Concat", "Flatten", "Reshape"):
attr["fillcolor"] = cm[5]
elif op == "Softmax":
attr["fillcolor"] = cm[6]
else:
attr["fillcolor"] = cm[7]
if op == "Custom":
label = node["attrs"]["op_type"]
dot.node(name=name, label=label, **attr)
# add edges
for node in nodes: # pylint: disable=too-many-nested-blocks
op = node["op"]
name = node["name"]
if op == "null":
continue
else:
inputs = node["inputs"]
if node['op'] == '_contrib_BilinearResize2D':
inputs = [inputs[0]]
for item in inputs:
input_node = nodes[item[0]]
input_name = input_node["name"]
if input_name not in hidden_nodes:
attr = {"dir": "back", 'arrowtail':'open', 'label': ''}
# add shapes
if draw_shape:
if input_node["op"] != "null":
key = input_name + "_output"
if "attrs" in input_node:
params = input_node["attrs"]
if "num_outputs" in params:
key += str(int(params["num_outputs"]) - 1)
shape = shape_dict[key][1:]
label = "x".join([str(x) for x in shape])
attr["label"] = label
else:
key = input_name
shape = shape_dict[key][1:]
label = "x".join([str(x) for x in shape])
attr["label"] = label
if draw_type:
if input_node["op"] != "null":
key = input_name + "_output"
if "attrs" in input_node:
params = input_node["attrs"]
if "num_outputs" in params:
key += str(int(params["num_outputs"]) - 1)
dtype = type_dict[key]
attr["label"] += '(' + dtype.__name__ + ')'
else:
key = input_name
dtype = type_dict[key]
attr["label"] += '(' + dtype.__name__ + ')'
dot.edge(tail_name=name, head_name=input_name, **attr)
return dot
|
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_firewall_multicast_policy
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_firewall_multicast_policy.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_firewall_multicast_policy_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_multicast_policy': {
'action': 'accept',
'dnat': 'test_value_4',
'dstintf': 'test_value_5',
'end_port': '6',
'id': '7',
'logtraffic': 'enable',
'protocol': '9',
'snat': 'enable',
'snat_ip': 'test_value_11',
'srcintf': 'test_value_12',
'start_port': '13',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_multicast_policy.fortios_firewall(input_data, fos_instance)
expected_data = {
'action': 'accept',
'dnat': 'test_value_4',
'dstintf': 'test_value_5',
'end-port': '6',
'id': '7',
'logtraffic': 'enable',
'protocol': '9',
'snat': 'enable',
'snat-ip': 'test_value_11',
'srcintf': 'test_value_12',
'start-port': '13',
'status': 'enable'
}
set_method_mock.assert_called_with('firewall', 'multicast-policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_multicast_policy_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_multicast_policy': {
'action': 'accept',
'dnat': 'test_value_4',
'dstintf': 'test_value_5',
'end_port': '6',
'id': '7',
'logtraffic': 'enable',
'protocol': '9',
'snat': 'enable',
'snat_ip': 'test_value_11',
'srcintf': 'test_value_12',
'start_port': '13',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_multicast_policy.fortios_firewall(input_data, fos_instance)
expected_data = {
'action': 'accept',
'dnat': 'test_value_4',
'dstintf': 'test_value_5',
'end-port': '6',
'id': '7',
'logtraffic': 'enable',
'protocol': '9',
'snat': 'enable',
'snat-ip': 'test_value_11',
'srcintf': 'test_value_12',
'start-port': '13',
'status': 'enable'
}
set_method_mock.assert_called_with('firewall', 'multicast-policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_multicast_policy_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_multicast_policy': {
'action': 'accept',
'dnat': 'test_value_4',
'dstintf': 'test_value_5',
'end_port': '6',
'id': '7',
'logtraffic': 'enable',
'protocol': '9',
'snat': 'enable',
'snat_ip': 'test_value_11',
'srcintf': 'test_value_12',
'start_port': '13',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_multicast_policy.fortios_firewall(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall', 'multicast-policy', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_multicast_policy_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_multicast_policy': {
'action': 'accept',
'dnat': 'test_value_4',
'dstintf': 'test_value_5',
'end_port': '6',
'id': '7',
'logtraffic': 'enable',
'protocol': '9',
'snat': 'enable',
'snat_ip': 'test_value_11',
'srcintf': 'test_value_12',
'start_port': '13',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_multicast_policy.fortios_firewall(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall', 'multicast-policy', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_multicast_policy_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_multicast_policy': {
'action': 'accept',
'dnat': 'test_value_4',
'dstintf': 'test_value_5',
'end_port': '6',
'id': '7',
'logtraffic': 'enable',
'protocol': '9',
'snat': 'enable',
'snat_ip': 'test_value_11',
'srcintf': 'test_value_12',
'start_port': '13',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_multicast_policy.fortios_firewall(input_data, fos_instance)
expected_data = {
'action': 'accept',
'dnat': 'test_value_4',
'dstintf': 'test_value_5',
'end-port': '6',
'id': '7',
'logtraffic': 'enable',
'protocol': '9',
'snat': 'enable',
'snat-ip': 'test_value_11',
'srcintf': 'test_value_12',
'start-port': '13',
'status': 'enable'
}
set_method_mock.assert_called_with('firewall', 'multicast-policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_firewall_multicast_policy_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_multicast_policy': {
'random_attribute_not_valid': 'tag',
'action': 'accept',
'dnat': 'test_value_4',
'dstintf': 'test_value_5',
'end_port': '6',
'id': '7',
'logtraffic': 'enable',
'protocol': '9',
'snat': 'enable',
'snat_ip': 'test_value_11',
'srcintf': 'test_value_12',
'start_port': '13',
'status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_multicast_policy.fortios_firewall(input_data, fos_instance)
expected_data = {
'action': 'accept',
'dnat': 'test_value_4',
'dstintf': 'test_value_5',
'end-port': '6',
'id': '7',
'logtraffic': 'enable',
'protocol': '9',
'snat': 'enable',
'snat-ip': 'test_value_11',
'srcintf': 'test_value_12',
'start-port': '13',
'status': 'enable'
}
set_method_mock.assert_called_with('firewall', 'multicast-policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
|
import demistomock as demisto
from CommonServerPython import *
from dxlclient.client_config import DxlClientConfig
from dxlclient.client import DxlClient
from dxlclient.broker import Broker
from dxltieclient import TieClient
from dxltieclient.constants import HashType
from datetime import datetime
VENDOR_NAME = 'McAfee Threat Intelligence Exchange'
HASH_TYPE_KEYS = {
'md5': HashType.MD5,
'sha1': HashType.SHA1,
'sha256': HashType.SHA256
}
TRUST_LEVELS = {
'0': 'NOT_SET',
'1': 'KNOWN_MALICIOUS',
'15': 'MOST_LIKELY_MALICIOUS',
'30': 'MIGHT_BE_MALICIOUS',
'50': 'UNKNOWN',
'70': 'MIGHT_BE_TRUSTED',
'85': 'MOST_LIKELY_TRUSTED',
'99': 'KNOWN_TRUSTED',
'100': 'KNOWN_TRUSTED_INSTALLER'
}
PROVIDER = {
'1': 'Global Threat Intelligence (GTI)',
'3': 'Enterprise reputation',
'5': 'Advanced Threat Defense (ATD)',
'7': 'Web Gateway (MWG)'
}
def validate_certificates_format():
if '-----BEGIN PRIVATE KEY-----' not in demisto.params()['private_key']:
return_error(
"The private key content seems to be incorrect as it doesn't start with -----BEGIN PRIVATE KEY-----")
if '-----END PRIVATE KEY-----' not in demisto.params()['private_key']:
return_error(
"The private key content seems to be incorrect as it doesn't end with -----END PRIVATE KEY-----")
if '-----BEGIN CERTIFICATE-----' not in demisto.params()['cert_file']:
return_error("The client certificates content seem to be "
"incorrect as they don't start with '-----BEGIN CERTIFICATE-----'")
if '-----END CERTIFICATE-----' not in demisto.params()['cert_file']:
return_error(
"The client certificates content seem to be incorrect as it doesn't end with -----END CERTIFICATE-----")
if not demisto.params()['broker_ca_bundle'].lstrip(" ").startswith('-----BEGIN CERTIFICATE-----'):
return_error(
"The broker certificate seem to be incorrect as they don't start with '-----BEGIN CERTIFICATE-----'")
if not demisto.params()['broker_ca_bundle'].rstrip(" ").endswith('-----END CERTIFICATE-----'):
return_error(
"The broker certificate seem to be incorrect as they don't end with '-----END CERTIFICATE-----'")
def create_error_entry(contents):
return {'ContentsFormat': formats['text'], 'Type': entryTypes['error'], 'Contents': contents}
def get_client_config():
config = DxlClientConfig(
broker_ca_bundle=broker_ca_bundle,
cert_file=cert_file,
private_key=private_key,
brokers=[Broker.parse(url) for url in broker_urls]
)
config.connect_retries = 1
config.reconnect_delay = 1
config.reconnect_delay_max = 10
return config
def get_provider(provider_id):
provider_id_str = str(provider_id)
    return PROVIDER.get(provider_id_str, provider_id_str)
def parse_reputation(rep):
# get trust level
trust_level = str(rep.get('trustLevel'))
verbose_trust_level = TRUST_LEVELS.get(trust_level, trust_level)
# get provider
provider_id = rep.get('providerId')
provider = get_provider(provider_id)
# get date
create_date = rep.get('createDate')
create_date_str = str(datetime.fromtimestamp(create_date))
res = {
'Trust level': trust_level,
'Trust level (verbose)': verbose_trust_level,
'Provider ID': provider_id,
'Provider (verbose)': provider,
'Created date': create_date_str
}
return res
def parse_reference(reference):
agent_guid = reference.get('agentGuid')
date = reference.get('date')
try:
date = datetime.fromtimestamp(date)
except ValueError:
date = datetime.fromtimestamp(date / 1000)
return {
'Date': str(date),
'AgentGuid': agent_guid.replace('{', '').replace('}', '') # remove brackets if exist
}
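# The TIE API may report reference dates in epoch seconds or milliseconds; an
# oversized value makes datetime.fromtimestamp raise ValueError, which triggers the
# /1000 fallback above. Hypothetical sketch:
#   parse_reference({'agentGuid': '{ABC}', 'date': 1558444800})     # seconds
#   parse_reference({'agentGuid': '{ABC}', 'date': 1558444800000})  # ms, divided by 1000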
def reputations_to_table(reputations):
return [parse_reputation(rep) for rep in reputations]
def references_to_table(references):
return [parse_reference(ref) for ref in references]
def trust_level_to_score(trust_level):
if (trust_level >= 70):
return 1
elif (trust_level == 30):
return 2
elif (trust_level == 0 or trust_level == 50):
return 0
elif (trust_level < 30):
return 3
else:
        # Shouldn't reach here, as the API doesn't support values between 31 and 69 other than 50
return 0
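# Resulting DBot score mapping (0 unknown, 1 good, 2 suspicious, 3 bad), shown for a
# few illustrative trust levels:
#   100, 99, 85, 70 -> 1    30 -> 2    50, 0 -> 0    15, 1 -> 3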
def get_trust_level_and_score(reputations):
    trust_level = 101  # more than the highest possible trust level
vendor = VENDOR_NAME
for rep in reputations:
rep_trust_level = rep.get('trustLevel', 0)
if rep_trust_level != 0 and rep_trust_level < trust_level:
trust_level = rep.get('trustLevel')
vendor = get_provider(rep.get('providerId'))
if trust_level == 101:
# no trust_level found
return {
'trust_level': 0,
'score': 0,
'vendor': vendor
}
score = trust_level_to_score(trust_level)
if (vendor == 'Enterprise reputation'):
vendor = VENDOR_NAME
return {
'trust_level': trust_level,
'score': score,
'vendor': vendor
}
def test():
config = get_client_config()
with DxlClient(config) as client:
client.connect()
client.disconnect()
def file(hash_inputs):
hash_list = []
for hash_value in hash_inputs:
config = get_client_config()
with DxlClient(config) as client:
client.connect()
# Create the McAfee Threat Intelligence Exchange (TIE) client
tie_client = TieClient(client)
hash_type = get_hash_type(hash_value)
hash_type_key = HASH_TYPE_KEYS.get(hash_type)
if not hash_type_key:
                return create_error_entry('file argument must be sha1 (40 characters), sha256 (64 characters) or md5 (32 characters)')
hash_param = {}
hash_param[hash_type_key] = hash_value
res = tie_client.get_file_reputation(hash_param)
reputations = res.values()
table = reputations_to_table(reputations)
            # create context
context_file = {}
hash_type_uppercase = hash_type.upper()
            tl_score = get_trust_level_and_score(reputations)
context_file[hash_type_uppercase] = hash_value
context_file['TrustLevel'] = tl_score['trust_level']
context_file['Vendor'] = tl_score['vendor']
dbot_score = [{'Indicator': hash_value, 'Type': 'hash', 'Vendor': tl_score['vendor'],
'Score': tl_score['score']},
{'Indicator': hash_value, 'Type': 'file', 'Vendor': tl_score['vendor'],
'Score': tl_score['score']}]
if tl_score['score'] >= 2:
context_file['Malicious'] = {
'Vendor': tl_score['vendor'],
'Score': tl_score['score'],
'Description': 'Trust level is ' + str(tl_score['trust_level'])
}
ec = {'DBotScore': dbot_score, outputPaths['file']: context_file}
hash_list.append({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': reputations,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('McAfee TIE Hash Reputations For %s:' % (hash_value,), table),
'EntryContext': ec
})
return hash_list
def file_references(hash):
config = get_client_config()
with DxlClient(config) as client:
client.connect()
# Create the McAfee Threat Intelligence Exchange (TIE) client
tie_client = TieClient(client)
hash_type = get_hash_type(hash)
hash_type_key = HASH_TYPE_KEYS.get(hash_type)
if not hash_type_key:
            return create_error_entry('file argument must be sha1 (40 characters), sha256 (64 characters) or md5 (32 characters)')
hash_param = {}
hash_param[hash_type_key] = hash
references = tie_client.get_file_first_references(hash_param)
table = references_to_table(references)
        # create context
context_file = {}
hash_type_uppercase = hash_type.upper()
context_file[hash_type_uppercase] = hash
context_file['References'] = table
ec = {}
ec[outputPaths['file']] = context_file
return {
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': references,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('References for hash %s' % (hash,), table),
'EntryContext': ec
}
def set_file_reputation(hash, trust_level, filename, comment):
config = get_client_config()
# find trust_level key
trust_level_key = None
for k, v in TRUST_LEVELS.iteritems():
if v == trust_level:
trust_level_key = k
if not trust_level_key:
        return create_error_entry('illegal argument trust_level %s. Choose a value from the predefined trust levels' % (trust_level, ))
with DxlClient(config) as client:
client.connect()
tie_client = TieClient(client)
hash_type = get_hash_type(hash)
hash_type_key = HASH_TYPE_KEYS.get(hash_type)
if not hash_type_key:
            return create_error_entry('file argument must be sha1 (40 characters), sha256 (64 characters) or md5 (32 characters)')
hash_param = {}
hash_param[hash_type_key] = hash
try:
tie_client.set_file_reputation(trust_level_key, hash_param, filename, comment)
            return 'Successfully set file reputation'
except Exception as ex:
return create_error_entry(str(ex))
def main():
try:
args = demisto.args()
if demisto.command() == 'test-module':
test()
demisto.results('ok')
elif demisto.command() == 'file':
results = file(argToList(args.get('file')))
demisto.results(results)
elif demisto.command() == 'tie-file-references':
results = file_references(args.get('file'))
demisto.results(results)
elif demisto.command() == 'tie-set-file-reputation':
results = set_file_reputation(
args.get('file'),
args.get('trust_level'),
args.get('filename'),
args.get('comment')
)
demisto.results(results)
except Exception as e:
validate_certificates_format()
return_error(str(e))
if __name__ in ['__main__', '__builtin__', 'builtins']:
broker_ca_bundle = './brokercerts.crt'
with open(broker_ca_bundle, "w") as text_file:
text_file.write(demisto.params()['broker_ca_bundle'])
cert_file = './cert_file.crt'
with open(cert_file, "w") as text_file:
text_file.write(demisto.params()['cert_file'])
private_key = './private_key.key'
with open(private_key, "w") as text_file:
text_file.write(demisto.params()['private_key'])
broker_urls = demisto.params()['broker_urls'].split(',')
main()
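# Illustrative note (assumption, not part of the integration): demisto.params()
# above is expected to provide roughly the following keys; the broker URL
# format shown is a typical DXL example and is not taken from this file.
#   broker_ca_bundle: PEM text of the broker CA chain
#   cert_file:        PEM client certificate
#   private_key:      PEM private key
#   broker_urls:      comma-separated list, e.g. 'ssl://broker.example:8883'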
|
|
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.cast import can_hold_element
from pandas.core.dtypes.dtypes import (
ExtensionDtype,
PandasDtype,
)
from pandas.core.dtypes.generic import ABCPandasArray
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.numpy_ import PandasArray
from pandas.core.internals import (
blocks,
managers,
)
from pandas.tests.extension import base
# TODO(ArrayManager) PandasArray
pytestmark = td.skip_array_manager_not_yet_implemented
def _extract_array_patched(obj):
if isinstance(obj, (pd.Index, pd.Series)):
obj = obj._values
if isinstance(obj, ABCPandasArray):
# TODO for reasons unclear, we get here in a couple of tests
# with PandasArray._typ *not* patched
obj = obj.to_numpy()
return obj
def _can_hold_element_patched(obj, element) -> bool:
if isinstance(element, PandasArray):
element = element.to_numpy()
return can_hold_element(obj, element)
orig_assert_attr_equal = tm.assert_attr_equal
def _assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
    patch tm.assert_attr_equal so PandasDtype("object") is close enough to
np.dtype("object")
"""
if attr == "dtype":
lattr = getattr(left, "dtype", None)
rattr = getattr(right, "dtype", None)
if isinstance(lattr, PandasDtype) and not isinstance(rattr, PandasDtype):
left = left.astype(lattr.numpy_dtype)
elif isinstance(rattr, PandasDtype) and not isinstance(lattr, PandasDtype):
right = right.astype(rattr.numpy_dtype)
orig_assert_attr_equal(attr, left, right, obj)
@pytest.fixture(params=["float", "object"])
def dtype(request):
return PandasDtype(np.dtype(request.param))
@pytest.fixture
def allow_in_pandas(monkeypatch):
"""
    A monkeypatch to tell pandas to let us in.
By default, passing a PandasArray to an index / series / frame
constructor will unbox that PandasArray to an ndarray, and treat
it as a non-EA column. We don't want people using EAs without
reason.
The mechanism for this is a check against ABCPandasArray
in each constructor.
But, for testing, we need to allow them in pandas. So we patch
the _typ of PandasArray, so that we evade the ABCPandasArray
check.
"""
with monkeypatch.context() as m:
m.setattr(PandasArray, "_typ", "extension")
m.setattr(managers, "_extract_array", _extract_array_patched)
m.setattr(blocks, "can_hold_element", _can_hold_element_patched)
m.setattr(tm.asserters, "assert_attr_equal", _assert_attr_equal)
yield
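# With the patch above active, e.g. pd.Series(PandasArray(np.arange(3))) keeps
# the PandasArray as an extension array instead of unboxing it to an
# ndarray-backed column, which is what lets the base extension tests below
# exercise PandasArray at all.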
@pytest.fixture
def data(allow_in_pandas, dtype):
if dtype.numpy_dtype == "object":
return pd.Series([(i,) for i in range(100)]).array
return PandasArray(np.arange(1, 101, dtype=dtype._dtype))
@pytest.fixture
def data_missing(allow_in_pandas, dtype):
if dtype.numpy_dtype == "object":
return PandasArray(np.array([np.nan, (1,)], dtype=object))
return PandasArray(np.array([np.nan, 1.0]))
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def na_cmp():
def cmp(a, b):
return np.isnan(a) and np.isnan(b)
return cmp
@pytest.fixture
def data_for_sorting(allow_in_pandas, dtype):
"""Length-3 array with a known sort order.
This should be three items [B, C, A] with
A < B < C
"""
if dtype.numpy_dtype == "object":
# Use an empty tuple for first element, then remove,
# to disable np.array's shape inference.
return PandasArray(np.array([(), (2,), (3,), (1,)], dtype=object)[1:])
return PandasArray(np.array([1, 2, 0]))
@pytest.fixture
def data_missing_for_sorting(allow_in_pandas, dtype):
"""Length-3 array with a known sort order.
This should be three items [B, NA, A] with
A < B and NA missing.
"""
if dtype.numpy_dtype == "object":
return PandasArray(np.array([(1,), np.nan, (0,)], dtype=object))
return PandasArray(np.array([1, np.nan, 0]))
@pytest.fixture
def data_for_grouping(allow_in_pandas, dtype):
"""Data for factorization, grouping, and unique tests.
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
"""
if dtype.numpy_dtype == "object":
a, b, c = (1,), (2,), (3,)
else:
a, b, c = np.arange(3)
return PandasArray(
np.array([b, b, np.nan, np.nan, a, a, b, c], dtype=dtype.numpy_dtype)
)
@pytest.fixture
def skip_numpy_object(dtype, request):
"""
Tests for PandasArray with nested data. Users typically won't create
these objects via `pd.array`, but they can show up through `.array`
on a Series with nested data. Many of the base tests fail, as they aren't
appropriate for nested data.
    This fixture allows those tests to be skipped when it is applied as a
    usefixtures marker to an individual test or to a test class.
"""
if dtype == "object":
mark = pytest.mark.xfail(reason="Fails for object dtype")
request.node.add_marker(mark)
skip_nested = pytest.mark.usefixtures("skip_numpy_object")
class BaseNumPyTests:
@classmethod
def assert_series_equal(cls, left, right, *args, **kwargs):
# base class tests hard-code expected values with numpy dtypes,
# whereas we generally want the corresponding PandasDtype
if (
isinstance(right, pd.Series)
and not isinstance(right.dtype, ExtensionDtype)
and isinstance(left.dtype, PandasDtype)
):
right = right.astype(PandasDtype(right.dtype))
return tm.assert_series_equal(left, right, *args, **kwargs)
class TestCasting(BaseNumPyTests, base.BaseCastingTests):
@skip_nested
def test_astype_str(self, data):
# ValueError: setting an array element with a sequence
super().test_astype_str(data)
class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests):
@pytest.mark.skip(reason="We don't register our dtype")
# We don't want to register. This test should probably be split in two.
def test_from_dtype(self, data):
pass
@skip_nested
def test_series_constructor_scalar_with_index(self, data, dtype):
# ValueError: Length of passed values is 1, index implies 3.
super().test_series_constructor_scalar_with_index(data, dtype)
class TestDtype(BaseNumPyTests, base.BaseDtypeTests):
@pytest.mark.skip(reason="Incorrect expected.")
# we unsurprisingly clash with a NumPy name.
def test_check_dtype(self, data):
pass
class TestGetitem(BaseNumPyTests, base.BaseGetitemTests):
@skip_nested
def test_getitem_scalar(self, data):
# AssertionError
super().test_getitem_scalar(data)
class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests):
def test_groupby_extension_apply(
self, data_for_grouping, groupby_apply_op, request
):
dummy = groupby_apply_op([None])
if (
isinstance(dummy, pd.Series)
and data_for_grouping.dtype.numpy_dtype == object
):
mark = pytest.mark.xfail(reason="raises in MultiIndex construction")
request.node.add_marker(mark)
super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op)
class TestInterface(BaseNumPyTests, base.BaseInterfaceTests):
@skip_nested
def test_array_interface(self, data):
# NumPy array shape inference
super().test_array_interface(data)
class TestMethods(BaseNumPyTests, base.BaseMethodsTests):
@skip_nested
def test_shift_fill_value(self, data):
# np.array shape inference. Shift implementation fails.
super().test_shift_fill_value(data)
@skip_nested
def test_fillna_copy_frame(self, data_missing):
# The "scalar" for this array isn't a scalar.
super().test_fillna_copy_frame(data_missing)
@skip_nested
def test_fillna_copy_series(self, data_missing):
# The "scalar" for this array isn't a scalar.
super().test_fillna_copy_series(data_missing)
@skip_nested
def test_searchsorted(self, data_for_sorting, as_series):
# Test setup fails.
super().test_searchsorted(data_for_sorting, as_series)
@pytest.mark.xfail(reason="PandasArray.diff may fail on dtype")
def test_diff(self, data, periods):
return super().test_diff(data, periods)
class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests):
divmod_exc = None
series_scalar_exc = None
frame_scalar_exc = None
series_array_exc = None
@skip_nested
def test_divmod(self, data):
super().test_divmod(data)
@skip_nested
def test_divmod_series_array(self, data):
ser = pd.Series(data)
self._check_divmod_op(ser, divmod, data, exc=None)
@skip_nested
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_arith_series_with_array(self, data, all_arithmetic_operators, request):
opname = all_arithmetic_operators
if data.dtype.numpy_dtype == object and opname not in ["__add__", "__radd__"]:
mark = pytest.mark.xfail(reason="Fails for object dtype")
request.node.add_marker(mark)
super().test_arith_series_with_array(data, all_arithmetic_operators)
@skip_nested
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
class TestPrinting(BaseNumPyTests, base.BasePrintingTests):
pass
class TestNumericReduce(BaseNumPyTests, base.BaseNumericReduceTests):
def check_reduce(self, s, op_name, skipna):
result = getattr(s, op_name)(skipna=skipna)
# avoid coercing int -> float. Just cast to the actual numpy type.
expected = getattr(s.astype(s.dtype._dtype), op_name)(skipna=skipna)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("skipna", [True, False])
def test_reduce_series(self, data, all_boolean_reductions, skipna):
super().test_reduce_series(data, all_boolean_reductions, skipna)
@skip_nested
class TestBooleanReduce(BaseNumPyTests, base.BaseBooleanReduceTests):
pass
class TestMissing(BaseNumPyTests, base.BaseMissingTests):
@skip_nested
def test_fillna_series(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_series(data_missing)
@skip_nested
def test_fillna_frame(self, data_missing):
# Non-scalar "scalar" values.
super().test_fillna_frame(data_missing)
class TestReshaping(BaseNumPyTests, base.BaseReshapingTests):
@pytest.mark.skip(reason="Incorrect expected.")
def test_merge(self, data, na_value):
        # Fails creating expected (the key column becomes a PandasDtype)
super().test_merge(data, na_value)
class TestSetitem(BaseNumPyTests, base.BaseSetitemTests):
@skip_nested
def test_setitem_sequence_broadcasts(self, data, box_in_series):
# ValueError: cannot set using a list-like indexer with a different
# length than the value
super().test_setitem_sequence_broadcasts(data, box_in_series)
@skip_nested
def test_setitem_loc_scalar_mixed(self, data):
# AssertionError
super().test_setitem_loc_scalar_mixed(data)
@skip_nested
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
# AssertionError
super().test_setitem_loc_scalar_multiple_homogoneous(data)
@skip_nested
def test_setitem_iloc_scalar_mixed(self, data):
# AssertionError
super().test_setitem_iloc_scalar_mixed(data)
@skip_nested
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
# AssertionError
super().test_setitem_iloc_scalar_multiple_homogoneous(data)
@skip_nested
@pytest.mark.parametrize("setter", ["loc", None])
def test_setitem_mask_broadcast(self, data, setter):
# ValueError: cannot set using a list-like indexer with a different
# length than the value
super().test_setitem_mask_broadcast(data, setter)
@skip_nested
def test_setitem_scalar_key_sequence_raise(self, data):
# Failed: DID NOT RAISE <class 'ValueError'>
super().test_setitem_scalar_key_sequence_raise(data)
    # TODO: there is some issue with PandasArray, so skip this setitem test
    # for now and fix it later (GH 31446)
@skip_nested
@pytest.mark.parametrize(
"mask",
[
np.array([True, True, True, False, False]),
pd.array([True, True, True, False, False], dtype="boolean"),
],
ids=["numpy-array", "boolean-array"],
)
def test_setitem_mask(self, data, mask, box_in_series):
super().test_setitem_mask(data, mask, box_in_series)
def test_setitem_mask_raises(self, data, box_in_series):
super().test_setitem_mask_raises(data, box_in_series)
@skip_nested
@pytest.mark.parametrize(
"idx",
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
ids=["list", "integer-array", "numpy-array"],
)
def test_setitem_integer_array(self, data, idx, box_in_series):
super().test_setitem_integer_array(data, idx, box_in_series)
@pytest.mark.parametrize(
"idx, box_in_series",
[
([0, 1, 2, pd.NA], False),
pytest.param([0, 1, 2, pd.NA], True, marks=pytest.mark.xfail),
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
],
ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
)
def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
super().test_setitem_integer_with_missing_raises(data, idx, box_in_series)
@skip_nested
def test_setitem_slice(self, data, box_in_series):
super().test_setitem_slice(data, box_in_series)
@skip_nested
def test_setitem_loc_iloc_slice(self, data):
super().test_setitem_loc_iloc_slice(data)
def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
# https://github.com/pandas-dev/pandas/issues/32395
df = expected = pd.DataFrame({"data": pd.Series(data)})
result = pd.DataFrame(index=df.index)
# because result has object dtype, the attempt to do setting inplace
# is successful, and object dtype is retained
key = full_indexer(df)
result.loc[key, "data"] = df["data"]
# base class method has expected = df; PandasArray behaves oddly because
# we patch _typ for these tests.
if data.dtype.numpy_dtype != object:
if not isinstance(key, slice) or key != slice(None):
expected = pd.DataFrame({"data": data.to_numpy()})
self.assert_frame_equal(result, expected)
@skip_nested
class TestParsing(BaseNumPyTests, base.BaseParsingTests):
pass
class Test2DCompat(BaseNumPyTests, base.Dim2CompatTests):
pass
|
|
# -*- coding: utf-8 -*-
""" Wrap xtide's libtcd.
"""
from __future__ import absolute_import
from ctypes import (
c_bool,
c_char,
c_char_p,
c_double,
c_float,
c_int16,
c_int32,
c_uint8,
c_uint16,
c_uint32,
cdll,
sizeof,
Structure,
CFUNCTYPE,
POINTER,
)
ENCODING = 'iso-8859-1' # all strings encoded iso-8859-1
assert sizeof(c_float) == 4
c_float32 = c_float
assert sizeof(c_double) == 8
c_float64 = c_double
ONELINER_LENGTH = 90
MONOLOGUE_LENGTH = 10000
MAX_CONSTITUENTS = 255
# enum TIDE_RECORD_TYPE
REFERENCE_STATION = 1
SUBORDINATE_STATION = 2
NULLSLACKOFFSET = 0xA00
AMPLITUDE_EPSILON = 0.00005
class DB_HEADER_PUBLIC(Structure):
_fields_ = [
('version', c_char * ONELINER_LENGTH),
('major_rev', c_uint32),
('minor_rev', c_uint32),
('last_modified', c_char * ONELINER_LENGTH),
('number_of_records', c_uint32),
('start_year', c_int32),
('number_of_years', c_uint32),
('constituents', c_uint32),
('level_unit_types', c_uint32),
('dir_unit_types', c_uint32),
('restriction_types', c_uint32),
('datum_types', c_uint32),
('countries', c_uint32),
('tzfiles', c_uint32),
('legaleses', c_uint32),
('pedigree_types', c_uint32),
]
class TIDE_STATION_HEADER(Structure):
_fields_ = [
('record_number', c_int32),
('record_size', c_uint32),
('record_type', c_uint8),
('latitude', c_float64),
('longitude', c_float64),
('reference_station', c_int32),
('tzfile', c_int16),
('name', c_char * ONELINER_LENGTH),
]
class TIDE_RECORD(Structure):
_anonymous_ = ['header']
_fields_ = [
('header', TIDE_STATION_HEADER),
('country', c_int16),
('source', c_char * ONELINER_LENGTH),
('restriction', c_uint8),
('comments', c_char * MONOLOGUE_LENGTH),
('notes', c_char * MONOLOGUE_LENGTH),
('legalese', c_uint8),
('station_id_context', c_char * ONELINER_LENGTH),
('station_id', c_char * ONELINER_LENGTH),
('date_imported', c_uint32),
('xfields', c_char * MONOLOGUE_LENGTH),
('direction_units', c_uint8),
('min_direction', c_int32),
('max_direction', c_int32),
('level_units', c_uint8),
# type 1 only
('datum_offset', c_float32),
('datum', c_int16),
('zone_offset', c_int32),
('expiration_date', c_uint32),
('months_on_station', c_uint16),
('last_date_on_station', c_uint32),
('confidence', c_uint8),
('amplitude', c_float32 * MAX_CONSTITUENTS),
('epoch', c_float32 * MAX_CONSTITUENTS),
# type 2 only
('min_time_add', c_int32),
('min_level_add', c_float32),
('min_level_multiply', c_float32),
('max_time_add', c_int32),
('max_level_add', c_float32),
('max_level_multiply', c_float32),
('flood_begins', c_int32),
('ebb_begins', c_int32),
]
class Error(Exception):
pass
_lib = cdll.LoadLibrary("libtcd.so.0")
def _check_bool(result, func, args):
if not result:
raise Error("%s failed" % func.__name__)
return args
_marker = object()
class _Param(object):
""" Marker for parameter types. """
direction_flag = 1 # input parameter, by default
def __init__(self, typ, name=None, default=_marker):
self.typ = typ
if default is not _marker:
self.paramflag = (self.direction_flag, name, default)
elif name:
self.paramflag = (self.direction_flag, name)
else:
self.paramflag = (self.direction_flag,)
class _OutputParam(_Param):
direction_flag = 2 # output parameter
def _to_param(param_or_type):
if isinstance(param_or_type, _Param):
return param_or_type
return _Param(param_or_type)
def _declare(name, *params, **kwargs):
params = list(map(_to_param, params))
argtypes = tuple(param.typ for param in params)
paramflags = tuple(param.paramflag for param in params)
restype = kwargs.get('restype')
errcheck = kwargs.get('errcheck')
func = CFUNCTYPE(restype, *argtypes)((name, _lib), paramflags)
func.__name__ = name
if errcheck:
func.errcheck = errcheck
globals()[name] = func
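# Each _declare call below binds a libtcd entry point and publishes it as a
# module-level callable; e.g. the next line makes dump_tide_record(rec)
# available, taking a TIDE_RECORD (passed by reference) and returning None.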
_declare('dump_tide_record', _Param(POINTER(TIDE_RECORD), 'rec'))
# String tables
for _name in ('country',
'tzfile',
'level_units',
'dir_units',
'restriction',
'datum',
'legalese',
'constituent',
'station'):
_declare('get_' + _name, c_int32, restype=c_char_p)
_declare('find_' + _name, c_char_p, restype=c_int32)
for _name in 'country', 'tzfile', 'restriction', 'datum', 'legalese':
for _pfx in ('add_', 'find_or_add_'):
_declare(_pfx + _name,
_Param(c_char_p, 'name'),
_Param(POINTER(DB_HEADER_PUBLIC), 'db', default=None),
restype=c_int32)
_declare('get_speed', c_int32, restype=c_float64)
_declare('set_speed', c_int32, c_float64)
_declare('get_equilibrium', c_int32, c_int32, restype=c_float32)
_declare('set_equilibrium', c_int32, c_int32, c_float32)
_declare('get_node_factor', c_int32, c_int32, restype=c_float32)
_declare('set_node_factor', c_int32, c_int32, c_float32)
_declare('get_equilibriums', c_int32, restype=POINTER(c_float32))
_declare('get_node_factors', c_int32, restype=POINTER(c_float32))
_declare('get_time', c_char_p, restype=c_int32)
_declare('ret_time', c_int32, restype=c_char_p)
_declare('ret_time_neat', c_int32, restype=c_char_p)
_declare('ret_date', c_uint32, restype=c_char_p)
_declare('search_station', c_char_p, restype=c_int32)
_declare('open_tide_db', c_char_p, restype=c_bool, errcheck=_check_bool)
_declare('close_tide_db')
_declare('create_tide_db',
_Param(c_char_p, 'file'),
_Param(c_uint32, 'constituents'),
_Param(POINTER(c_char_p), 'constituent'),
_Param(POINTER(c_float64), 'speed'),
_Param(c_int32, 'start_year'),
_Param(c_uint32, 'num_years'),
_Param(POINTER(POINTER(c_float32)), 'equilibrium'),
_Param(POINTER(POINTER(c_float32)), 'node_factor'),
restype=c_bool, errcheck=_check_bool)
_declare('get_tide_db_header', restype=DB_HEADER_PUBLIC)
def _check_return_none_on_failure(result, func, args):
if isinstance(result, bool):
success = result
else:
success = result >= 0 # result is index or -1
rval = args[-1]
return rval if success else None
_declare('get_partial_tide_record',
_Param(c_int32, 'num'),
_OutputParam(POINTER(TIDE_STATION_HEADER)),
restype=c_bool, errcheck=_check_return_none_on_failure)
_declare('get_next_partial_tide_record',
_OutputParam(POINTER(TIDE_STATION_HEADER)),
restype=c_int32, errcheck=_check_return_none_on_failure)
_declare('get_nearest_partial_tide_record',
_Param(c_float64, 'lat'),
_Param(c_float64, 'lon'),
_OutputParam(POINTER(TIDE_STATION_HEADER)),
restype=c_int32, errcheck=_check_return_none_on_failure)
_declare('read_tide_record',
_Param(c_int32, 'num'),
_OutputParam(POINTER(TIDE_RECORD)),
restype=c_int32, errcheck=_check_return_none_on_failure)
_declare('read_next_tide_record',
_OutputParam(POINTER(TIDE_RECORD)),
restype=c_int32, errcheck=_check_return_none_on_failure)
_declare('add_tide_record',
_Param(POINTER(TIDE_RECORD), 'rec'),
_Param(POINTER(DB_HEADER_PUBLIC), 'db', default=None),
restype=c_bool, errcheck=_check_bool)
_declare('update_tide_record',
_Param(c_int32, 'num'),
_Param(POINTER(TIDE_RECORD), 'rec'),
_Param(POINTER(DB_HEADER_PUBLIC), 'db', default=None),
restype=c_bool, errcheck=_check_bool)
_declare('delete_tide_record',
_Param(c_int32, 'num'),
_Param(POINTER(DB_HEADER_PUBLIC), 'db', default=None),
restype=c_bool, errcheck=_check_bool)
_declare('infer_constituents',
POINTER(TIDE_RECORD),
restype=c_bool, errcheck=_check_bool)
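# Usage sketch (not part of the wrapper): iterate over the stations of an
# existing TCD file. Assumes libtcd.so.0 is installed and that a harmonics
# file exists at the placeholder path; station names come back as ISO-8859-1
# encoded bytes.
def _example_list_station_names(tcd_path=b"harmonics.tcd"):  # pragma: no cover
    open_tide_db(tcd_path)  # raises Error on failure via _check_bool
    try:
        header = get_tide_db_header()
        names = []
        for num in range(header.number_of_records):
            rec = read_tide_record(num)  # TIDE_RECORD instance, or None
            if rec is not None:
                names.append(rec.name.decode(ENCODING))
        return names
    finally:
        close_tide_db()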
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class StorageAccountsOperations(object):
"""StorageAccountsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-12-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def check_name_availability(
self, name, custom_headers=None, raw=False, **operation_config):
"""Checks that the storage account name is valid and is not already in
use.
:param name:
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`CheckNameAvailabilityResult
<azure.mgmt.storage.v2016_12_01.models.CheckNameAvailabilityResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
account_name = models.StorageAccountCheckNameAvailabilityParameters(name=name)
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('CheckNameAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Asynchronously creates a new storage account with the specified
parameters. If an account is already created and a subsequent create
request is issued with different properties, the account properties
will be updated. If an account is already created and a subsequent
create or update request is issued with the exact same set of
properties, the request will succeed.
:param resource_group_name: The name of the resource group within the
user's subscription. The name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters: :class:`StorageAccountCreateParameters
<azure.mgmt.storage.v2016_12_01.models.StorageAccountCreateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`StorageAccount
<azure.mgmt.storage.v2016_12_01.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
# Construct and send request
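        # Long-running create: long_running_send issues the initial PUT,
        # get_long_running_status polls the operation status link, and
        # get_long_running_output deserializes the final StorageAccount;
        # AzureOperationPoller (below) drives the three of them.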
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def delete(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the
user's subscription. The name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_properties(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Returns the properties for the specified storage account including but
not limited to name, SKU name, location, and account status. The
ListKeys operation should be used to retrieve storage keys.
:param resource_group_name: The name of the resource group within the
user's subscription. The name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccount
<azure.mgmt.storage.v2016_12_01.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""The update operation can be used to update the SKU, encryption, access
tier, or tags for a storage account. It can also be used to map the
account to a custom domain. Only one custom domain is supported per
storage account; the replacement/change of custom domain is not
supported. In order to replace an old custom domain, the old value must
be cleared/unregistered before a new value can be set. The update of
multiple properties is supported. This call does not change the storage
keys for the account. If you want to change the storage account keys,
use the regenerate keys operation. The location and name of the storage
account cannot be changed after creation.
:param resource_group_name: The name of the resource group within the
user's subscription. The name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the updated account.
:type parameters: :class:`StorageAccountUpdateParameters
<azure.mgmt.storage.v2016_12_01.models.StorageAccountUpdateParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccount
<azure.mgmt.storage.v2016_12_01.models.StorageAccount>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Lists all the storage accounts available under the subscription. Note
that storage keys are not returned; use the ListKeys operation for
this.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountPaged
<azure.mgmt.storage.v2016_12_01.models.StorageAccountPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all the storage accounts available under the given resource
group. Note that storage keys are not returned; use the ListKeys
operation for this.
:param resource_group_name: The name of the resource group within the
user's subscription. The name is case insensitive.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountPaged
<azure.mgmt.storage.v2016_12_01.models.StorageAccountPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.StorageAccountPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_keys(
self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):
"""Lists the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the
user's subscription. The name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountListKeysResult
<azure.mgmt.storage.v2016_12_01.models.StorageAccountListKeysResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountListKeysResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def regenerate_key(
self, resource_group_name, account_name, key_name, custom_headers=None, raw=False, **operation_config):
"""Regenerates one of the access keys for the specified storage account.
:param resource_group_name: The name of the resource group within the
user's subscription. The name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param key_name:
:type key_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`StorageAccountListKeysResult
<azure.mgmt.storage.v2016_12_01.models.StorageAccountListKeysResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
regenerate_key1 = models.StorageAccountRegenerateKeyParameters(key_name=key_name)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(regenerate_key1, 'StorageAccountRegenerateKeyParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccountListKeysResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_account_sas(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""List SAS credentials of a storage account.
:param resource_group_name: The name of the resource group within the
user's subscription. The name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list SAS credentials
for the storage account.
:type parameters: :class:`AccountSasParameters
<azure.mgmt.storage.v2016_12_01.models.AccountSasParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ListAccountSasResponse
<azure.mgmt.storage.v2016_12_01.models.ListAccountSasResponse>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'AccountSasParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ListAccountSasResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_service_sas(
self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config):
"""List service SAS credentials of a specific resource.
:param resource_group_name: The name of the resource group within the
user's subscription. The name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list service SAS
credentials.
:type parameters: :class:`ServiceSasParameters
<azure.mgmt.storage.v2016_12_01.models.ServiceSasParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ListServiceSasResponse
<azure.mgmt.storage.v2016_12_01.models.ListServiceSasResponse>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ServiceSasParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ListServiceSasResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
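# Usage sketch (not generated code): these operations are normally reached
# through the storage management client rather than instantiated directly.
# Assumes azure.mgmt.storage exposes this class as `storage_accounts`;
# 'my-resource-group' and 'mystorageacct' are placeholder names and
# `create_parameters` is a StorageAccountCreateParameters built by the caller.
def _example_usage(credentials, subscription_id, create_parameters):  # pragma: no cover
    from azure.mgmt.storage import StorageManagementClient
    client = StorageManagementClient(credentials, subscription_id)
    check = client.storage_accounts.check_name_availability('mystorageacct')
    if not check.name_available:
        return None
    poller = client.storage_accounts.create(
        'my-resource-group', 'mystorageacct', create_parameters)
    account = poller.result()  # AzureOperationPoller: block until provisioned
    keys = client.storage_accounts.list_keys('my-resource-group', 'mystorageacct')
    return account, keys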
|
|
""" Module for vetting low S/N run
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import sys
import io, json
import pdb
import numpy as np
from matplotlib import pyplot as plt
from pkg_resources import resource_filename
from astropy import units as u
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord, match_coordinates_sky
from linetools import utils as ltu
from pyigm.abssys.dla import DLASystem
from pyigm.abssys.lls import LLSSystem
from pyigm.surveys.llssurvey import LLSSurvey
from pyigm.surveys.dlasurvey import DLASurvey, dla_stat
from dla_cnn.io import load_ml_dr7
def pred_to_tbl(pred_file):
spec_list = ltu.loadjson(pred_file)
ids, zabs, conf, NHI, sigNHI, biasNHI = [], [], [], [], [], []
    # Loop over the spectra and their predicted absorbers
for ss,spec in enumerate(spec_list):
for dla in spec['dlas']:
if dla['type'] == "LYB":
continue
ids.append(ss)
zabs.append(dla['z_dla'])
NHI.append(dla['column_density'])
sigNHI.append(dla['std_column_density'])
biasNHI.append(dla['column_density_bias_adjust'])
conf.append(dla['dla_confidence'])
# Table
dla_tbl = Table()
dla_tbl['ids'] = ids
dla_tbl['zabs'] = zabs
dla_tbl['conf'] = conf
dla_tbl['sigNHI'] = sigNHI
dla_tbl['biasNHI'] = biasNHI
dla_tbl['NHI'] = NHI
# Return
return dla_tbl
def test_to_tbl(test_file):
test_dict = ltu.loadjson(test_file)
    ids, zabs, sl, NHI = [], [], [], []
ntest = len(test_dict)
    # Loop over the test sightlines and their DLAs
for ss in range(ntest):
ndla = test_dict[str(ss)]['nDLA']
for idla in range(ndla):
ids.append(ss)
zabs.append(test_dict[str(ss)][str(idla)]['zabs'])
NHI.append(test_dict[str(ss)][str(idla)]['NHI'])
sl.append(test_dict[str(ss)]['sl'])
# Table
test_tbl = Table()
test_tbl['ids'] = ids
test_tbl['zabs'] = zabs
test_tbl['NHI'] = NHI
test_tbl['sl'] = sl
# Return
return test_tbl
def score_ml_test(dz_toler=0.015, outfile='vette_lows2n.json',
test_file='data/lows2n_train_83557_10000.json',
pred_file='../Analysis/visuals_lows2n/lows2n_predictions.json.gz'):
"""
Parameters
----------
dz_toler
outfile
test_file
pred_file
Returns
-------
writes
"""
# Load Test
lows2n_dlas = test_to_tbl(test_file)
ntest = len(lows2n_dlas)
# Load ML
ml_abs = pred_to_tbl(pred_file)
ml_dlas = ml_abs['NHI'] >= 20.3
# Loop on test DLAs and save indices of the matches
test_ml_idx = np.zeros(ntest).astype(int) - 99999
for ii in range(ntest):
# Match to ML sl
in_sl = np.where(ml_abs['ids'] == lows2n_dlas['ids'][ii])[0]
dla_mts = np.where(np.abs(ml_abs['zabs'][in_sl] - lows2n_dlas['zabs'][ii]) < dz_toler)[0]
nmt = len(dla_mts)
if nmt == 0: # No match within dz
pass
        elif nmt == 1:  # A single match within dz
if ml_abs['NHI'][in_sl][dla_mts[0]] > 20.2999:
test_ml_idx[ii] = in_sl[dla_mts[0]]
else:
test_ml_idx[ii] = -1 * in_sl[dla_mts[0]]
else: # Very rarely the ML identifies two DLAs in the window
print("Double hit in lows2n DLA {:d}".format(ii))
imin = np.argmin(np.abs(ml_abs['zabs'][in_sl] - lows2n_dlas['zabs'][ii]))
test_ml_idx[ii] = in_sl[imin]
match = test_ml_idx >= 0
print("There were {:d} DLAs discovered by the CNN".format(np.sum(ml_dlas)))
print("There were {:d} DLAs recovered out of {:d}".format(np.sum(match), ntest))
print("There were {:d} false positive DLAs".format(np.sum(ml_dlas)-np.sum(match)))
# Write out misses
misses = np.where(test_ml_idx == -99999)[0]
print("There were {:d} DLAs missed altogether (false negatives)".format(len(misses)))
mtbl = Table()
for key in ['sl', 'NHI', 'zabs']:
mtbl[key] = lows2n_dlas[key][misses]
mtbl.write('lows2n_misses.ascii', format='ascii.fixed_width', overwrite=True)
# Write out SLLS
sllss = np.where((test_ml_idx < 0) & (test_ml_idx != -99999))[0]
print("There were {:d} DLAs recovered as SLLS".format(len(sllss)))
stbl = Table()
for key in ['sl', 'NHI', 'zabs']:
stbl[key] = lows2n_dlas[key][sllss]
    stbl.write('lows2n_slls.ascii', format='ascii.fixed_width', overwrite=True)
# Save
out_dict = {}
    out_dict['test_idx'] = test_ml_idx  # -99999 are misses; other negative values matched a system with NHI < 20.3 (SLLS)
ltu.savejson(outfile, ltu.jsonify(out_dict), overwrite=True)
# Stats on dz
dz = ml_abs['zabs'][test_ml_idx[match]] - lows2n_dlas['zabs'][match]
print("Median dz = {} and sigma(dz)= {}".format(np.median(dz), np.std(dz)))
def examine_false_pos(test_file='data/test_dlas_96629_10000.json.gz',
pred_file='data/test_dlas_96629_predictions.json.gz',
vette_file='vette_10k.json'):
""" Examine false positives in the Test set (held out)
"""
from pyigm.surveys.dlasurvey import DLASurvey
import h5py
import json
from matplotlib import pyplot as plt
# Load Test
test_dlas = test_to_tbl(test_file)
ntest = len(test_dlas)
# Load hdf5
CNN_result_path = '/home/xavier/Projects/ML_DLA_results/CNN/'
hdf5_datafile = CNN_result_path+'gensample_hdf5_files/test_dlas_96629_10000.hdf5'
hdf = h5py.File(hdf5_datafile, 'r')
headers = json.loads(hdf['meta'].value)['headers']
# Load ML
ml_abs = pred_to_tbl(pred_file)
# Vette
vette = ltu.loadjson(vette_file)
test_ml_idx = np.array(vette['test_idx'])
# Load DR5
dr5 = DLASurvey.load_SDSS_DR5()
all_dr5 = DLASurvey.load_SDSS_DR5(sample='all_sys')
# False positives
fpos = ml_abs['NHI'] >= 20.3 # Must be a DLA
imatched = np.where(test_ml_idx >= 0)[0]
match_val = test_ml_idx[imatched]
fpos[match_val] = False
print("There are {:d} total false positives".format(np.sum(fpos)))
# This nearly matches David's. Will run with his analysis.
fpos_in_dr5 = fpos.copy()
# Restrict on DR5
for idx in np.where(fpos_in_dr5)[0]:
# Convoluted indexing..
mlid = ml_abs['ids'][idx]
# Plate/Fiber
plate = headers[mlid]['PLATE']
fib = headers[mlid]['FIBER']
# Finally, match to DR5
dr5_sl = np.where((dr5.sightlines['PLATE'] == plate) &
(dr5.sightlines['FIB'] == fib))[0][0]
if (ml_abs['zabs'][idx] >= dr5.sightlines['Z_START'][dr5_sl]) & \
(ml_abs['zabs'][idx] <= dr5.sightlines['Z_END'][dr5_sl]):
pass
else:
fpos_in_dr5[idx] = False
print("Number of FP in DR5 analysis region = {:d}".format(np.sum(fpos_in_dr5)))
# How many match to DR5 SLLS?
slls = all_dr5.NHI < 20.3
slls_coord = all_dr5.coord[slls]
slls_zabs = all_dr5.zabs[slls]
nslls = 0
for idx in np.where(fpos_in_dr5)[0]:
# Convoluted indexing..
mlid = ml_abs['ids'][idx]
# RA/DEC
ra = headers[mlid]['RA_GROUP']
dec = headers[mlid]['DEC_GROUP']
coord = SkyCoord(ra=ra, dec=dec, unit='deg')
# Match coord
mt = coord.separation(slls_coord) < 3*u.arcsec
if np.any(mt):
# Match redshift
if np.min(np.abs(slls_zabs[mt] - ml_abs['zabs'][idx])) < 0.015:
nslls += 1
print("Number of FP that are SLLS in DR5 = {:d}".format(nslls))
low_NHI = ml_abs['NHI'][fpos_in_dr5] < 20.5
print("Number of FP that are NHI <= 20.5 = {:d}".format(np.sum(low_NHI)))
# Write out
fp_tbl = Table()
for key in ['ids', 'NHI', 'zabs', 'conf']:
fp_tbl[key] = ml_abs[key][fpos_in_dr5]
fp_tbl.write('test10k_false_pos.ascii', format='ascii.fixed_width', overwrite=True)
# Histogram
dr5_idx = np.where(fpos_in_dr5)
plt.clf()
ax = plt.gca()
ax.hist(ml_abs['conf'][dr5_idx])
plt.show()
def high_nhi_neg():
""" Examine High NHI false negatives in 10k test
"""
# Load ML
ml_abs = pred_to_tbl('../Vetting/data/test_dlas_96629_predictions.json.gz')
# Load Test
test_dlas = test_to_tbl('../Vetting/data/test_dlas_96629_10000.json.gz')
# Load vette
vette_10k = ltu.loadjson('../Vetting/vette_10k.json')
test_ml_idx = np.array(vette_10k['test_idx'])
misses = np.where(test_ml_idx == -99999)[0]
highNHI = test_dlas['NHI'][misses] > 21.2
high_tbl = test_dlas[misses[highNHI]]
# Write
high_tbl.write('test_highNHI_neg.ascii', format='ascii.fixed_width', overwrite=True)
def main(flg):
if (flg & 2**0): # Scorecard the ML run
score_ml_test() # 10k
#score_ml_test(outfile='vette_5k.json',
# test_file='data/test_dlas_5k96451.json.gz',
# pred_file='data/test_dlas_5k96451_predictions.json.gz')
if (flg & 2**1): # Generate list of high NHI
high_nhi_neg()
    if (flg & 2**2): # Examine false positives
examine_false_pos()
# Command line execution
if __name__ == '__main__':
if len(sys.argv) == 1: #
flg_vet = 0
flg_vet += 2**0 # Main scorecard
#flg_vet += 2**1 # High NHI
#flg_vet += 2**2 # False positives
else:
flg_vet = int(sys.argv[1])
main(flg_vet)
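# Usage note: the optional command-line argument is a bit flag, so running the
# script with argument 5 (= 2**0 + 2**2) would execute both the scorecard and
# the false-positive examination; with no argument only the scorecard runs.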
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sample data exhibiting audio summaries, via a waveform generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
import os.path
from absl import app
from absl import flags
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorboard.plugins.audio import summary
tf.compat.v1.disable_eager_execution()
FLAGS = flags.FLAGS
flags.DEFINE_string(
"logdir", "/tmp/audio_demo", "Directory into which to write TensorBoard data.",
)
flags.DEFINE_integer("steps", 50, "Number of frequencies of each waveform to generate.")
# Parameters for the audio output.
flags.DEFINE_integer("sample_rate", 44100, "Sample rate, in Hz.")
flags.DEFINE_float("duration", 2.0, "Duration of each waveform, in s.")
def _samples():
"""Compute how many samples should be included in each waveform."""
return int(FLAGS.sample_rate * FLAGS.duration)
def run(logdir, run_name, wave_name, wave_constructor):
"""Generate wave data of the given form.
The provided function `wave_constructor` should accept a scalar tensor
of type float32, representing the frequency (in Hz) at which to
construct a wave, and return a tensor of shape [1, _samples(), `n`]
representing audio data (for some number of channels `n`).
Waves will be generated at frequencies ranging from A4 to A5.
Arguments:
logdir: the top-level directory into which to write summary data
run_name: the name of this run; will be created as a subdirectory
under logdir
wave_name: the name of the wave being generated
wave_constructor: see above
"""
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(0)
# On each step `i`, we'll set this placeholder to `i`. This allows us
# to know "what time it is" at each step.
step_placeholder = tf.compat.v1.placeholder(tf.float32, shape=[])
# We want to linearly interpolate a frequency between A4 (440 Hz) and
# A5 (880 Hz).
with tf.name_scope("compute_frequency"):
f_min = 440.0
f_max = 880.0
t = step_placeholder / (FLAGS.steps - 1)
frequency = f_min * (1.0 - t) + f_max * t
# Let's log this frequency, just so that we can make sure that it's as
# expected.
tf.compat.v1.summary.scalar("frequency", frequency)
# Now, we pass this to the wave constructor to get our waveform. Doing
# so within a name scope means that any summaries that the wave
# constructor produces will be namespaced.
with tf.name_scope(wave_name):
waveform = wave_constructor(frequency)
# We also have the opportunity to annotate each audio clip with a
# label. This is a good place to include the frequency, because it'll
# be visible immediately next to the audio clip.
with tf.name_scope("compute_labels"):
samples = tf.shape(input=waveform)[0]
wave_types = tf.tile(["*Wave type:* `%s`." % wave_name], [samples])
frequencies = tf.strings.join(
[
"*Frequency:* ",
tf.tile([tf.as_string(frequency, precision=2)], [samples]),
" Hz.",
]
)
samples = tf.strings.join(
[
"*Sample:* ",
tf.as_string(tf.range(samples) + 1),
" of ",
tf.as_string(samples),
".",
]
)
labels = tf.strings.join([wave_types, frequencies, samples], separator=" ")
# We can place a description next to the summary in TensorBoard. This
# is a good place to explain what the summary represents, methodology
# for creating it, etc. Let's include the source code of the function
# that generated the wave.
source = "\n".join(
" %s" % line.rstrip() for line in inspect.getsourcelines(wave_constructor)[0]
)
description = "A wave of type `%r`, generated via:\n\n%s" % (wave_name, source,)
# Here's the crucial piece: we interpret this result as audio.
summary.op(
"waveform",
waveform,
FLAGS.sample_rate,
labels=labels,
display_name=wave_name,
description=description,
)
# Now, we can collect up all the summaries and begin the run.
summ = tf.compat.v1.summary.merge_all()
sess = tf.compat.v1.Session()
writer = tf.compat.v1.summary.FileWriter(os.path.join(logdir, run_name))
writer.add_graph(sess.graph)
sess.run(tf.compat.v1.global_variables_initializer())
for step in xrange(FLAGS.steps):
s = sess.run(summ, feed_dict={step_placeholder: float(step)})
writer.add_summary(s, global_step=step)
writer.close()
# Now, let's take a look at the kinds of waves that we can generate.
def sine_wave(frequency):
"""Emit a sine wave at the given frequency."""
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
ts = xs / FLAGS.sample_rate
return tf.sin(2 * math.pi * frequency * ts)
def square_wave(frequency):
"""Emit a square wave at the given frequency."""
# The square is just the sign of the sine!
return tf.sign(sine_wave(frequency))
def triangle_wave(frequency):
"""Emit a triangle wave at the given frequency."""
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
ts = xs / FLAGS.sample_rate
#
# A triangle wave looks like this:
#
# /\ /\
# / \ / \
# \ / \ /
# \/ \/
#
# If we look at just half a period (the first four slashes in the
# diagram above), we can see that it looks like a transformed absolute
# value function.
#
# Let's start by computing the times relative to the start of each
# half-wave pulse (each individual "mountain" or "valley", of which
# there are four in the above diagram).
half_pulse_index = ts * (frequency * 2)
half_pulse_angle = half_pulse_index % 1.0 # in [0, 1]
#
# Now, we can see that each positive half-pulse ("mountain") has
# amplitude given by A(z) = 0.5 - abs(z - 0.5), and then normalized:
absolute_amplitude = (0.5 - tf.abs(half_pulse_angle - 0.5)) / 0.5
#
# But every other half-pulse is negative, so we should invert these.
half_pulse_parity = tf.sign(1 - (half_pulse_index % 2.0))
amplitude = half_pulse_parity * absolute_amplitude
#
# This is precisely the desired result, so we're done!
return amplitude
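# Worked example of the formulas above (hand-computed, for intuition only): at
# half_pulse_index = 0.25 the angle is 0.25, so the normalized amplitude is
# (0.5 - |0.25 - 0.5|) / 0.5 = 0.5 and the parity is sign(1 - 0.25) = +1,
# giving +0.5; at half_pulse_index = 1.25 the angle is again 0.25 but the
# parity is sign(1 - 1.25) = -1, giving -0.5.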
# If we want to get fancy, we can use our above waves as primitives to
# build more interesting waves.
def bisine_wave(frequency):
"""Emit two sine waves, in stereo at different octaves."""
#
    # We can first use our existing sine generator to generate two different
    # waves.
f_hi = frequency
f_lo = frequency / 2.0
with tf.name_scope("hi"):
sine_hi = sine_wave(f_hi)
with tf.name_scope("lo"):
sine_lo = sine_wave(f_lo)
#
# Now, we have two tensors of shape [1, _samples(), 1]. By concatenating
# them along axis 2, we get a tensor of shape [1, _samples(), 2]---a
# stereo waveform.
return tf.concat([sine_lo, sine_hi], axis=2)
def bisine_wahwah_wave(frequency):
"""Emit two sine waves with balance oscillating left and right."""
#
# This is clearly intended to build on the bisine wave defined above,
# so we can start by generating that.
waves_a = bisine_wave(frequency)
#
# Then, by reversing axis 2, we swap the stereo channels. By mixing
# this with `waves_a`, we'll be able to create the desired effect.
waves_b = tf.reverse(waves_a, axis=[2])
#
# Let's have the balance oscillate from left to right four times.
iterations = 4
#
# Now, we compute the balance for each sample: `ts` has values
# in [0, 1] that indicate how much we should use `waves_a`.
xs = tf.reshape(tf.range(_samples(), dtype=tf.float32), [1, _samples(), 1])
thetas = xs / _samples() * iterations
ts = (tf.sin(math.pi * 2 * thetas) + 1) / 2
#
# Finally, we can mix the two together, and we're done.
wave = ts * waves_a + (1.0 - ts) * waves_b
#
# Alternately, we can make the effect more pronounced by exaggerating
# the sample data. Let's emit both variations.
exaggerated_wave = wave ** 3.0
return tf.concat([wave, exaggerated_wave], axis=0)
def run_all(logdir, verbose=False):
"""Generate waves of the shapes defined above.
Arguments:
logdir: the directory into which to store all the runs' data
verbose: if true, print out each run's name as it begins
"""
waves = [
sine_wave,
square_wave,
triangle_wave,
bisine_wave,
bisine_wahwah_wave,
]
for (i, wave_constructor) in enumerate(waves):
wave_name = wave_constructor.__name__
run_name = "%02d_%s" % (i + 1, wave_name)
if verbose:
print("--- Running: %s" % run_name)
run(logdir, run_name, wave_name, wave_constructor)
def main(unused_argv):
print("Saving output to %s." % FLAGS.logdir)
run_all(FLAGS.logdir, verbose=True)
print("Done. Output saved to %s." % FLAGS.logdir)
if __name__ == "__main__":
app.run(main)
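# Usage sketch (assumptions: this file saved locally as, say, audio_demo.py,
# with TensorFlow and TensorBoard installed): run
# `python audio_demo.py --logdir /tmp/audio_demo` to write the summaries, then
# `tensorboard --logdir /tmp/audio_demo` and open the Audio dashboard to listen
# to the generated waveforms.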
|
|
from __future__ import division # for proper float division
import os
import sys
import math
import time
import types
import functools
import random
import json
from collections import OrderedDict
import numpy as np
from director import ikconstraints
from director import ikconstraintencoder
from director import ikparameters
from director.fieldcontainer import FieldContainer
from director.utime import getUtime
from director import lcmUtils
from director import transformUtils
class PlannerPublisher(object):
def __init__(self, ikPlanner, affordanceManager):
self.ikPlanner = ikPlanner
self.affordanceManager = affordanceManager
self.jointNames = list(self.ikPlanner.jointController.jointNames)
self.jointLimits = {}
self.poses = {}
for jointName in self.jointNames:
self.jointLimits[jointName] = list(self.ikPlanner.robotModel.model.getJointLimits(jointName))
self._setup()
def _setup(self):
pass
def updateJointLimits(self, limitData):
for jointName, epsilon, jointPosition in limitData:
if epsilon < 0:
self.jointLimits[jointName][0]=jointPosition
else:
self.jointLimits[jointName][1]=jointPosition
def setupFields(self, constraints, ikParameters, positionCosts, nominalPoseName="", seedPoseName="", endPoseName=""):
poses = ikconstraintencoder.getPlanPoses(constraints, self.ikPlanner)
poses.update(self.poses)
poses['q_nom'] = list(self.ikPlanner.jointController.getPose('q_nom'))
fields = FieldContainer(
utime = getUtime(),
poses = poses,
constraints = constraints,
seedPose = seedPoseName,
nominalPose = nominalPoseName,
endPose = endPoseName,
jointNames = self.jointNames,
jointLimits = self.jointLimits,
positionCosts = positionCosts,
affordances = self.processAffordances(),
options = ikParameters,
)
return fields
def processIK(self, constraints, ikParameters, positionCosts, nominalPoseName="", seedPoseName=""):
raise Exception('not implemented')
def processTraj(self, constraints, ikParameters, positionCosts, nominalPoseName="", seedPoseName="", endPoseName=""):
raise Exception('not implemented')
def processAddPose(self, pose, poseName):
self.poses[poseName] = list(pose)
def processAffordances(self):
affs = self.affordanceManager.getCollisionAffordances()
s='['
first=True
for aff in affs:
des=aff.getDescription()
            classname = des['classname']
if first:
s+='{'
else:
s+='\n,{'
first=False
s+='"classname":"'+classname+'"'
s+=',"name":"'+des['Name']+'"'
s+=',"uuid":"'+des['uuid']+'"'
s+=',"pose": {"position":{"__ndarray__":'+repr(des['pose'][0].tolist())+'},"quaternion":{"__ndarray__":'+repr(des['pose'][1].tolist())+'}}'
if self.affordanceManager.affordanceUpdater is not None: # attached collision object / frameSync
if des['Name'] in self.affordanceManager.affordanceUpdater.attachedAffordances:
s+=',"attachedTo":"'+self.affordanceManager.affordanceUpdater.attachedAffordances[des['Name']]+'"'
else: # it's not attached
s+=',"attachedTo":"__world__"' # __world__ means it's a fixed collision object (sometimes called world or map - we use __world__ here)
else: # no affordanceUpdater - so no attached collision objects either
s+=',"attachedTo":"__world__"'
if classname=='MeshAffordanceItem':
s+=',"filename":"'+aff.getMeshManager().getFilesystemFilename(des['Filename'])+'"'
if classname=='SphereAffordanceItem':
s+=',"radius":'+repr(des['Radius'])
if classname=='CylinderAffordanceItem' or classname=='CapsuleAffordanceItem':
s+=',"radius":'+repr(des['Radius'])
s+=',"length":'+repr(des['Length'])
if classname=='BoxAffordanceItem':
s+=',"dimensions":'+repr(des['Dimensions'])
if classname=='CapsuleRingAffordanceItem':
s+=',"radius":'+repr(des['Radius'])
s+=',"tube_radius":'+repr(des['Tube Radius'])
s+=',"segments":'+repr(des['Segments'])
s+='}'
s=s+']'
return s
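# Illustrative sketch only (not called anywhere in this module): the string
# assembled by processAffordances() above encodes, per affordance, a dict of
# the following shape, which could equivalently be built with json.dumps. The
# helper name and the attached_to argument are hypothetical.
def _affordance_dict_example(des, attached_to='__world__'):
    """Return the common fields of one affordance description as a plain dict."""
    return {
        'classname': des['classname'],
        'name': des['Name'],
        'uuid': des['uuid'],
        'pose': {'position': {'__ndarray__': des['pose'][0].tolist()},
                 'quaternion': {'__ndarray__': des['pose'][1].tolist()}},
        'attachedTo': attached_to,
    }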
class DummyPlannerPublisher(PlannerPublisher):
pass
class MatlabDrakePlannerPublisher(PlannerPublisher):
def processAddPose(self, pose, poseName):
super(MatlabDrakePlannerPublisher, self).processAddPose(pose, poseName)
self.ikServer.sendPoseToServer(pose, poseName)
def processIK(self, constraints, ikParameters, positionCosts, nominalPoseName="", seedPoseName=""):
endPose, info = self.ikServer.runIk(constraints, ikParameters, nominalPostureName=nominalPoseName, seedPostureName=seedPoseName)
return endPose, info
def processTraj(self, constraints, ikParameters, positionCosts, nominalPoseName="", seedPoseName="", endPoseName=""):
listener = self.ikPlanner.getManipPlanListener()
info = self.ikServer.runIkTraj(constraints, poseStart=seedPoseName, poseEnd=endPoseName, nominalPose=nominalPoseName, ikParameters=ikParameters, additionalTimeSamples=self.ikPlanner.additionalTimeSamples, graspToHandLinkFrame=self.ikPlanner.newGraspToHandFrame(ikParameters.rrtHand))
plan = listener.waitForResponse(timeout=12000)
listener.finish()
return plan, info
class ExoticaPlannerPublisher(PlannerPublisher):
def setupMessage(self, fields):
# todo, exotica should migrate to new style option names.
# for backward compatibility, override the options field here
# and insert jointLimits to options list
options = OrderedDict(self.ikPlanner.getIkOptions()._properties)
options['jointLimits'] = fields.jointLimits
import drc as lcmdrc
msg = lcmdrc.exotica_planner_request_t()
msg.utime = fields.utime
msg.poses = json.dumps(fields.poses)
msg.constraints = ikconstraintencoder.encodeConstraints(fields.constraints)
msg.seed_pose = fields.seedPose
msg.nominal_pose = fields.nominalPose
msg.end_pose = fields.endPose
msg.joint_names = json.dumps(fields.jointNames)
msg.affordances = fields.affordances
msg.options = json.dumps(options)
return msg
def processIK(self, constraints, ikParameters, positionCosts, nominalPoseName="", seedPoseName=""):
fields = self.setupFields(constraints, ikParameters, positionCosts, nominalPoseName, seedPoseName)
msg = self.setupMessage(fields)
listener = self.ikPlanner.getManipIKListener()
lcmUtils.publish('IK_REQUEST', msg)
ikplan = listener.waitForResponse(timeout=12000)
listener.finish()
endPose = [0] * self.ikPlanner.jointController.numberOfJoints
if ikplan.num_states>0:
endPose[len(endPose)-len(ikplan.plan[ikplan.num_states-1].joint_position):] = ikplan.plan[ikplan.num_states-1].joint_position
info=ikplan.plan_info[ikplan.num_states-1]
else:
info = -1
self.ikPlanner.ikServer.infoFunc(info)
return endPose, info
def processTraj(self, constraints, ikParameters, positionCosts, nominalPoseName="", seedPoseName="", endPoseName=""):
# Temporary fix / HACK / TODO (should be done in exotica_json)
largestTspan = [0, 0]
for constraintIndex, _ in enumerate(constraints):
# Get tspan extend to normalise time-span
if np.isfinite(constraints[constraintIndex].tspan[0]) and np.isfinite(constraints[constraintIndex].tspan[1]):
largestTspan[0] = constraints[constraintIndex].tspan[0] if (constraints[constraintIndex].tspan[0] < largestTspan[0]) else largestTspan[0]
largestTspan[1] = constraints[constraintIndex].tspan[1] if (constraints[constraintIndex].tspan[1] > largestTspan[1]) else largestTspan[1]
# Temporary fix / HACK/ TODO to normalise time spans
for constraintIndex, _ in enumerate(constraints):
if np.isfinite(constraints[constraintIndex].tspan[0]) and np.isfinite(constraints[constraintIndex].tspan[1]):
if largestTspan[1] != 0:
constraints[constraintIndex].tspan[0] = constraints[constraintIndex].tspan[0] / largestTspan[1]
constraints[constraintIndex].tspan[1] = constraints[constraintIndex].tspan[1] / largestTspan[1]
listener = self.ikPlanner.getManipPlanListener()
fields = self.setupFields(constraints, ikParameters, positionCosts, nominalPoseName, seedPoseName, endPoseName)
msg = self.setupMessage(fields)
lcmUtils.publish('PLANNER_REQUEST', msg)
lastManipPlan = listener.waitForResponse(timeout=20000)
listener.finish()
self.ikPlanner.ikServer.infoFunc(lastManipPlan.plan_info[0])
return lastManipPlan, lastManipPlan.plan_info[0]
|
|
# -*- coding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
import datetime
try:
import PyICU as pyicu
except ImportError:
pyicu = None
def lcase(x):
return x.lower()
class pdtLocale_base(object):
"""
default values for Locales
"""
locale_keys = set([
'MonthOffsets', 'Months', 'WeekdayOffsets', 'Weekdays',
'dateFormats', 'dateSep', 'dayOffsets', 'dp_order',
'localeID', 'meridian', 'Modifiers', 're_sources', 're_values',
'shortMonths', 'shortWeekdays', 'timeFormats', 'timeSep', 'units',
'uses24', 'usesMeridian', 'numbers', 'small', 'magnitude', 'ignore'])
def __init__(self):
self.localeID = None # don't use a unicode string
self.dateSep = [ '/', '.' ]
self.timeSep = [ ':' ]
self.meridian = [ 'AM', 'PM' ]
self.usesMeridian = True
self.uses24 = True
self.WeekdayOffsets = {}
self.MonthOffsets = {}
# always lowercase any lookup values - helper code expects that
self.Weekdays = [ 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday',
]
self.shortWeekdays = [ 'mon', 'tues', 'wed',
'thu', 'fri', 'sat', 'sun',
]
self.Months = [ 'january', 'february', 'march',
'april', 'may', 'june',
'july', 'august', 'september',
'october', 'november', 'december',
]
self.shortMonths = [ 'jan', 'feb', 'mar',
'apr', 'may', 'jun',
'jul', 'aug', 'sep',
'oct', 'nov', 'dec',
]
# use the same formats as ICU by default
self.dateFormats = { 'full': 'EEEE, MMMM d, yyyy',
'long': 'MMMM d, yyyy',
'medium': 'MMM d, yyyy',
'short': 'M/d/yy',
}
self.timeFormats = { 'full': 'h:mm:ss a z',
'long': 'h:mm:ss a z',
'medium': 'h:mm:ss a',
'short': 'h:mm a',
}
self.dp_order = [ 'm', 'd', 'y' ]
# Used to parse expressions like "in 5 hours"
self.numbers = { 'zero': 0, 'one': 1, 'a': 1, 'an': 1, 'two': 2, 'three': 3,
'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8,
'nine': 9, 'ten': 10, 'eleven': 11, 'twelve': 12,
'thirteen': 13, 'fourteen': 14, 'fifteen': 15, 'sixteen': 16,
'seventeen': 17, 'eighteen': 18, 'nineteen': 19,
'twenty': 20 }
# this will be added to re_values later
self.units = { 'seconds': [ 'second', 'seconds', 'sec', 's' ],
'minutes': [ 'minute', 'minutes', 'min', 'm' ],
'hours': [ 'hour', 'hours', 'hr', 'h' ],
'days': [ 'day', 'days', 'dy', 'd' ],
'weeks': [ 'week', 'weeks', 'wk', 'w' ],
'months': [ 'month', 'months', 'mth' ],
'years': [ 'year', 'years', 'yr', 'y' ],
}
# text constants to be used by later regular expressions
self.re_values = { 'specials': 'in|on|of|at',
'timeseperator': ':',
'rangeseperator': '-',
'daysuffix': 'rd|st|nd|th',
'meridian': 'am|pm|a.m.|p.m.|a|p',
'qunits': 'h|m|s|d|w|y',
'now': [ 'now' ],
}
# Used to adjust the returned date before/after the source
self.Modifiers = { 'from': 1,
'before': -1,
'after': 1,
'ago': -1,
'prior': -1,
'prev': -1,
'last': -1,
'next': 1,
'previous': -1,
'end of': 0,
'this': 0,
'eod': 1,
'eom': 1,
'eoy': 1,
}
self.dayOffsets = { 'tomorrow': 1,
'today': 0,
'yesterday': -1,
}
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replace - the current date/time will
# already have been populated by the method buildSources
self.re_sources = { 'noon': { 'hr': 12, 'mn': 0, 'sec': 0 },
'afternoon': { 'hr': 13, 'mn': 0, 'sec': 0 },
'lunch': { 'hr': 12, 'mn': 0, 'sec': 0 },
'morning': { 'hr': 6, 'mn': 0, 'sec': 0 },
'breakfast': { 'hr': 8, 'mn': 0, 'sec': 0 },
'dinner': { 'hr': 19, 'mn': 0, 'sec': 0 },
'evening': { 'hr': 18, 'mn': 0, 'sec': 0 },
'midnight': { 'hr': 0, 'mn': 0, 'sec': 0 },
'night': { 'hr': 21, 'mn': 0, 'sec': 0 },
'tonight': { 'hr': 21, 'mn': 0, 'sec': 0 },
'eod': { 'hr': 17, 'mn': 0, 'sec': 0 },
}
self.small = {'zero': 0,
'one': 1,
'a': 1,
'an': 1,
'two': 2,
'three': 3,
'four': 4,
'five': 5,
'six': 6,
'seven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90
}
self.magnitude = {'thousand': 1000,
'million': 1000000,
'billion': 1000000000,
'trillion': 1000000000000,
'quadrillion': 1000000000000000,
'quintillion': 1000000000000000000,
'sextillion': 1000000000000000000000,
'septillion': 1000000000000000000000000,
'octillion': 1000000000000000000000000000,
'nonillion': 1000000000000000000000000000000,
'decillion': 1000000000000000000000000000000000,
}
self.ignore = ('and', ',')
class pdtLocale_icu(pdtLocale_base):
"""
Create a locale from pyICU
"""
def __init__(self, localeID):
super( pdtLocale_icu, self ).__init__()
self.icu = None
if pyicu is not None:
if localeID is None:
localeID = 'en_US'
self.icu = pyicu.Locale(localeID)
if self.icu is not None:
# grab spelled out format of all numbers from 0 to 100
rbnf = pyicu.RuleBasedNumberFormat(pyicu.URBNFRuleSetTag.SPELLOUT, self.icu)
try:
self.numbers = dict([(rbnf.format(i), i) for i in xrange(0, 100)])
except NameError:
self.numbers = dict([(rbnf.format(i), i) for i in range(0, 100)])
self.symbols = pyicu.DateFormatSymbols(self.icu)
# grab ICU list of weekdays, skipping first entry which
# is always blank
wd = list(map(lcase, self.symbols.getWeekdays()[1:]))
swd = list(map(lcase, self.symbols.getShortWeekdays()[1:]))
# store them in our list with Monday first (ICU puts Sunday first)
self.Weekdays = wd[1:] + wd[0:1]
self.shortWeekdays = swd[1:] + swd[0:1]
self.Months = list(map(lcase, self.symbols.getMonths()))
self.shortMonths = list(map(lcase, self.symbols.getShortMonths()))
self.icu_df = { 'full': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kFull, self.icu),
'long': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kLong, self.icu),
'medium': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kMedium, self.icu),
'short': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kShort, self.icu),
}
self.icu_tf = { 'full': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kFull, self.icu),
'long': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kLong, self.icu),
'medium': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kMedium, self.icu),
'short': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kShort, self.icu),
}
self.dateFormats = { 'full': self.icu_df['full'].toPattern(),
'long': self.icu_df['long'].toPattern(),
'medium': self.icu_df['medium'].toPattern(),
'short': self.icu_df['short'].toPattern(),
}
self.timeFormats = { 'full': self.icu_tf['full'].toPattern(),
'long': self.icu_tf['long'].toPattern(),
'medium': self.icu_tf['medium'].toPattern(),
'short': self.icu_tf['short'].toPattern(),
}
am = ''
pm = ''
ts = ''
            # ICU doesn't seem to directly provide the date or time separator
# so we have to figure it out
o = self.icu_tf['short']
s = self.timeFormats['short']
self.usesMeridian = 'a' in s
self.uses24 = 'H' in s
# '11:45 AM' or '11:45'
s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
# ': AM' or ':'
s = s.replace('11', '').replace('45', '')
if len(s) > 0:
ts = s[0]
if self.usesMeridian:
                # s is now ': AM' or ':'; extract the AM string
                am = s[1:].strip()
                # '11:45 PM' or '23:45'
                s = o.format(datetime.datetime(2003, 10, 30, 23, 45))
if self.uses24:
s = s.replace('23', '')
else:
s = s.replace('11', '')
# 'PM' or ''
pm = s.replace('45', '').replace(ts, '').strip()
self.timeSep = [ ts ]
self.meridian = [ am, pm ]
o = self.icu_df['short']
s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
s = s.replace('10', '').replace('30', '').replace('03', '').replace('2003', '')
if len(s) > 0:
ds = s[0]
else:
ds = '/'
self.dateSep = [ ds ]
s = self.dateFormats['short']
l = s.lower().split(ds)
dp_order = []
for s in l:
if len(s) > 0:
dp_order.append(s[:1])
self.dp_order = dp_order
class pdtLocale_en(pdtLocale_base):
"""
en_US Locale
"""
def __init__(self):
super( pdtLocale_en, self ).__init__()
self.localeID = 'en_US' # don't use a unicode string
self.uses24 = False
class pdtLocale_au(pdtLocale_base):
"""
en_AU Locale
"""
def __init__(self):
super( pdtLocale_au, self ).__init__()
        self.localeID = 'en_AU' # don't use a unicode string
self.dateSep = [ '-', '/' ]
self.uses24 = False
self.dateFormats['full'] = 'EEEE, d MMMM yyyy'
self.dateFormats['long'] = 'd MMMM yyyy'
self.dateFormats['medium'] = 'dd/MM/yyyy'
self.dateFormats['short'] = 'd/MM/yy'
self.timeFormats['long'] = self.timeFormats['full']
self.dp_order = [ 'd', 'm', 'y' ]
class pdtLocale_es(pdtLocale_base):
"""
es Locale
Note that I don't speak Spanish so many of the items below are still in English
"""
def __init__(self):
super( pdtLocale_es, self ).__init__()
self.localeID = 'es' # don't use a unicode string
self.dateSep = [ '/' ]
self.usesMeridian = False
self.uses24 = True
self.Weekdays = [ 'lunes', 'martes', 'mi\xe9rcoles',
'jueves', 'viernes', 's\xe1bado', 'domingo',
]
self.shortWeekdays = [ 'lun', 'mar', 'mi\xe9',
'jue', 'vie', 's\xe1b', 'dom',
]
self.Months = [ 'enero', 'febrero', 'marzo',
'abril', 'mayo', 'junio',
'julio', 'agosto', 'septiembre',
'octubre', 'noviembre', 'diciembre'
]
self.shortMonths = [ 'ene', 'feb', 'mar',
'abr', 'may', 'jun',
'jul', 'ago', 'sep',
'oct', 'nov', 'dic'
]
self.dateFormats['full'] = "EEEE d' de 'MMMM' de 'yyyy"
self.dateFormats['long'] = "d' de 'MMMM' de 'yyyy"
self.dateFormats['medium'] = "dd-MMM-yy"
self.dateFormats['short'] = "d/MM/yy"
self.timeFormats['full'] = "HH'H'mm' 'ss z"
self.timeFormats['long'] = "HH:mm:ss z"
self.timeFormats['medium'] = "HH:mm:ss"
self.timeFormats['short'] = "HH:mm"
self.dp_order = [ 'd', 'm', 'y' ]
class pdtLocale_ptBR(pdtLocale_base):
"""
pt_BR Locale
"""
def __init__(self):
super( pdtLocale_ptBR, self ).__init__()
self.localeID = 'pt_BR' # don't use a unicode string
self.dateSep = [ '/' ]
self.usesMeridian = False
self.uses24 = True
self.Weekdays = [ 'segunda-feira', 'ter\xe7a-feira', 'quarta-feira',
'quinta-feira', 'sexta-feira', 's\xe1bado', 'domingo',
]
self.shortWeekdays = [ 'seg', 'ter', 'qua',
'qui', 'sex', 's\xe1b', 'dom',
]
self.Months = [ 'janeiro', 'fevereiro', 'mar\xe7o',
'abril', 'maio', 'junho',
'julho', 'agosto', 'setembro',
'outubro', 'novembro', 'dezembro'
]
self.shortMonths = [ 'jan', 'fev', 'mar',
'abr', 'mai', 'jun',
'jul', 'ago', 'set',
'out', 'nov', 'dez'
]
self.dateFormats['full'] = "EEEE, d' de 'MMMM' de 'yyyy"
self.dateFormats['long'] = "d' de 'MMMM' de 'yyyy"
self.dateFormats['medium'] = "dd-MM-yy"
self.dateFormats['short'] = "dd/MM/yyyy"
self.timeFormats['full'] = "HH'H'mm' 'ss z"
self.timeFormats['long'] = "HH:mm:ss z"
self.timeFormats['medium'] = "HH:mm:ss"
self.timeFormats['short'] = "HH:mm"
self.dp_order = [ 'd', 'm', 'y' ]
self.units['seconds'] = [ 'segundo', 'seg', 's']
self.units['minutes'] = [ 'minuto', 'min', 'm']
self.units['days'] = [ 'dia', 'dias', 'd']
self.units['months'] = [ 'm\xeas', 'meses']
class pdtLocale_de(pdtLocale_base):
"""
de_DE Locale constants
Contributed by Debian parsedatetime package maintainer Bernd Zeimetz <bzed@debian.org>
"""
def __init__(self):
super( pdtLocale_de, self ).__init__()
self.localeID = 'de_DE' # don't use a unicode string
self.dateSep = [ '.' ]
self.timeSep = [ ':' ]
self.meridian = [ ]
self.usesMeridian = False
self.uses24 = True
self.Weekdays = [ 'montag', 'dienstag', 'mittwoch',
'donnerstag', 'freitag', 'samstag', 'sonntag',
]
self.shortWeekdays = [ 'mo', 'di', 'mi',
'do', 'fr', 'sa', 'so',
]
self.Months = [ 'januar', 'februar', 'm\xe4rz',
'april', 'mai', 'juni',
'juli', 'august', 'september',
'oktober', 'november', 'dezember',
]
self.shortMonths = [ 'jan', 'feb', 'mrz',
'apr', 'mai', 'jun',
'jul', 'aug', 'sep',
'okt', 'nov', 'dez',
]
self.dateFormats['full'] = 'EEEE, d. MMMM yyyy'
self.dateFormats['long'] = 'd. MMMM yyyy'
self.dateFormats['medium'] = 'dd.MM.yyyy'
self.dateFormats['short'] = 'dd.MM.yy'
self.timeFormats['full'] = 'HH:mm:ss v'
self.timeFormats['long'] = 'HH:mm:ss z'
self.timeFormats['medium'] = 'HH:mm:ss'
self.timeFormats['short'] = 'HH:mm'
self.dp_order = [ 'd', 'm', 'y' ]
self.units['seconds'] = [ 'sekunden', 'sek', 's' ]
self.units['minutes'] = [ 'minuten', 'min' , 'm' ]
self.units['hours'] = [ 'stunden', 'std', 'h' ]
self.units['days'] = [ 'tag', 'tage', 't' ]
self.units['weeks'] = [ 'wochen', 'w' ]
self.units['months'] = [ 'monat', 'monate' ] #the short version would be a capital M,
#as I understand it we can't distinguish
#between m for minutes and M for months.
self.units['years'] = [ 'jahr', 'jahre', 'j' ]
self.re_values['specials'] = 'am|dem|der|im|in|den|zum'
self.re_values['timeseperator'] = ':'
self.re_values['rangeseperator'] = '-'
self.re_values['daysuffix'] = ''
self.re_values['qunits'] = 'h|m|s|t|w|m|j'
self.re_values['now'] = [ 'jetzt' ]
# Used to adjust the returned date before/after the source
#still looking for insight on how to translate all of them to german.
self.Modifiers['from'] = 1
self.Modifiers['before'] = -1
self.Modifiers['after'] = 1
self.Modifiers['vergangener'] = -1
self.Modifiers['vorheriger'] = -1
self.Modifiers['prev'] = -1
self.Modifiers['letzter'] = -1
self.Modifiers['n\xe4chster'] = 1
self.Modifiers['dieser'] = 0
self.Modifiers['previous'] = -1
self.Modifiers['in a'] = 2
self.Modifiers['end of'] = 0
self.Modifiers['eod'] = 0
self.Modifiers['eo'] = 0
#morgen/abermorgen does not work, see http://code.google.com/p/parsedatetime/issues/detail?id=19
self.dayOffsets['morgen'] = 1
self.dayOffsets['heute'] = 0
self.dayOffsets['gestern'] = -1
self.dayOffsets['vorgestern'] = -2
self.dayOffsets['\xfcbermorgen'] = 2
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replace - the current date/time will
# already have been populated by the method buildSources
self.re_sources['mittag'] = { 'hr': 12, 'mn': 0, 'sec': 0 }
self.re_sources['mittags'] = { 'hr': 12, 'mn': 0, 'sec': 0 }
self.re_sources['mittagessen'] = { 'hr': 12, 'mn': 0, 'sec': 0 }
self.re_sources['morgen'] = { 'hr': 6, 'mn': 0, 'sec': 0 }
self.re_sources['morgens'] = { 'hr': 6, 'mn': 0, 'sec': 0 }
        self.re_sources['fr\xfchst\xfcck'] = { 'hr': 8, 'mn': 0, 'sec': 0 }
self.re_sources['abendessen'] = { 'hr': 19, 'mn': 0, 'sec': 0 }
self.re_sources['abend'] = { 'hr': 18, 'mn': 0, 'sec': 0 }
self.re_sources['abends'] = { 'hr': 18, 'mn': 0, 'sec': 0 }
self.re_sources['mitternacht'] = { 'hr': 0, 'mn': 0, 'sec': 0 }
self.re_sources['nacht'] = { 'hr': 21, 'mn': 0, 'sec': 0 }
self.re_sources['nachts'] = { 'hr': 21, 'mn': 0, 'sec': 0 }
self.re_sources['heute abend'] = { 'hr': 21, 'mn': 0, 'sec': 0 }
self.re_sources['heute nacht'] = { 'hr': 21, 'mn': 0, 'sec': 0 }
self.re_sources['feierabend'] = { 'hr': 17, 'mn': 0, 'sec': 0 }
class pdtLocale_nl(pdtLocale_base):
"""
nl_NL Locale constants
Contributed by Dirkjan Krijnders <dirkjan@krijnders.net>
"""
def __init__(self):
super( pdtLocale_nl, self ).__init__()
self.localeID = 'nl_NL' # don't use a unicode string
self.dateSep = [ '-' , '/']
self.timeSep = [ ':' ]
self.meridian = [ ]
self.usesMeridian = False
self.uses24 = True
self.Weekdays = [ 'maandag', 'dinsdag', 'woensdag',
'donderdag', 'vrijdag', 'zaterdag', 'zondag',
]
self.shortWeekdays = [ 'ma', 'di', 'wo',
'do', 'vr', 'za', 'zo',
]
self.Months = [ 'januari', 'februari', 'maart',
'april', 'mei', 'juni',
'juli', 'augustus', 'september',
'oktober', 'november', 'december',
]
self.shortMonths = [ 'jan', 'feb', 'mar',
'apr', 'mei', 'jun',
'jul', 'aug', 'sep',
'okt', 'nov', 'dec',
]
self.dateFormats['full'] = 'EEEE, dd MMMM yyyy'
self.dateFormats['long'] = 'dd MMMM yyyy'
self.dateFormats['medium'] = 'dd-MM-yyyy'
self.dateFormats['short'] = 'dd-MM-yy'
self.timeFormats['full'] = 'HH:mm:ss v'
self.timeFormats['long'] = 'HH:mm:ss z'
self.timeFormats['medium'] = 'HH:mm:ss'
self.timeFormats['short'] = 'HH:mm'
self.dp_order = [ 'd', 'm', 'y' ]
self.units['seconds'] = [ 'secunden', 'sec', 's' ]
self.units['minutes'] = [ 'minuten', 'min' , 'm' ]
self.units['hours'] = [ 'uren', 'uur', 'h' ]
self.units['days'] = [ 'dagen', 'dag', 'd' ]
self.units['weeks'] = [ 'weken', 'w' ]
self.units['months'] = [ 'maanden', 'maand' ] #the short version would be a capital M,
#as I understand it we can't distinguish
#between m for minutes and M for months.
self.units['years'] = [ 'jaar', 'jaren', 'j' ]
self.re_values['specials'] = 'om'
self.re_values['timeseperator'] = ':'
self.re_values['rangeseperator'] = '-'
self.re_values['daysuffix'] = ' |de'
self.re_values['qunits'] = 'h|m|s|d|w|m|j'
self.re_values['now'] = [ 'nu' ]
# Used to adjust the returned date before/after the source
        #still looking for insight on how to translate all of them to dutch.
self.Modifiers['vanaf'] = 1
self.Modifiers['voor'] = -1
self.Modifiers['na'] = 1
self.Modifiers['vorige'] = -1
self.Modifiers['eervorige'] = -1
self.Modifiers['prev'] = -1
self.Modifiers['laastste'] = -1
self.Modifiers['volgende'] = 1
self.Modifiers['deze'] = 0
self.Modifiers['vorige'] = -1
self.Modifiers['over'] = 2
self.Modifiers['eind van'] = 0
#morgen/abermorgen does not work, see http://code.google.com/p/parsedatetime/issues/detail?id=19
self.dayOffsets['morgen'] = 1
self.dayOffsets['vandaag'] = 0
self.dayOffsets['gisteren'] = -1
self.dayOffsets['eergisteren'] = -2
self.dayOffsets['overmorgen'] = 2
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replace - the current date/time will
# already have been populated by the method buildSources
self.re_sources['middag'] = { 'hr': 12, 'mn': 0, 'sec': 0 }
self.re_sources['vanmiddag'] = { 'hr': 12, 'mn': 0, 'sec': 0 }
self.re_sources['lunch'] = { 'hr': 12, 'mn': 0, 'sec': 0 }
self.re_sources['morgen'] = { 'hr': 6, 'mn': 0, 'sec': 0 }
self.re_sources['\'s morgens'] = { 'hr': 6, 'mn': 0, 'sec': 0 }
self.re_sources['ontbijt'] = { 'hr': 8, 'mn': 0, 'sec': 0 }
self.re_sources['avondeten'] = { 'hr': 19, 'mn': 0, 'sec': 0 }
self.re_sources['avond'] = { 'hr': 18, 'mn': 0, 'sec': 0 }
self.re_sources['avonds'] = { 'hr': 18, 'mn': 0, 'sec': 0 }
self.re_sources['middernacht'] = { 'hr': 0, 'mn': 0, 'sec': 0 }
self.re_sources['nacht'] = { 'hr': 21, 'mn': 0, 'sec': 0 }
self.re_sources['nachts'] = { 'hr': 21, 'mn': 0, 'sec': 0 }
self.re_sources['vanavond'] = { 'hr': 21, 'mn': 0, 'sec': 0 }
self.re_sources['vannacht'] = { 'hr': 21, 'mn': 0, 'sec': 0 }
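# Illustrative usage sketch (assumption: these locale objects are normally
# consumed by parsedatetime's Calendar; here we only inspect the data they
# carry directly):
#
#   loc = pdtLocale_de()
#   loc.localeID     # 'de_DE'
#   loc.dp_order     # ['d', 'm', 'y']
#   loc.Weekdays[0]  # 'montag'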
|
|
# original source from https://github.com/thesharp/daemonize
import fcntl
import os
import pwd
import grp
import sys
import signal
import resource
import logging
import atexit
from logging import handlers
class Daemonize(object):
""" Daemonize object
    Object constructor expects the following arguments:
- app: contains the application name which will be sent to syslog.
- pid: path to the pidfile.
- action: your custom function which will be executed after daemonization.
- keep_fds: optional list of fds which should not be closed.
- auto_close_fds: optional parameter to not close opened fds.
- privileged_action: action that will be executed before drop privileges if user or
group parameter is provided.
If you want to transfer anything from privileged_action to action, such as
opened privileged file descriptor, you should return it from
privileged_action function and catch it inside action function.
- user: drop privileges to this user if provided.
- group: drop privileges to this group if provided.
- verbose: send debug messages to logger if provided.
- logger: use this logger object instead of creating new one, if provided.
"""
def __init__(self, app, pid, action, keep_fds=None, auto_close_fds=True, privileged_action=None, user=None, group=None, verbose=False, logger=None):
self.app = app
self.pid = pid
self.action = action
self.keep_fds = keep_fds or []
self.privileged_action = privileged_action or (lambda: ())
self.user = user
self.group = group
self.logger = logger
self.verbose = verbose
self.auto_close_fds = auto_close_fds
def sigterm(self, signum, frame):
""" sigterm method
These actions will be done after SIGTERM.
"""
self.logger.warn("Caught signal %s. Stopping daemon." % signum)
os.remove(self.pid)
sys.exit(0)
def exit(self):
""" exit method
Cleanup pid file at exit.
"""
self.logger.warn("Stopping daemon.")
os.remove(self.pid)
sys.exit(0)
def start(self):
""" start method
Main daemonization process.
"""
        # If the pidfile already exists, read the old pid from it first so we can
        # restore its contents if locking fails, because opening the file for
        # writing truncates it.
if os.path.isfile(self.pid):
with open(self.pid, "r") as old_pidfile:
old_pid = old_pidfile.read()
# Create a lockfile so that only one instance of this daemon is running at any time.
try:
lockfile = open(self.pid, "w")
except IOError:
print("Unable to create the pidfile.")
sys.exit(1)
try:
# Try to get an exclusive lock on the file. This will fail if another process has the file
# locked.
fcntl.flock(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
print("Unable to lock on the pidfile.")
# We need to overwrite the pidfile if we got here.
with open(self.pid, "w") as pidfile:
pidfile.write(old_pid)
sys.exit(1)
# Fork, creating a new process for the child.
process_id = os.fork()
if process_id < 0:
# Fork error. Exit badly.
sys.exit(1)
elif process_id != 0:
# This is the parent process. Exit.
sys.exit(0)
# This is the child process. Continue.
# Stop listening for signals that the parent process receives.
# This is done by getting a new process id.
# setpgrp() is an alternative to setsid().
        # setsid puts the process in a new session (and process group) and detaches it from its controlling terminal.
process_id = os.setsid()
if process_id == -1:
# Uh oh, there was a problem.
sys.exit(1)
# Add lockfile to self.keep_fds.
self.keep_fds.append(lockfile.fileno())
# Close all file descriptors, except the ones mentioned in self.keep_fds.
devnull = "/dev/null"
if hasattr(os, "devnull"):
# Python has set os.devnull on this system, use it instead as it might be different
# than /dev/null.
devnull = os.devnull
if self.auto_close_fds:
for fd in range(3, resource.getrlimit(resource.RLIMIT_NOFILE)[0]):
if fd not in self.keep_fds:
try:
os.close(fd)
except OSError:
pass
devnull_fd = os.open(devnull, os.O_RDWR)
os.dup2(devnull_fd, 0)
os.dup2(devnull_fd, 1)
os.dup2(devnull_fd, 2)
if self.logger is None:
# Initialize logging.
self.logger = logging.getLogger(self.app)
self.logger.setLevel(logging.DEBUG)
# Display log messages only on defined handlers.
self.logger.propagate = False
# Initialize syslog.
# It will correctly work on OS X, Linux and FreeBSD.
if sys.platform == "darwin":
syslog_address = "/var/run/syslog"
else:
syslog_address = "/dev/log"
            # We will continue with syslog initialization only if we actually have
            # such capabilities on the machine we are running on.
if os.path.isfile(syslog_address):
syslog = handlers.SysLogHandler(syslog_address)
if self.verbose:
syslog.setLevel(logging.DEBUG)
else:
syslog.setLevel(logging.INFO)
# Try to mimic to normal syslog messages.
formatter = logging.Formatter("%(asctime)s %(name)s: %(message)s",
"%b %e %H:%M:%S")
syslog.setFormatter(formatter)
self.logger.addHandler(syslog)
# Set umask to default to safe file permissions when running as a root daemon. 027 is an
# octal number which we are typing as 0o27 for Python3 compatibility.
os.umask(0o27)
# Change to a known directory. If this isn't done, starting a daemon in a subdirectory that
# needs to be deleted results in "directory busy" errors.
os.chdir("/")
# Execute privileged action
privileged_action_result = self.privileged_action()
if not privileged_action_result:
privileged_action_result = []
# Change gid
if self.group:
try:
gid = grp.getgrnam(self.group).gr_gid
except KeyError:
self.logger.error("Group {0} not found".format(self.group))
sys.exit(1)
try:
os.setgid(gid)
except OSError:
self.logger.error("Unable to change gid.")
sys.exit(1)
# Change uid
if self.user:
try:
uid = pwd.getpwnam(self.user).pw_uid
except KeyError:
self.logger.error("User {0} not found.".format(self.user))
sys.exit(1)
try:
os.setuid(uid)
except OSError:
self.logger.error("Unable to change uid.")
sys.exit(1)
try:
lockfile.write("%s" % (os.getpid()))
lockfile.flush()
except IOError:
self.logger.error("Unable to write pid to the pidfile.")
print("Unable to write pid to the pidfile.")
sys.exit(1)
# Set custom action on SIGTERM.
signal.signal(signal.SIGTERM, self.sigterm)
atexit.register(self.exit)
self.logger.warn("Starting daemon.")
self.action(*privileged_action_result)
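# Illustrative usage sketch (mirrors the constructor documented above; the
# paths and the main() callback are placeholders, assuming a script that
# imports Daemonize from this module):
#
#   def main():
#       with open("/tmp/example_app.log", "a") as fh:
#           fh.write("daemon is running\n")
#
#   daemon = Daemonize(app="example_app", pid="/tmp/example_app.pid", action=main)
#   daemon.start()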
|
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Annealed Flow Transport (AFT) Monte Carlo algorithm.
For more detail see:
Arbel, Matthews and Doucet. 2021. Annealed Flow Transport Monte Carlo.
International Conference on Machine Learning.
"""
import time
from typing import NamedTuple, Tuple
from absl import logging
from annealed_flow_transport import flow_transport
from annealed_flow_transport import markov_kernel
import annealed_flow_transport.aft_types as tp
import jax
import jax.numpy as jnp
import numpy as np
import optax
Array = jnp.ndarray
UpdateFn = tp.UpdateFn
OptState = tp.OptState
FlowParams = tp.FlowParams
FlowApply = tp.FlowApply
LogDensityNoStep = tp.LogDensityNoStep
InitialSampler = tp.InitialSampler
RandomKey = tp.RandomKey
SamplesTuple = tp.SamplesTuple
FreeEnergyAndGrad = tp.FreeEnergyAndGrad
MarkovKernelApply = tp.MarkovKernelApply
FreeEnergyEval = tp.FreeEnergyEval
VfesTuple = tp.VfesTuple
LogDensityByStep = tp.LogDensityByStep
AcceptanceTuple = tp.AcceptanceTuple
LogWeightsTuple = tp.LogWeightsTuple
AlgoResultsTuple = tp.AlgoResultsTuple
def get_initial_samples_log_weight_tuples(
initial_sampler: InitialSampler, key: RandomKey,
config) -> Tuple[SamplesTuple, LogWeightsTuple]:
"""Get initial train/validation/test state depending on config."""
batch_sizes = (config.estimation_batch_size,
config.estimation_batch_size,
config.batch_size)
subkeys = jax.random.split(key, 3)
samples_tuple = SamplesTuple(*[
initial_sampler(elem, batch, config.sample_shape)
for elem, batch in zip(subkeys, batch_sizes)
])
log_weights_tuple = LogWeightsTuple(*[-jnp.log(batch) * jnp.ones(
batch) for batch in batch_sizes])
return samples_tuple, log_weights_tuple
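# Note on the log-weight initialisation above (added for clarity): each split
# starts from uniform, normalised importance weights stored in log space, i.e.
# every particle carries log weight -log(batch). For example with batch = 8,
# logsumexp of the resulting vector is 0, so the weights sum to one.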
def update_tuples(
samples_tuple: SamplesTuple, log_weights_tuple: LogWeightsTuple,
key: RandomKey, flow_apply: FlowApply, flow_params: FlowParams,
markov_kernel_apply: MarkovKernelApply, log_density: LogDensityByStep,
step: int, config) -> Tuple[SamplesTuple, LogWeightsTuple, AcceptanceTuple]:
"""Update the samples and log weights and return diagnostics."""
samples_list = []
log_weights_list = []
acceptance_tuple_list = []
subkeys = jax.random.split(key, 3)
for curr_samples, curr_log_weights, subkey in zip(samples_tuple,
log_weights_tuple,
subkeys):
new_samples, new_log_weights, acceptance_tuple = flow_transport.update_samples_log_weights(
flow_apply=flow_apply,
markov_kernel_apply=markov_kernel_apply,
flow_params=flow_params,
samples=curr_samples,
log_weights=curr_log_weights,
key=subkey,
log_density=log_density,
step=step,
config=config)
samples_list.append(new_samples)
log_weights_list.append(new_log_weights)
acceptance_tuple_list.append(acceptance_tuple)
samples_tuple = SamplesTuple(*samples_list)
log_weights_tuple = LogWeightsTuple(*log_weights_list)
test_acceptance_tuple = acceptance_tuple_list[-1]
return samples_tuple, log_weights_tuple, test_acceptance_tuple
class OptimizationLoopState(NamedTuple):
opt_state: OptState
flow_params: FlowParams
inner_step: int
opt_vfes: VfesTuple
best_params: FlowParams
best_validation_vfe: Array
best_index: int
def flow_estimate_step(loop_state: OptimizationLoopState,
free_energy_and_grad: FreeEnergyAndGrad,
train_samples: Array, train_log_weights: Array,
outer_step: int, validation_samples: Array,
validation_log_weights: Array,
free_energy_eval: FreeEnergyEval,
opt_update: UpdateFn) -> OptimizationLoopState:
"""A single step of the flow estimation loop."""
# Evaluate the flow on train and validation particles.
train_vfe, flow_grads = free_energy_and_grad(loop_state.flow_params,
train_samples,
train_log_weights,
outer_step)
validation_vfe = free_energy_eval(loop_state.flow_params,
validation_samples,
validation_log_weights,
outer_step)
# Update the best parameters, best validation vfe and index
# if the measured validation vfe is better.
validation_vfe_is_better = validation_vfe < loop_state.best_validation_vfe
new_best_params = jax.lax.cond(validation_vfe_is_better,
lambda _: loop_state.flow_params,
lambda _: loop_state.best_params,
operand=None)
new_best_validation_vfe = jnp.where(validation_vfe_is_better,
validation_vfe,
loop_state.best_validation_vfe)
new_best_index = jnp.where(validation_vfe_is_better,
loop_state.inner_step,
loop_state.best_index)
# Update the logs of train and validation vfes.
new_train_vfes = jax.ops.index_update(loop_state.opt_vfes.train_vfes,
loop_state.inner_step,
train_vfe)
new_validation_vfes = jax.ops.index_update(
loop_state.opt_vfes.validation_vfes, loop_state.inner_step,
validation_vfe)
new_opt_vfes = VfesTuple(train_vfes=new_train_vfes,
validation_vfes=new_validation_vfes)
# Apply gradients ready for next round of flow evaluations in the next step.
updates, new_opt_state = opt_update(flow_grads,
loop_state.opt_state)
new_flow_params = optax.apply_updates(loop_state.flow_params,
updates)
new_inner_step = loop_state.inner_step + 1
# Pack everything into the next loop state.
new_state_tuple = OptimizationLoopState(new_opt_state, new_flow_params,
new_inner_step, new_opt_vfes,
new_best_params,
new_best_validation_vfe,
new_best_index)
return new_state_tuple
def flow_estimation_should_continue(loop_state: OptimizationLoopState,
opt_iters: int,
stopping_criterion: str) -> bool:
"""Based on stopping criterion control termination of flow estimation."""
if stopping_criterion == 'time':
return loop_state.inner_step < opt_iters
elif stopping_criterion == 'greedy_time':
index = loop_state.inner_step
best_index = loop_state.best_index
return jnp.logical_and(best_index == index-1, index < opt_iters)
else:
raise NotImplementedError
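# Reading of the two criteria above (added for clarity): with opt_iters = 100,
# 'time' always runs the full 100 optimisation steps, whereas 'greedy_time'
# keeps going only while the most recent completed step improved the validation
# VFE (best_index == inner_step - 1), so it can stop much earlier.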
def optimize_free_energy(
opt_update: UpdateFn, opt_init_state: OptState,
flow_init_params: FlowParams, free_energy_and_grad: FreeEnergyAndGrad,
free_energy_eval: FreeEnergyEval, train_samples: Array,
train_log_weights: Array, validation_samples: Array,
validation_log_weights: Array, outer_step: int, opt_iters: int,
stopping_criterion: str) -> Tuple[FlowParams, VfesTuple]:
"""Optimize an estimate of the free energy.
Args:
opt_update: function that updates the state of flow based on gradients etc.
opt_init_state: initial state variables of the optimizer.
flow_init_params: initial parameters of the flow.
free_energy_and_grad: function giving estimate of free energy and gradient.
free_energy_eval: function giving estimate of free energy only.
train_samples: Array of shape (batch,)+sample_shape
train_log_weights: Array of shape (batch,)
    validation_samples: Array of shape (batch,)+sample_shape
validation_log_weights: Array of shape (batch,)
outer_step: int giving current outer step of algorithm.
opt_iters: number of flow estimation iters.
    stopping_criterion: One of 'time' or 'greedy_time'.
Returns:
flow_params: optimized flow parameters.
free_energies: array containing all estimates of free energy.
"""
opt_state = opt_init_state
flow_params = flow_init_params
train_vfes = jnp.zeros(opt_iters)
validation_vfes = jnp.zeros(opt_iters)
opt_vfes = VfesTuple(train_vfes, validation_vfes)
def body_fun(loop_state: OptimizationLoopState) -> OptimizationLoopState:
return flow_estimate_step(loop_state, free_energy_and_grad, train_samples,
train_log_weights, outer_step, validation_samples,
validation_log_weights, free_energy_eval,
opt_update)
def cond_fun(loop_state: OptimizationLoopState) -> bool:
return flow_estimation_should_continue(loop_state, opt_iters,
stopping_criterion)
initial_loop_state = OptimizationLoopState(opt_state, flow_params, 0,
opt_vfes, flow_params, jnp.inf, -1)
final_loop_state = jax.lax.while_loop(cond_fun,
body_fun,
initial_loop_state)
return final_loop_state.best_params, final_loop_state.opt_vfes
def inner_loop(
key: RandomKey, free_energy_and_grad: FreeEnergyAndGrad,
free_energy_eval: FreeEnergyEval, opt_update: UpdateFn,
opt_init_state: OptState, flow_init_params: FlowParams,
flow_apply: FlowApply, markov_kernel_apply: MarkovKernelApply,
samples_tuple: SamplesTuple, log_weights_tuple: LogWeightsTuple,
log_density: LogDensityByStep, step: int, config
) -> Tuple[SamplesTuple, LogWeightsTuple, VfesTuple, Array, AcceptanceTuple]:
"""Inner loop of the algorithm.
Args:
key: A JAX random key.
free_energy_and_grad: function giving estimate of free energy and gradient.
free_energy_eval: function giving estimate of free energy only.
opt_update: function that updates the state of flow based on gradients etc.
opt_init_state: initial state variables of the optimizer.
flow_init_params: initial parameters of the flow.
flow_apply: function that applies the flow.
markov_kernel_apply: functional that applies the Markov transition kernel.
samples_tuple: Tuple containing train/validation/test samples.
log_weights_tuple: Tuple containing train/validation/test log_weights.
log_density: function returning the log_density of a sample at given step.
step: int giving current step of algorithm.
config: experiment configuration.
Returns:
    samples_tuple: samples after the full inner loop has been performed.
    log_weights_tuple: log_weights after the full inner loop has been performed.
    vfes_tuple: train/validation free energy estimates from the flow optimization.
    log_normalizer_increment: Scalar log of normalizing constant increment.
    test_acceptance_tuple: Markov kernel acceptance rates on the test samples.
"""
flow_params, vfes_tuple = optimize_free_energy(
opt_update=opt_update,
opt_init_state=opt_init_state,
flow_init_params=flow_init_params,
free_energy_and_grad=free_energy_and_grad,
free_energy_eval=free_energy_eval,
train_samples=samples_tuple.train_samples,
train_log_weights=log_weights_tuple.train_log_weights,
validation_samples=samples_tuple.validation_samples,
validation_log_weights=log_weights_tuple.validation_log_weights,
outer_step=step,
opt_iters=config.optimization_config.free_energy_iters,
stopping_criterion=config.stopping_criterion)
log_normalizer_increment = flow_transport.get_log_normalizer_increment(
samples_tuple.test_samples, log_weights_tuple.test_log_weights,
flow_apply, flow_params, log_density, step)
samples_tuple, log_weights_tuple, test_acceptance_tuple = update_tuples(
samples_tuple=samples_tuple,
log_weights_tuple=log_weights_tuple,
key=key,
flow_apply=flow_apply,
flow_params=flow_params,
markov_kernel_apply=markov_kernel_apply,
log_density=log_density,
step=step,
config=config)
return samples_tuple, log_weights_tuple, vfes_tuple, log_normalizer_increment, test_acceptance_tuple
def outer_loop_aft(opt_update: UpdateFn,
opt_init_state: OptState,
flow_init_params: FlowParams,
flow_apply: FlowApply,
initial_log_density: LogDensityNoStep,
final_log_density: LogDensityNoStep,
initial_sampler: InitialSampler,
key: RandomKey,
config,
log_step_output) -> AlgoResultsTuple:
"""The outer loop for Annealed Flow Transport Monte Carlo.
Args:
opt_update: A Optax optimizer update function.
opt_init_state: Optax initial state.
flow_init_params: Initial parameters for the flow.
flow_apply: Function that evaluates flow on parameters and samples.
initial_log_density: The log density of the starting distribution.
final_log_density: The log density of the target distribution.
initial_sampler: A function that produces the initial samples.
key: A Jax random key.
config: A ConfigDict containing the configuration.
log_step_output: Function to log step output or None.
Returns:
An AlgoResults tuple containing a summary of the results.
"""
num_temps = config.num_temps
density_by_step = flow_transport.GeometricAnnealingSchedule(
initial_log_density, final_log_density, num_temps)
markov_kernel_by_step = markov_kernel.MarkovTransitionKernel(
config.mcmc_config, density_by_step, num_temps)
def free_energy_short(flow_params: FlowParams,
samples: Array,
log_weights: Array,
step: int) -> Array:
return flow_transport.transport_free_energy_estimator(
samples, log_weights, flow_apply, flow_params, density_by_step, step)
free_energy_eval = jax.jit(free_energy_short)
free_energy_and_grad = jax.value_and_grad(free_energy_short)
key, subkey = jax.random.split(key)
samples_tuple, log_weights_tuple = get_initial_samples_log_weight_tuples(
initial_sampler, subkey, config)
def short_inner_loop(rng_key: RandomKey,
loc_samples_tuple: SamplesTuple,
loc_log_weights_tuple: LogWeightsTuple,
loc_step: int):
return inner_loop(key=rng_key,
free_energy_and_grad=free_energy_and_grad,
free_energy_eval=free_energy_eval,
opt_update=opt_update,
opt_init_state=opt_init_state,
flow_init_params=flow_init_params,
flow_apply=flow_apply,
markov_kernel_apply=markov_kernel_by_step,
samples_tuple=loc_samples_tuple,
log_weights_tuple=loc_log_weights_tuple,
log_density=density_by_step,
step=loc_step,
config=config)
logging.info('Jitting step...')
inner_loop_jit = jax.jit(short_inner_loop)
opt_iters = config.optimization_config.free_energy_iters
if log_step_output is not None:
zero_vfe_tuple = VfesTuple(train_vfes=jnp.zeros(opt_iters),
validation_vfes=jnp.zeros(opt_iters))
log_step_output(samples_tuple, log_weights_tuple, zero_vfe_tuple, 0., 1.,
1., config.write_samples)
logging.info('Performing initial step redundantly for accurate timing...')
initial_start_time = time.time()
inner_loop_jit(key, samples_tuple, log_weights_tuple, 1)
initial_finish_time = time.time()
initial_time_diff = initial_finish_time - initial_start_time
logging.info('Initial step time / seconds %f: ', initial_time_diff)
logging.info('Launching training...')
log_normalizer_estimate = 0.
start_time = time.time()
for step in range(1, num_temps):
subkey, key = jax.random.split(key)
samples_tuple, log_weights_tuple, vfes_tuple, log_normalizer_increment, test_acceptance = inner_loop_jit(
subkey, samples_tuple, log_weights_tuple, step)
acceptance_nuts = float(np.asarray(test_acceptance[0]))
acceptance_hmc = float(np.asarray(test_acceptance[1]))
log_normalizer_estimate += log_normalizer_increment
if step % config.report_step == 0:
beta = density_by_step.get_beta(step)
logging.info(
'Step %05d: beta %f Acceptance rate NUTS %f Acceptance rate HMC %f',
step, beta, acceptance_nuts, acceptance_hmc
)
if log_step_output is not None:
log_step_output(samples_tuple, log_weights_tuple,
vfes_tuple, log_normalizer_increment, acceptance_nuts,
acceptance_hmc)
finish_time = time.time()
delta_time = finish_time - start_time
logging.info('Delta time / seconds %f: ', delta_time)
logging.info('Log normalizer estimate %f: ', log_normalizer_estimate)
results = AlgoResultsTuple(
test_samples=samples_tuple.test_samples,
test_log_weights=log_weights_tuple.test_log_weights,
log_normalizer_estimate=log_normalizer_estimate,
delta_time=delta_time,
initial_time_diff=initial_time_diff)
return results
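# Illustrative sketch (an assumption, not the flow_transport implementation) of
# the geometric annealing schedule used conceptually by outer_loop_aft above:
#   log pi_t(x) = (1 - beta_t) * log pi_0(x) + beta_t * log pi_T(x),
# with beta_t = t / (num_temps - 1) increasing from 0 to 1 over the temperatures.
def _demo_geometric_annealed_log_density(x, initial_log_density,
                                         final_log_density, step, num_temps):
  """Interpolate between the initial and final log densities at a given step."""
  beta = step / (num_temps - 1)
  return (1. - beta) * initial_log_density(x) + beta * final_log_density(x)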
|
|
"""(C) Frank-Rene Schaefer
Path Compression ___________________________________________________________
Consider the file 'engine/analyzer/mega_state/path/core.py' for a detailed
explanation of path compression.
Code Generation ____________________________________________________________
Let 'path walker' be a code fragment that is able to 'walk' along a given
path and follow a 'skeleton', i.e. a general transition map, if the current
character is not the one of the path. As described in the above file,
a state is defined by a 'path walker' index and an iterator position that
points to the position of a specific character string. Following code
fragments need to be generated:
(1) The paths, i.e. arrays containing the identified character sequences, e.g.
QUEX_CHARACTER_TYPE path_0 = { 'o', 'r', PTC };
QUEX_CHARACTER_TYPE path_1 = { 'h', 'i', 'l', 'e', PTC };
QUEX_CHARACTER_TYPE path_2 = { 'e', 't', 'u', 'r', 'n', PTC };
where PTC is the path terminating code that must be
different from the buffer limit code BLC.
The init state shall usually not be a path state. It rather routes
to paths. This is why identified paths usually do not contain the
first character of a related keyword. Note, however, that quex
may find paths that are not explicitly considered by the user.
(2) The path walker.
The path walker consists of a 'prolog', where the current input character
is checked to see whether it belongs to the path, and the remaining trigger
map, used in case the path is left, e.g.
PATH_WALKER_1:
/* Single Character Check */
if input == *path_iterator: ++path_iterator; goto PATH_WALKER_1
elif *path_iterator == PTC: goto STATE_3
/* Common Transition Map */
if x < 'a': drop out
elif x > 'z': drop out
else: goto STATE_4
(3) State entries
It is very plausible that states that belong to a path are not
entered except through 'path walk' along the character sequence.
In general, however, a state of a path might be entered from
anywhere. Thus, at least for those states that are entered from
elsewhere, a path entry must be provided.
A path entry consists of: setting the path iterator and goto the
related path walker. Additionally, state attributes, such as
'acceptance' and 'store_input_position' must be considered.
Example:
STATE_10:
path_iterator = path_0;
goto PATH_WALKER_1;
STATE_11:
path_iterator = path_0 + 1;
goto PATH_WALKER_1;
...
(4) State router; this might be necessary if states are non-uniform,
because after reload the current state entry must be passed again.
In buffer based analysis no state router is required. Example of
a state router (same as for template compression):
switch( state_index ) {
case 2222: goto STATE_2222;
case 3333: goto STATE_3333;
...
}
"""
from quex.engine.analyzer.mega_state.path_walker.state import PathWalkerState
from quex.engine.generator.state.core import input_do
from quex.engine.generator.languages.variable_db import variable_db
from quex.blackboard import setup as Setup
def framework(txt, PWState, TheAnalyzer):
"""Implement the Pathwalker's framework. The scheme for a path-walker
is the following:
Pathwalker Head:
Checks whether the current 'input' character is still
on the path or not. If it is on the path we increment
the 'path_iterator' and re-enter the path walker. If
not, the thread of control enters the transition
map.
Pathwalker Transition Map:
The transition map is the one that all implemented
states have in common. From here, transitions to
states outside the path may happen.
"""
LanguageDB = Setup.language_db
input_do(txt, PWState, TheAnalyzer, ForceInputDereferencingF=True)
LanguageDB.STATE_DEBUG_INFO(txt, PWState)
# Three Versions of PathWalker Heads:
if PWState.uniform_entry_command_list_along_all_paths is not None:
# UNIFORM PATHS: Along the path, always the same (or no) commands are executed.
#
# PathWalker Head Implementation:
#
# if input == *path_iterator:
# path_iterator += 1
# if *path_iterator != TerminationCode: goto CommonPathWalkerDoor
# else: goto TerminalDoor
#
# -- "goto CommonPathWalkerDoor"
uniform_entry_door_id = PWState.entry.get_door_id(PWState.index, PWState.index)
goto_next_door = " %s\n" % LanguageDB.GOTO_BY_DOOR_ID(uniform_entry_door_id)
# -- "goto TerminalDoor"
uniform_terminal_entry_door_id = PWState.get_uniform_terminal_entry_door_id(TheAnalyzer.state_db)
if uniform_terminal_entry_door_id is not None:
# All paths have the same terminal state and enter it through the same door
goto_terminal_door = " %s\n" % LanguageDB.GOTO_BY_DOOR_ID(uniform_terminal_entry_door_id)
else:
# The terminals of the paths are different
#
# The "goto TerminalDoor" is implemented for each path. The single
# goto is split into a sequence:
#
# if path_iterator == path_0_end: goto TerminalDoorOfPath0
# else if path_iterator == path_1_end: goto TerminalDoorOfPath1
# else if path_iterator == path_2_end: goto TerminalDoorOfPath2
# ...
tmp = ""
for path_id, sequence in enumerate(PWState.path_list):
terminal_door_id = PathWalkerState.get_terminal_door_id(sequence, TheAnalyzer.state_db)
tmp += " %s" % LanguageDB.IF("path_iterator", "==", "path_walker_%i_path_%i + %s" % \
(PWState.index, path_id, len(sequence)-1), \
FirstF=(path_id == 0)) \
+ " %s\n" % LanguageDB.GOTO_BY_DOOR_ID(terminal_door_id)
tmp += " %s" % LanguageDB.ELSE
tmp += " %s\n" % LanguageDB.UNREACHABLE
tmp += " %s\n" % LanguageDB.END_IF()
goto_terminal_door = tmp
path_walker_head = [" %s" % LanguageDB.IF_INPUT("==", "*path_iterator"),
" %s\n" % LanguageDB.PATH_ITERATOR_INCREMENT,
" %s" % LanguageDB.IF("*path_iterator", "!=", "QUEX_SETTING_PATH_TERMINATION_CODE"),
goto_next_door,
" %s" % LanguageDB.ELSE,
goto_terminal_door,
" %s\n" % LanguageDB.END_IF(),
" %s\n" % LanguageDB.END_IF()]
else:
# NON UNIFORM PATHS
#
# PathWalker Head Implementation:
#
# if input == *path_iterator:
# path_iterator += 1
# goto NextDoor(path_iterator)
#
# Here, the "goto TerminalDoor" results from NextDoor(path_iterator)
# automatically, when the path_iterator stands on the last element.
#
label = "path_walker_%i_state_base[path_iterator - path_walker_%i_reference]" \
% (PWState.index, PWState.index)
goto_next_door = "%s" % (LanguageDB.GOTO_BY_VARIABLE(label))
path_walker_head = [" %s" % LanguageDB.IF_INPUT("==", "*path_iterator"),
" %s\n" % LanguageDB.PATH_ITERATOR_INCREMENT,
" %s\n" % goto_next_door,
" %s\n" % LanguageDB.END_IF()]
txt.extend(path_walker_head)
return
def require_data(PWState, TheAnalyzer):
"""Defines the transition targets for each involved state.
"""
LanguageDB = Setup.language_db
variable_db.require("path_iterator")
def __door_adr_sequences():
result = ["{\n"]
offset = 0
for path_id, path in enumerate(PWState.path_list):
# NOTE: For all states in the path the 'from_state_index, to_state_index' can
# be determined, **except** for the FIRST state in the path. Thus for
# this state the 'door' cannot be determined in case that it is
# "not uniform_doors_f()".
#
# However, the only occasion where the FIRST state in the path may be
# used is reload during the FIRST state. The reload adapts the positions
# and acceptances are not changed. So, we can use the common entry
# to the first state as a reference here.
prev_state_index = path[0][0]
result.append(" ")
for state_index in (x[0] for x in path[1:]):
result.append("QUEX_LABEL(%i), " % LanguageDB.ADDRESS(state_index, prev_state_index))
prev_state_index = state_index
result.append("/* Zero of Elegance */0x0,")
result.append("\n")
offset += len(path)
result.append("  }")
return offset, result
def __character_sequences():
result = ["{\n"]
offset = 0
for path_id, path in enumerate(PWState.path_list):
# Commenting the transition sequence is not dangerous. 'COMMENT' eliminates
# comment delimiters if they would appear in the sequence_str.
# sequence_str = imap(lambda x: Interval(x[1]).get_utf8_string(), path[:-1])
# memory.append(LanguageDB.COMMENT("".join(sequence_str)) + "\n")
# Last element of sequence contains only the 'end state'.
result.append(" ")
result.extend("%i, " % x.transition_char_to_next for x in path[:-1])
result.append("QUEX_SETTING_PATH_TERMINATION_CODE,")
result.append("\n")
variable_db.require("path_walker_%i_path_%i",
Initial = "path_walker_%i_path_base + %i" % (PWState.index, offset),
Index = (PWState.index, path_id))
offset += len(path)
result.append(" }")
return offset, result
# (*) Path Walker Basis
# The 'base' must be defined before all --> PriorityF (see table in variable_db)
element_n, character_sequence_str = __character_sequences()
variable_db.require_array("path_walker_%i_path_base",
ElementN = element_n,
Initial = character_sequence_str,
Index = PWState.index)
# (*) The State Information for each path step
if PWState.uniform_entry_door_id_along_all_paths is None:
element_n, state_sequence_str = __door_adr_sequences()
variable_db.require_array("path_walker_%i_state_base",
ElementN = element_n,
Initial = state_sequence_str,
Index = PWState.index)
# The path_iterator is incremented before the 'goto', thus
# 'path_iterator - (path_base + 1)' gives actually the correct offset.
# We define a variable for that, for elegance.
variable_db.require("path_walker_%i_reference",
Initial = "path_walker_%i_path_base + 1" % PWState.index,
Index = (PWState.index))
|
|
# -*- coding: utf-8 -*-
"""
.. module:: skrf.networkSet
========================================
networkSet (:mod:`skrf.networkSet`)
========================================
Provides a class representing an un-ordered set of n-port microwave networks.
Frequently one needs to make calculations, such as mean or standard
deviation, on an entire set of n-port networks. To facilitate these
calculations the :class:`NetworkSet` class provides convenient
ways to make such calculations.
Another usage is to interpolate a set of Networks which depend on
a parameter (like a knob, or a geometrical parameter).
The results are returned in :class:`~skrf.network.Network` objects,
so they can be plotted and saved in the same way one would do with a
:class:`~skrf.network.Network`.
The functionality in this module is provided as methods and
properties of the :class:`NetworkSet` Class.
NetworkSet Class
================
.. autosummary::
:toctree: generated/
NetworkSet
NetworkSet Utilities
====================
.. autosummary::
:toctree: generated/
func_on_networks
getset
"""
import zipfile
import numpy as npy
import typing
from io import BytesIO
from scipy.interpolate import interp1d
from . network import Network, Frequency, PRIMARY_PROPERTIES, COMPONENT_FUNC_DICT
from . import mathFunctions as mf
from . util import now_string_2_dt
try:
from numpy.typing import ArrayLike
except ImportError:
ArrayLike = typing.Any
class NetworkSet(object):
"""
A set of Networks.
This class allows functions on sets of Networks, such as mean or
standard deviation, to be calculated conveniently. The results are
returned in :class:`~skrf.network.Network` objects, so that they may be
plotted and saved like :class:`~skrf.network.Network` objects.
This class also provides methods which can be used to plot uncertainty
bounds for a set of :class:`~skrf.network.Network`.
The names of the :class:`NetworkSet` properties are generated
dynamically upon initialization, and thus documentation for
individual properties and methods is not available. However, the
properties do follow the convention::
>>> my_network_set.function_name_network_property_name
For example, the complex average (mean)
:class:`~skrf.network.Network` for a
:class:`NetworkSet` is::
>>> my_network_set.mean_s
This accesses the property 's', for each element in the
set, and **then** calculates the 'mean' of the resultant set. The
order of operations is important.
Results are returned as :class:`~skrf.network.Network` objects,
so they may be plotted or saved in the same way as for
:class:`~skrf.network.Network` objects::
>>> my_network_set.mean_s.plot_s_mag()
>>> my_network_set.mean_s.write_touchstone('mean_response')
If you are calculating functions that return scalar variables, then
the result is accessible through the Network property .s_re. For
example::
>>> std_s_deg = my_network_set.std_s_deg
This result would be plotted by::
>>> std_s_deg.plot_s_re()
The operators, properties, and methods of NetworkSet object are
dynamically generated by private methods
* :func:`~NetworkSet.__add_a_operator`
* :func:`~NetworkSet.__add_a_func_on_property`
* :func:`~NetworkSet.__add_a_element_wise_method`
* :func:`~NetworkSet.__add_a_plot_uncertainty`
thus, documentation on the individual methods and properties is
not available.
"""
def __init__(self, ntwk_set: typing.Union[list, dict], name: str = None):
"""
Initialize for NetworkSet.
Parameters
----------
ntwk_set : list of :class:`~skrf.network.Network` objects
the set of :class:`~skrf.network.Network` objects
name : string
the name of the NetworkSet, given to the Networks returned
from properties of this class.
"""
if not isinstance(ntwk_set, (list, dict)):
raise ValueError('NetworkSet requires a list or dict as argument')
# dict is authorized for convenience
# but if a dict is passed instead of a list -> list
if hasattr(ntwk_set, 'values'):
ntwk_set = list(ntwk_set.values())
# list should not be empty
if len(ntwk_set) == 0:
raise ValueError('Input list should not be empty')
# did they pass a list of Networks?
if not all([isinstance(ntwk, Network) for ntwk in ntwk_set]):
raise(TypeError('input must be list of Network types'))
# do all Networks have the same # ports?
if len(set([ntwk.number_of_ports for ntwk in ntwk_set])) > 1:
raise(ValueError('All elements in list of Networks must have same number of ports'))
# is all frequency information the same?
if npy.all([(ntwk_set[0].frequency == ntwk.frequency) \
for ntwk in ntwk_set]) == False:
raise(ValueError('All elements in list of Networks must have same frequency information'))
## initialization
# we are good to go
self.ntwk_set = ntwk_set
self.name = name
# create list of network properties, which we use to dynamically
# create a statistical properties of this set
network_property_list = [k+'_'+l \
for k in PRIMARY_PROPERTIES \
for l in COMPONENT_FUNC_DICT.keys()] + \
['passivity','s']
# dynamically generate properties. this is slick.
max, min = npy.max, npy.min
max.__name__ = 'max'
min.__name__ = 'min'
for network_property_name in network_property_list:
for func in [npy.mean, npy.std, max, min]:
self.__add_a_func_on_property(func, network_property_name)
if 'db' not in network_property_name:
# db uncertainty requires a special function call, see
# plot_uncertainty_bounds_s_db
self.__add_a_plot_uncertainty(network_property_name)
self.__add_a_plot_minmax(network_property_name)
self.__add_a_element_wise_method('plot_'+network_property_name)
self.__add_a_element_wise_method('plot_s_db')
self.__add_a_element_wise_method('plot_s_db_time')
for network_method_name in \
['write_touchstone','interpolate','plot_s_smith']:
self.__add_a_element_wise_method(network_method_name)
for operator_name in \
['__pow__','__floordiv__','__mul__','__div__','__add__','__sub__']:
self.__add_a_operator(operator_name)
@classmethod
def from_zip(cls, zip_file_name: str, sort_filenames: bool = True, *args, **kwargs):
r"""
Create a NetworkSet from a zipfile of touchstones.
Parameters
----------
zip_file_name : string
name of zipfile
sort_filenames: Boolean
sort the filenames in the zip file before constructing the
NetworkSet
\*args, \*\*kwargs : arguments
passed to NetworkSet constructor
Examples
--------
>>> import skrf as rf
>>> my_set = rf.NetworkSet.from_zip('myzip.zip')
"""
z = zipfile.ZipFile(zip_file_name)
filename_list = z.namelist()
ntwk_list = []
if sort_filenames:
filename_list.sort()
for filename in filename_list:
# try/except block in case not all files are touchstones
try: # Ascii files (Touchstone, etc)
n = Network.zipped_touchstone(filename, z)
ntwk_list.append(n)
continue
except:
pass
try: # Binary files (pickled Network)
fileobj = BytesIO(z.open(filename).read())
fileobj.name = filename
n = Network(fileobj)
ntwk_list.append(n)
continue
except:
pass
return cls(ntwk_list)
@classmethod
def from_dir(cls, dir: str = '.', *args, **kwargs):
r"""
Create a NetworkSet from a directory containing Networks.
This just calls ::
rf.NetworkSet(rf.read_all_networks(dir), *args, **kwargs)
Parameters
----------
dir : str
directory containing Network files.
\*args, \*\*kwargs :
passed to NetworkSet constructor
Examples
--------
>>> my_set = rf.NetworkSet.from_dir('./data/')
"""
from . io.general import read_all_networks
return cls(read_all_networks(dir), *args, **kwargs)
@classmethod
def from_s_dict(cls, d: dict, frequency: Frequency, *args, **kwargs):
r"""
Create a NetworkSet from a dictionary of s-parameters
The resultant elements of the NetworkSet are named by the keys of
the dictionary.
Parameters
-------------
d : dict
dictionary of s-parameters data. values of this should be
:class:`numpy.ndarray` assignable to :attr:`skrf.network.Network.s`
frequency: :class:`~skrf.frequency.Frequency` object
frequency assigned to each network
\*args, \*\*kwargs :
passed to Network.__init__ for each key/value pair of d
Returns
----------
ns : NetworkSet
See Also
----------
NetworkSet.to_s_dict
"""
return cls([Network(s=d[k], frequency=frequency, name=k,
*args, **kwargs) for k in d])
def __add_a_operator(self, operator_name):
"""
Add an operator method to the NetworkSet.
The operator takes either a Network or a NetworkSet. If a Network is
passed to the operator, each element of the set will operate on the
Network. If a NetworkSet of the same length as self is passed to the
operator, it will operate element-to-element, like a dot-product.
"""
def operator_func(self, other):
if isinstance(other, NetworkSet):
if len(other) != len(self):
raise(ValueError('Network sets must be of same length to be cascaded'))
return NetworkSet([self.ntwk_set[k].__getattribute__(operator_name)(other.ntwk_set[k]) for k in range(len(self))])
elif isinstance(other, Network):
return NetworkSet([ntwk.__getattribute__(operator_name)(other) for ntwk in self.ntwk_set])
else:
raise(TypeError('NetworkSet operators operate on either Network, or NetworkSet types'))
setattr(self.__class__,operator_name,operator_func)
def __str__(self):
"""
"""
return self.ntwk_set.__str__()
def __repr__(self):
return self.__str__()
def __getitem__(self,key):
"""
Return an element of the network set.
"""
if isinstance(key, str):
# if they pass a string then slice each network in this set
return NetworkSet([k[key] for k in self.ntwk_set],
name = self.name)
else:
return self.ntwk_set[key]
def __len__(self) -> int:
"""
Return the number of Networks in a NetworkSet.
Return
------
len: int
Number of Networks in a NetworkSet
"""
return len(self.ntwk_set)
def __eq__(self, other: 'NetworkSet') -> bool:
"""
Compare the NetworkSet with another NetworkSet.
Two NetworkSets are considered equal if their Networks are all equal
(in the same order)
Returns
-------
is_equal: bool
"""
# of course they should have equal lengths
if len(self) != len(other):
return False
# compare all networks in the order of the list
# return False as soon as 2 networks are different
for (ntwk, ntwk_other) in zip(self.ntwk_set, other):
if ntwk != ntwk_other:
return False
return True
def __add_a_element_wise_method(self, network_method_name: str):
def func(self, *args, **kwargs):
return self.element_wise_method(network_method_name, *args, **kwargs)
setattr(self.__class__,network_method_name,func)
def __add_a_func_on_property(self, func, network_property_name: str):
"""
Dynamically add a property to this class (NetworkSet).
this is mostly used internally to generate all of the class's
properties.
Parameters
----------
func: a function to be applied to the network_property
across the first axis of the property's output
network_property_name: str
a property of the Network class,
which must have a matrix output of shape (f, n, n)
Example
-------
>>> my_ntwk_set.add_a_func_on_property(mean, 's')
"""
fget = lambda self: fon(self.ntwk_set,func,network_property_name,\
name = self.name)
setattr(self.__class__,func.__name__+'_'+network_property_name,\
property(fget))
def __add_a_plot_uncertainty(self, network_property_name: str):
"""
Add a plot uncertainty to a Network property.
Parameter
---------
network_property_name: str
A property of the Network class,
which must have a matrix output of shape (f, n, n)
Example
-------
>>> my_ntwk_set.__add_a_plot_uncertainty('s')
"""
def plot_func(self,*args, **kwargs):
kwargs.update({'attribute':network_property_name})
self.plot_uncertainty_bounds_component(*args,**kwargs)
setattr(self.__class__,'plot_uncertainty_bounds_'+\
network_property_name,plot_func)
setattr(self.__class__,'plot_ub_'+\
network_property_name,plot_func)
def __add_a_plot_minmax(self, network_property_name: str):
"""
Parameter
---------
network_property_name: str
A property of the Network class,
which must have a matrix output of shape (f, n, n)
Example
-------
>>> my_ntwk_set.__add_a_plot_minmax('s')
"""
def plot_func(self,*args, **kwargs):
kwargs.update({'attribute':network_property_name})
self.plot_minmax_bounds_component(*args,**kwargs)
setattr(self.__class__,'plot_minmax_bounds_'+\
network_property_name,plot_func)
setattr(self.__class__,'plot_mm_'+\
network_property_name,plot_func)
def to_dict(self) -> dict:
"""
Return a dictionary representation of the NetworkSet.
Return
------
d : dict
The returned dictionary has the Network names for keys,
and the Networks as values.
"""
return dict([(k.name, k) for k in self.ntwk_set])
def to_s_dict(self):
"""
Converts a NetworkSet to a dictionary of s-parameters.
The resultant keys of the dictionary are the names of the Networks
in NetworkSet
Returns
-------
s_dict : dictionary
contains s-parameters in the form of complex numpy arrays
See Also
--------
NetworkSet.from_s_dict
"""
d = self.to_dict()
for k in d:
d[k] = d[k].s
return d
def element_wise_method(self, network_method_name: str, *args, **kwargs) -> 'NetworkSet':
"""
Call a given method of each element and return the result as
a new NetworkSet if the output is a Network.
Parameter
---------
network_method_name: str
A method of the Network class to call on each element;
*args and **kwargs are passed through to it.
Return
------
ns: :class: `~skrf.networkSet.NetworkSet`
"""
output = [ntwk.__getattribute__(network_method_name)(*args, **kwargs) for ntwk in self.ntwk_set]
if isinstance(output[0],Network):
return NetworkSet(output)
else:
return output
def copy(self) -> 'NetworkSet':
"""
Copy each Network in the NetworkSet.
Return
------
ns: :class: `~skrf.networkSet.NetworkSet`
"""
return NetworkSet([k.copy() for k in self.ntwk_set])
def sort(self, key=lambda x: x.name, inplace: bool = True, **kwargs) -> typing.Union[None, 'NetworkSet']:
r"""
Sort this network set.
Parameters
----------
key: function of one argument used to extract the comparison key from each Network (default: the Network name)
inplace: bool
Sort the NetworkSet object directly if True,
return the sorted NetworkSet if False. Default is True.
\*\*kwargs : dict
keyword args passed to builtin sorted acting on self.ntwk_set
Return
------
ns: None if inplace=True, NetworkSet if False
Examples
--------
>>> ns = rf.NetworkSet.from_dir('mydir')
>>> ns.sort()
Sort by other property:
>>> ns.sort(key= lambda x: x.voltage)
Returns a new NetworkSet:
>>> sorted_ns = ns.sort(inplace=False)
"""
sorted_ns = sorted(self.ntwk_set, key = key, **kwargs)
if inplace:
self.ntwk_set = sorted_ns
else:
return sorted_ns
def rand(self, n: int = 1):
"""
Return `n` random samples from this NetworkSet.
Parameters
----------
n : int
number of samples to return (default is 1)
"""
idx = npy.random.randint(0,len(self), n)
out = [self.ntwk_set[k] for k in idx]
if n ==1:
return out[0]
else:
return out
def filter(self, s: str) -> 'NetworkSet':
"""
Filter NetworkSet based on a string in `Network.name`.
Notes
-----
This is just
`NetworkSet([k for k in self if s in k.name])`
Parameters
----------
s: str
string contained in network elements to be filtered
Returns
--------
ns : :class: `skrf.NetworkSet`
Examples
-----------
>>> ns.filter('monday')
"""
return NetworkSet([k for k in self if s in k.name])
def scalar_mat(self, param: str = 's') -> npy.ndarray:
"""
Return a scalar ndarray representing `param` data vs freq and element idx.
The output is a 3D array with axes (freq, ns_index, port/ri), where the last
axis holds the flattened real/imaginary port components (`len = 2*nports**2`).
Parameter
---------
param : str
name of the parameter to export. Default is 's'.
Return
------
x : :class: npy.ndarray
"""
ntwk = self[0]
nfreq = len(ntwk)
# x will have the axes (frequency, observations, ports)
x = npy.array([[mf.flatten_c_mat(k.__getattribute__(param)[f]) \
for k in self] for f in range(nfreq)])
return x
def cov(self, **kw) -> npy.ndarray:
"""
Covariance matrix.
shape of output will be (nfreq, 2*nports**2, 2*nports**2)
"""
smat=self.scalar_mat(**kw)
return npy.array([npy.cov(k.T) for k in smat])
@property
def mean_s_db(self) -> Network:
"""
Return Network of mean magnitude in dB.
Return
------
ntwk : :class: `~skrf.network.Network`
Network of the mean magnitude in dB
Note
----
The mean is taken on the magnitude before converting to dB, so
`magnitude_2_db(mean(s_mag))`
which is NOT the same as
`mean(s_db)`
"""
ntwk = self.mean_s_mag
ntwk.s = ntwk.s_db
return ntwk
@property
def std_s_db(self) -> Network:
"""
Return the Network of the standard deviation magnitude in dB.
Return
------
ntwk : :class: `~skrf.network.Network`
Network of the mean magnitude in dB
Note
----
The standard deviation is taken on the magnitude before converting to dB, so
`magnitude_2_db(std(s_mag))`
which is NOT the same as
`std(s_db)`
"""
ntwk= self.std_s_mag
ntwk.s = ntwk.s_db
return ntwk
@property
def inv(self) -> 'NetworkSet':
"""
Return the NetworkSet of inverted Networks (Network.inv()).
Returns
-------
ntwkSet : :class: `~skrf.networkSet.NetworkSet`
NetworkSet of inverted Networks
"""
return NetworkSet( [ntwk.inv for ntwk in self.ntwk_set])
def add_polar_noise(self, ntwk: Network) -> Network:
"""
Parameters
----------
ntwk : :class: `~skrf.network.Network`
Returns
-------
ntwk : :class: `~skrf.network.Network`
"""
from scipy import stats
from numpy import frompyfunc
gimme_norm = lambda x: stats.norm(loc=0,scale=x).rvs(1)[0]
ugimme_norm = frompyfunc(gimme_norm,1,1)
s_deg_rv = npy.array(list(map(ugimme_norm, self.std_s_deg.s_re)), dtype=float)
s_mag_rv = npy.array(list(map(ugimme_norm, self.std_s_mag.s_re)), dtype=float)
mag = ntwk.s_mag + s_mag_rv
deg = ntwk.s_deg + s_deg_rv
ntwk.s = mag * npy.exp(1j*npy.pi/180*deg)
return ntwk
def set_wise_function(self, func, a_property: str, *args, **kwargs):
"""
Calls a function on a specific property of the Networks in this NetworkSet.
Parameters
----------
func : callable
a_property : str
Example
-------
>>> my_ntwk_set.set_wise_func(mean,'s')
"""
return fon(self.ntwk_set, func, a_property, *args, **kwargs)
def uncertainty_ntwk_triplet(self, attribute: str, n_deviations: int = 3) -> (Network, Network, Network):
"""
Return a 3-tuple of Network objects which contain the
mean, upper_bound, and lower_bound for the given Network
attribute.
Used to save and plot uncertainty information data.
Note that providing 's' and 's_mag' as attributes will provide different results.
For those who want to directly find uncertainty on dB performance, use 's_mag'.
Parameters
----------
attribute : str
Attribute to operate on.
n_deviations : int, optional
Number of standard deviation. The default is 3.
Returns
-------
ntwk_mean : :class: `~skrf.network.Network`
Network of the averaged attribute
lower_bound : :class: `~skrf.network.Network`
Network of the lower bound of N*sigma deviation.
upper_bound : :class: `~skrf.network.Network`
Network of the upper bound of N*sigma deviation.
Example
-------
>>> (ntwk_mean, ntwk_lb, ntwk_ub) = my_ntwk_set.uncertainty_ntwk_triplet('s')
>>> (ntwk_mean, ntwk_lb, ntwk_ub) = my_ntwk_set.uncertainty_ntwk_triplet('s_mag')
"""
ntwk_mean = self.__getattribute__('mean_'+attribute)
ntwk_std = self.__getattribute__('std_'+attribute)
ntwk_std.s = n_deviations * ntwk_std.s
upper_bound = (ntwk_mean + ntwk_std)
lower_bound = (ntwk_mean - ntwk_std)
return (ntwk_mean, lower_bound, upper_bound)
def datetime_index(self) -> list:
"""
Create a datetime index from networks names.
this is just:
`[rf.now_string_2_dt(k.name ) for k in self]`
"""
return [now_string_2_dt(k.name ) for k in self]
# io
def write(self, file=None, *args, **kwargs):
r"""
Write the NetworkSet to disk using :func:`~skrf.io.general.write`
Parameters
----------
file : str or file-object
filename or a file-object. If left as None then the
filename will be set to Calibration.name, if its not None.
If both are None, ValueError is raised.
\*args, \*\*kwargs : arguments and keyword arguments
passed through to :func:`~skrf.io.general.write`
Notes
-----
If self.name is not None, file can be left as None
and the resultant file will have the `.ns` extension appended
to the filename.
Examples
---------
>>> ns.name = 'my_ns'
>>> ns.write()
See Also
---------
skrf.io.general.write
skrf.io.general.read
"""
# this import is delayed until here because of a circular dependency
from . io.general import write
if file is None:
if self.name is None:
raise (ValueError('No filename given. You must provide a filename, or set the name attribute'))
file = self.name
write(file, self, *args, **kwargs)
def write_spreadsheet(self, *args, **kwargs):
"""
Write contents of the NetworkSet to a spreadsheet, for your boss to use.
Example
-------
>>> ns.write_spreadsheet() # the ns.name attribute must exist
>>> ns.write_spreadsheet(file_name='testing.xlsx')
See Also
---------
skrf.io.general.network_2_spreadsheet
"""
from . io.general import networkset_2_spreadsheet
networkset_2_spreadsheet(self, *args, **kwargs)
def ntwk_attr_2_df(self, attr='s_db',m=0, n=0, *args, **kwargs):
"""
Convert an attribute of the Networks within a NetworkSet to a Pandas DataFrame.
Examples
--------
>>> df = ns.ntwk_attr_2_df('s_db', m=1, n=0)
>>> df.to_excel('output.xls') # see Pandas docs for more info
"""
from pandas import DataFrame, Series, Index
index = Index(
self[0].frequency.f_scaled,
name='Freq(%s)'%self[0].frequency.unit
)
df = DataFrame(
dict([('%s'%(k.name),
Series(k.__getattribute__(attr)[:,m,n],index=index))
for k in self]),
index = index,
)
return df
def interpolate_from_network(self, ntw_param: ArrayLike, x: float, interp_kind: str = 'linear'):
"""
Interpolate a Network from a NetworkSet, as a multi-file N-port network.
Assumes that the NetworkSet contains N-port networks
with same number of ports N and same number of frequency points.
These networks differ by a given array parameter `ntw_param`,
which is used to interpolate the returned Network. The length of `ntw_param`
should be equal to the length of the NetworkSet.
Parameters
----------
ntw_param : (N,) array_like
A 1-D array of real values. The length of ntw_param must be equal
to the length of the NetworkSet
x : real
Point to evaluate the interpolated network at
interp_kind: str
Specifies the kind of interpolation as a string: 'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'. Cf :class:`scipy.interpolate.interp1d` for detailed description.
Default is 'linear'.
Returns
-------
ntw : class:`~skrf.network.Network`
Network interpolated at x
Example
-------
Assuming that `ns` is a NetworkSet containing 3 Networks (length=3) :
>>> param_x = [1, 2, 3] # a parameter associated to each Network
>>> x0 = 1.5 # parameter value to interpolate for
>>> interp_ntwk = ns.interpolate_from_network(param_x, x0)
"""
ntw = self[0].copy()
# Interpolating the scattering parameters
s = npy.array([self[idx].s for idx in range(len(self))])
f = interp1d(ntw_param, s, axis=0, kind=interp_kind)
ntw.s = f(x)
return ntw
def func_on_networks(ntwk_list, func, attribute='s',name=None, *args,\
**kwargs):
r"""
Applies a function to some attribute of a list of networks.
Returns the result in the form of a Network. This means information
that may not be s-parameters is stored in the s-matrix of the
returned Network.
Parameters
-------------
ntwk_list : list of :class:`~skrf.network.Network` objects
list of Networks to apply `func` to
func : function
function to operate on `ntwk_list` s-matrices
attribute : string
attribute of the Networks in ntwk_list for func to act on
\*args,\*\*kwargs : arguments and keyword arguments
passed to func
Returns
---------
ntwk : :class:`~skrf.network.Network`
Network with s-matrix the result of func, operating on
ntwk_list's s-matrices
Examples
----------
averaging can be implemented with func_on_networks by
>>> func_on_networks(ntwk_list, mean)
"""
data_matrix = \
npy.array([ntwk.__getattribute__(attribute) for ntwk in ntwk_list])
new_ntwk = ntwk_list[0].copy()
new_ntwk.s = func(data_matrix,axis=0,*args,**kwargs)
if name is not None:
new_ntwk.name = name
return new_ntwk
# short hand name for convenience
fon = func_on_networks
def getset(ntwk_dict, s, *args, **kwargs):
r"""
Creates a :class:`NetworkSet` of all :class:`~skrf.network.Network`
objects in a dictionary whose keys contain `s`. This is useful
for dealing with the output of
:func:`~skrf.io.general.load_all_touchstones`, which contains
Networks grouped by some kind of naming convention.
Parameters
------------
ntwk_dict : dictionary of Network objects
network dictionary that contains a set of keys `s`
s : string
string contained in the keys of ntwk_dict that are to be in the
NetworkSet that is returned
\*args,\*\*kwargs : passed to NetworkSet()
Returns
--------
ntwk_set : NetworkSet object
A NetworkSet made from the values of ntwk_dict whose keys
contain `s`
Examples
---------
>>> ntwk_dict = rf.load_all_touchstones('my_dir')
>>> set5v = getset(ntwk_dict, '5v')
>>> set10v = getset(ntwk_dict, '10v')
"""
ntwk_list = [ntwk_dict[k] for k in ntwk_dict if s in k]
if len(ntwk_list) > 0:
return NetworkSet( ntwk_list,*args, **kwargs)
else:
print('Warning: No keys in ntwk_dict contain \'%s\''%s)
return None
def tuner_constellation(name='tuner', singlefreq=76, Z0=50, r_lin = 9, phi_lin=21, TNWformat=True):
r = npy.linspace(0.1,0.9,r_lin)
a = npy.linspace(0,2*npy.pi,phi_lin)
r_, a_ = npy.meshgrid(r,a)
c_ = r_ *npy.exp(1j * a_)
g= c_.flatten()
x = npy.real(g)
y = npy.imag(g)
if TNWformat :
TNL = dict()
# for ii, gi in enumerate(g) :
for ii, gi in enumerate(g) :
TNL['pos'+str(ii)] = Network(f = [singlefreq ], s=[[[gi]]], z0=[[Z0]], name=name +'_' + str(ii))
TNW = NetworkSet(TNL, name=name)
return TNW, x,y,g
else :
return x,y,g
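# Hedged usage sketch (illustrative only, not part of the public API): building a
# NetworkSet from synthetic one-port Networks and using the dynamically generated
# statistical properties described in the NetworkSet class docstring above.
def _example_networkset_statistics():
    freq = Frequency(1, 10, 11, 'ghz')
    ntwk_list = [Network(frequency=freq,
                         s=npy.random.uniform(-1, 1, (freq.npoints, 1, 1)) +
                           1j * npy.random.uniform(-1, 1, (freq.npoints, 1, 1)),
                         name='ntwk_%i' % k)
                 for k in range(5)]
    ns = NetworkSet(ntwk_list, name='demo')
    mean_ntwk = ns.mean_s   # complex mean of 's', returned as a Network
    std_db = ns.std_s_db    # standard deviation of |s|, expressed in dB
    return mean_ntwk, std_db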
|
|
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
import numpy
import paddle.v2 as paddle
BATCH_SIZE = 100
scope = core.Scope()
place = core.CPUPlace()
# if you want to test GPU training, you can use gpu place
# place = core.GPUPlace(0)
dev_ctx = core.DeviceContext.create(place)
init_net = core.Net.create()
forward_net = core.Net.create()
backward_net = None
optimize_net = core.Net.create()
def atomic_id():
id = 0
while True:
yield id
id += 1
uniq_id = atomic_id().next
def data_layer(name, dims):
var = scope.new_var(name)
tensor = var.get_tensor()
tensor.set_dims(dims)  # dims[0] holds the batch size.
return name
def feed_data(name, data):
assert isinstance(data, numpy.ndarray)
tensor = scope.find_var(name).get_tensor()
tensor.set_dims(data.shape)
if data.dtype == numpy.dtype("int32"):
tensor.alloc_int(place)
elif data.dtype == numpy.dtype("float32"):
tensor.alloc_float(place)
else:
raise ValueError("data type not supported")
tensor.set(data, place)
def grad_var_name(var_name):
return var_name + "@GRAD"
def sgd_optimizer(net, param_name, learning_rate=0.005):
grad_name = grad_var_name(param_name)
optimize_op = Operator(
"sgd",
param=param_name,
grad=grad_name,
param_out=param_name,
learning_rate=learning_rate)
net.append_op(optimize_op)
# should use operator and add these to the init_network
def init_param(net, param_name, dims):
scope.new_var(param_name)
op = Operator(
"uniform_random", Out=param_name, dims=dims, min=-0.5, max=0.5, seed=10)
op.infer_shape(scope)
net.append_op(op)
# fc_layer
def fc_layer(net, input, size, act="softmax", bias=True, param=None, name=None):
"""
The fully connected layer.
:param input: The name of input variable.
:type input: str
:param size: The size of fully connected layer.
:param act: The name of activation.
:param param: The attribute of learnable parameter which can be used to
modify initialization mean and std of the parameter.
:param bias: The attribute of bias. If set False, this layer does not have
a bias.
:param name: The name of this layer. If it is not set explicitly, a name
will be generated automatically.
:return: The name of the output variable.
"""
if name is None:
name = "fc_%d" % uniq_id()
if not isinstance(name, str):
raise ValueError("The name of a layer should be a string.")
input_dims = scope.find_var(input).get_tensor().get_dims()
w_name = param or name + ".w"
init_param(net=init_net, param_name=w_name, dims=[input_dims[1], size])
sgd_optimizer(net=optimize_net, param_name=w_name, learning_rate=0.01)
pre_activation = name + ".mul.out"
scope.new_var(pre_activation)
mul_op = Operator("mul", X=input, Y=w_name, Out=pre_activation)
net.append_op(mul_op)
# create bias variable if needed
if bias:
bias_name = name + ".b"
init_param(net=init_net, param_name=bias_name, dims=[size])
sgd_optimizer(
net=optimize_net, param_name=bias_name, learning_rate=0.001)
bias_out = name + ".rowwise_add.out"
scope.new_var(bias_out)
rowwise_append_op = Operator(
"rowwise_add", X=pre_activation, b=bias_name, Out=bias_out)
net.append_op(rowwise_append_op)
pre_activation = bias_out
activation_op = Operator(act, X=pre_activation, Y=name)
net.append_op(activation_op)
scope.new_var(name)
net.infer_shape(scope)
return name
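# Conceptual numpy sketch (illustrative only) of what fc_layer assembles from
# operators above: out = act(x . W + b), shown here with a softmax activation.
# It is not used by the training script below.
def _demo_fc_forward(x, w, b):
    logits = numpy.dot(x, w) + b
    # Numerically stable softmax over the last axis.
    shifted = logits - logits.max(axis=1, keepdims=True)
    exp = numpy.exp(shifted)
    return exp / exp.sum(axis=1, keepdims=True)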
def cross_entropy_layer(net, input, label):
cost_name = "cross_entropy_%d" % uniq_id()
cross_entropy_op = Operator(
"onehot_cross_entropy", X=input, label=label, Y=cost_name)
net.append_op(cross_entropy_op)
scope.new_var(cost_name)
net.infer_shape(scope)
return cost_name
def create_backward_net(forward_net):
net = core.Operator.backward(forward_net, set())
for input in net.inputs()["all"]:
var = scope.new_var(input)
var.get_tensor()
for output in net.outputs()["all"]:
var = scope.new_var(output)
var.get_tensor()
return net
def debug_print_op(op):
print("===============" + op.type() + "==============")
print("***inputs:***")
for input in op.inputs()["all"]:
print input, scope.find_var(input).get_tensor().get_dims()
print("\n***outputs:***")
for output in op.outputs()["all"]:
print output, scope.find_var(output).get_tensor().get_dims()
print("")
print("")
def set_cost(cost):
cost_shape = numpy.array(scope.find_var(cost).get_tensor()).shape
cost_grad = \
scope.find_var(grad_var_name(cost)).get_tensor()
cost_grad.set_dims(cost_shape)
cost_grad.alloc_float(place)
cost_grad.set(numpy.ones(cost_shape).astype("float32"), place)
def get_cost_mean(cost):
cost_data = numpy.array(scope.find_var(cost).get_tensor())
return cost_data.sum() / len(cost_data)
def error_rate(predict, label):
predict_var = numpy.array(scope.find_var(predict).get_tensor()).argmax(
axis=1)
label = numpy.array(scope.find_var(label).get_tensor())
error_num = numpy.sum(predict_var != label)
return error_num / float(len(label))
images = data_layer(name="pixel", dims=[BATCH_SIZE, 784])
labels = data_layer(name="label", dims=[BATCH_SIZE])
fc1 = fc_layer(net=forward_net, input=images, size=100, act="sigmoid")
fc2 = fc_layer(net=forward_net, input=fc1, size=100, act="sigmoid")
predict = fc_layer(net=forward_net, input=fc2, size=10, act="softmax")
cost = cross_entropy_layer(net=forward_net, input=predict, label=labels)
init_net.complete_add_op(True)
forward_net.complete_add_op(True)
backward_net = create_backward_net(forward_net)
optimize_net.complete_add_op(True)
print(init_net)
print(forward_net)
print(backward_net)
print(optimize_net)
debug_print_op(forward_net)
debug_print_op(backward_net)
debug_print_op(optimize_net)
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=8192),
batch_size=BATCH_SIZE)
def test(cost_name):
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
cost = []
error = []
for data in test_reader():
image_data = numpy.array(map(lambda x: x[0], data)).astype("float32")
label_data = numpy.array(map(lambda x: x[1], data)).astype("int32")
feed_data(images, image_data)
feed_data(labels, label_data)
forward_net.infer_shape(scope)
forward_net.run(scope, dev_ctx)
cost.append(get_cost_mean(cost_name))
error.append(error_rate(predict, "label"))
print("cost=" + str(sum(cost) / float(len(cost))) + " error_rate=" + str(
sum(error) / float(len(error))))
PASS_NUM = 1
init_net.run(scope, dev_ctx)
for pass_id in range(PASS_NUM):
batch_id = 0
for data in train_reader():
image_data = numpy.array(map(lambda x: x[0], data)).astype("float32")
label_data = numpy.array(map(lambda x: x[1], data)).astype("int32")
feed_data(images, image_data)
feed_data(labels, label_data)
forward_net.infer_shape(scope)
forward_net.run(scope, dev_ctx)
set_cost(cost)
backward_net.infer_shape(scope)
backward_net.run(scope, dev_ctx)
optimize_net.run(scope, dev_ctx)
if batch_id % 100 == 0:
print("pass[" + str(pass_id) + "] batch_id[" + str(batch_id) + "]")
test(cost)
batch_id = batch_id + 1
|
|
'''
Copyright (c) 2011-2014, Agora Games, LLC All rights reserved.
https://github.com/agoragames/haigha/blob/master/LICENSE.txt
'''
from chai import Chai
from haigha.classes import transaction_class
from haigha.classes.protocol_class import ProtocolClass
from haigha.classes.transaction_class import TransactionClass
from haigha.frames.method_frame import MethodFrame
from haigha.writer import Writer
from collections import deque
class TransactionClassTest(Chai):
def setUp(self):
super(TransactionClassTest, self).setUp()
ch = mock()
ch.channel_id = 42
ch.logger = mock()
self.klass = TransactionClass(ch)
def test_init(self):
expect(ProtocolClass.__init__).args('foo', a='b')
klass = TransactionClass.__new__(TransactionClass)
klass.__init__('foo', a='b')
assert_equals(
{
11: klass._recv_select_ok,
21: klass._recv_commit_ok,
31: klass._recv_rollback_ok,
}, klass.dispatch_map)
assert_false(klass._enabled)
assert_equals(deque(), klass._select_cb)
assert_equals(deque(), klass._commit_cb)
assert_equals(deque(), klass._rollback_cb)
def test_cleanup(self):
self.klass._cleanup()
assert_equals(None, self.klass._select_cb)
assert_equals(None, self.klass._commit_cb)
assert_equals(None, self.klass._rollback_cb)
assert_equals(None, self.klass._channel)
assert_equals(None, self.klass.dispatch_map)
def test_properties(self):
self.klass._enabled = 'maybe'
assert_equals('maybe', self.klass.enabled)
def test_select_when_not_enabled_and_no_cb(self):
self.klass._enabled = False
expect(mock(transaction_class, 'MethodFrame')).args(
42, 90, 10).returns('frame')
expect(self.klass.send_frame).args('frame')
expect(self.klass.channel.add_synchronous_cb).args(
self.klass._recv_select_ok)
self.klass.select()
assert_true(self.klass.enabled)
assert_equals(deque([None]), self.klass._select_cb)
def test_select_when_not_enabled_with_cb(self):
self.klass._enabled = False
expect(mock(transaction_class, 'MethodFrame')).args(
42, 90, 10).returns('frame')
expect(self.klass.send_frame).args('frame')
expect(self.klass.channel.add_synchronous_cb).args(
self.klass._recv_select_ok)
self.klass.select(cb='foo')
assert_true(self.klass.enabled)
assert_equals(deque(['foo']), self.klass._select_cb)
def test_select_when_already_enabled(self):
self.klass._enabled = True
stub(self.klass.send_frame)
assert_equals(deque(), self.klass._select_cb)
self.klass.select()
assert_equals(deque(), self.klass._select_cb)
def test_recv_select_ok_with_cb(self):
cb = mock()
self.klass._select_cb.append(cb)
self.klass._select_cb.append(mock())
expect(cb)
self.klass._recv_select_ok('frame')
assert_equals(1, len(self.klass._select_cb))
assert_false(cb in self.klass._select_cb)
def test_recv_select_ok_without_cb(self):
self.klass._select_cb.append(None)
self.klass._select_cb.append(mock())
self.klass._recv_select_ok('frame')
assert_equals(1, len(self.klass._select_cb))
assert_false(None in self.klass._select_cb)
def test_commit_when_enabled_no_cb(self):
self.klass._enabled = True
expect(mock(transaction_class, 'MethodFrame')).args(
42, 90, 20).returns('frame')
expect(self.klass.send_frame).args('frame')
expect(self.klass.channel.add_synchronous_cb).args(
self.klass._recv_commit_ok)
assert_equals(deque(), self.klass._commit_cb)
self.klass.commit()
assert_equals(deque([None]), self.klass._commit_cb)
def test_commit_when_enabled_with_cb(self):
self.klass._enabled = True
expect(mock(transaction_class, 'MethodFrame')).args(
42, 90, 20).returns('frame')
expect(self.klass.send_frame).args('frame')
expect(self.klass.channel.add_synchronous_cb).args(
self.klass._recv_commit_ok)
self.klass._commit_cb = deque(['blargh'])
self.klass.commit(cb='callback')
assert_equals(deque(['blargh', 'callback']), self.klass._commit_cb)
def test_commit_raises_transactionsnotenabled_when_not_enabled(self):
self.klass._enabled = False
assert_raises(
TransactionClass.TransactionsNotEnabled, self.klass.commit)
def test_recv_commit_ok_with_cb(self):
cb = mock()
self.klass._commit_cb.append(cb)
self.klass._commit_cb.append(mock())
expect(cb)
self.klass._recv_commit_ok('frame')
assert_equals(1, len(self.klass._commit_cb))
assert_false(cb in self.klass._commit_cb)
def test_recv_commit_ok_without_cb(self):
self.klass._commit_cb.append(None)
self.klass._commit_cb.append(mock())
self.klass._recv_commit_ok('frame')
assert_equals(1, len(self.klass._commit_cb))
assert_false(None in self.klass._commit_cb)
def test_rollback_when_enabled_no_cb(self):
self.klass._enabled = True
expect(mock(transaction_class, 'MethodFrame')).args(
42, 90, 30).returns('frame')
expect(self.klass.send_frame).args('frame')
expect(self.klass.channel.add_synchronous_cb).args(
self.klass._recv_rollback_ok)
assert_equals(deque(), self.klass._rollback_cb)
self.klass.rollback()
assert_equals(deque([None]), self.klass._rollback_cb)
def test_rollback_when_enabled_with_cb(self):
self.klass._enabled = True
expect(mock(transaction_class, 'MethodFrame')).args(
42, 90, 30).returns('frame')
expect(self.klass.send_frame).args('frame')
expect(self.klass.channel.add_synchronous_cb).args(
self.klass._recv_rollback_ok)
self.klass._rollback_cb = deque(['blargh'])
self.klass.rollback(cb='callback')
assert_equals(deque(['blargh', 'callback']), self.klass._rollback_cb)
def test_rollback_raises_transactionsnotenabled_when_not_enabled(self):
self.klass._enabled = False
assert_raises(
TransactionClass.TransactionsNotEnabled, self.klass.rollback)
def test_recv_rollback_ok_with_cb(self):
cb = mock()
self.klass._rollback_cb.append(cb)
self.klass._rollback_cb.append(mock())
expect(cb)
self.klass._recv_rollback_ok('frame')
assert_equals(1, len(self.klass._rollback_cb))
assert_false(cb in self.klass._rollback_cb)
def test_recv_rollback_ok_without_cb(self):
self.klass._rollback_cb.append(None)
self.klass._rollback_cb.append(mock())
self.klass._recv_rollback_ok('frame')
assert_equals(1, len(self.klass._rollback_cb))
assert_false(None in self.klass._rollback_cb)
|
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for trimmed_match.design.tests.util."""
import numpy as np
import pandas as pd
from trimmed_match.design import common_classes
from trimmed_match.design import util
import unittest
TimeWindow = common_classes.TimeWindow
class UtilTest(unittest.TestCase):
def setUp(self):
"""This method will be run before each of the test methods in the class."""
super().setUp()
self.df = pd.DataFrame({
'date': ['2020-10-09', '2020-10-10', '2020-10-11'] * 4,
'geo': [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
'response': [10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40],
'cost': [1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0],
})
def testCalculateMinDetectableIroasValueError(self):
with self.assertRaises(ValueError):
util.CalculateMinDetectableIroas(-0.1, 0.5)
with self.assertRaises(ValueError):
util.CalculateMinDetectableIroas(0.1, 1.1)
def testCalculateMinDetectableIroas(self):
calc_min_detectable_iroas = util.CalculateMinDetectableIroas(
significance_level=0.1, power_level=0.9)
self.assertAlmostEqual(2.56, calc_min_detectable_iroas.at(1.0), places=2)
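  # Note (an assumption about the underlying formula, to explain the 2.56 above):
  # for a one-sided test at significance level 0.1 and power 0.9, the minimum
  # detectable effect scales as z_{1-alpha} + z_{power} = 1.2816 + 1.2816 ~= 2.56
  # times the standard error, evaluated here at 1.0.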
def testFindDaysToExclude(self):
day_week_exclude = [
'2020/10/10', '2020/11/10-2020/12/10', '2020/08/10']
days_to_remove = util.find_days_to_exclude(day_week_exclude)
expected_days = [
TimeWindow(pd.Timestamp('2020-10-10'), pd.Timestamp('2020-10-10')),
TimeWindow(pd.Timestamp('2020-11-10'), pd.Timestamp('2020-12-10')),
TimeWindow(pd.Timestamp('2020-08-10'), pd.Timestamp('2020-08-10')),
]
for x in range(len(expected_days)):
self.assertEqual(days_to_remove[x].first_day, expected_days[x].first_day)
self.assertEqual(days_to_remove[x].last_day, expected_days[x].last_day)
def testWrongDateFormat(self):
incorrect_day = ['2020/13/13', '2020/03/03']
with self.assertRaises(ValueError):
util.find_days_to_exclude(incorrect_day)
incorrect_time_window = ['2020/10/13 - 2020/13/11', '2020/03/03']
with self.assertRaises(ValueError):
util.find_days_to_exclude(incorrect_time_window)
incorrect_format = ['2020/10/13 - 2020/13/11 . 2020/10/10']
with self.assertRaises(ValueError):
util.find_days_to_exclude(incorrect_format)
def testExpandTimeWindows(self):
day_week_exclude = [
'2020/10/10', '2020/11/10-2020/12/10', '2020/08/10']
days_to_remove = util.find_days_to_exclude(day_week_exclude)
periods = util.expand_time_windows(days_to_remove)
expected = [
pd.Timestamp('2020-10-10', freq='D'),
pd.Timestamp('2020-08-10', freq='D'),
]
expected += pd.date_range(start='2020-11-10', end='2020-12-10', freq='D')
self.assertEqual(len(periods), len(expected))
for x in periods:
self.assertIn(x, expected)
def testCheckNoOverlap(self):
expected = 0.0
dates_left = ['2020-03-02', '2020-03-03']
dates_right = ['2020-03-04', '2020-03-05']
percentage = util.overlap_percent(dates_left, dates_right)
self.assertEqual(percentage, expected)
def testCheckOverlap(self):
expected = 50.0
dates_left = ['2020-03-02', '2020-03-04']
dates_right = ['2020-03-04', '2020-03-05']
percentage = util.overlap_percent(dates_left, dates_right)
self.assertEqual(percentage, expected)
def testFindFrequency(self):
dates = list(pd.date_range(start='2020-01-01', end='2020-02-01', freq='D'))
geos = [1, 2, 3, 4]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
df.set_index(['geo', 'date'], inplace=True)
frequency = util.infer_frequency(df, 'date', 'geo')
self.assertEqual(frequency, 'D')
weeks = list(pd.date_range(start='2020-01-01', end='2020-02-01', freq='W'))
df = pd.DataFrame({
'date': weeks * len(geos),
'geo': sorted(geos * len(weeks))
})
df.set_index(['geo', 'date'], inplace=True)
frequency = util.infer_frequency(df, 'date', 'geo')
self.assertEqual(frequency, 'W')
def testDifferentFrequencies(self):
dates = list(pd.date_range(start='2020-01-01', end='2020-02-01', freq='D'))
weeks = list(pd.date_range(start='2020-01-01', end='2020-02-01', freq='W'))
geos = [1] * len(dates) + [2] * len(weeks)
df = pd.DataFrame({
'date': dates + weeks,
'geo': geos
})
df.set_index(['geo', 'date'], inplace=True)
with self.assertRaises(ValueError) as cm:
_ = util.infer_frequency(df, 'date', 'geo')
self.assertEqual(
str(cm.exception),
'The provided time series seem to have irregular frequencies.')
def testFindFrequencyDataNotSorted(self):
dates = list(pd.date_range(start='2020-01-01', end='2020-02-01', freq='D'))
geos = [1, 2, 3, 4]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
# permute the order of the rows, so that the dataset is not sorted by date
df = df.sample(frac=1, replace=False)
df.set_index(['geo', 'date'], inplace=True)
frequency = util.infer_frequency(df, 'date', 'geo')
self.assertEqual(frequency, 'D')
def testInsufficientData(self):
dates = list(pd.date_range(start='2020-01-01', end='2020-01-01', freq='D'))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
df.set_index(['geo', 'date'], inplace=True)
with self.assertRaises(ValueError) as cm:
_ = util.infer_frequency(df, 'date', 'geo')
self.assertEqual(
str(cm.exception),
'At least one series with more than one observation must be provided.')
def testUnknownFrequency(self):
dates = list(pd.to_datetime(['2020-10-10', '2020-10-13', '2020-10-16']))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
df.set_index(['geo', 'date'], inplace=True)
with self.assertRaises(ValueError) as cm:
_ = util.infer_frequency(df, 'date', 'geo')
self.assertEqual(str(cm.exception),
'Frequency could not be identified. Got 3 days.')
def testNoMissingDates(self):
dates = list(pd.date_range(start='2020-01-01', periods=28, freq='D'))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
missing = util.find_missing_dates(
geox_data=df,
start_date=pd.Timestamp('2020-01-01'),
period_duration_weeks=4,
number_of_observations=28,
frequency='D')
self.assertListEqual(missing, [])
def testFindMissingDates(self):
dates = list(pd.date_range(start='2020-01-01', periods=28, freq='D'))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
missing = util.find_missing_dates(
geox_data=df,
start_date=pd.Timestamp('2020-01-02'),
period_duration_weeks=4,
number_of_observations=28,
frequency='D')
self.assertEqual(missing, np.array(['2020-01-29']))
def testCheckValidPeriods(self):
dates = list(pd.date_range(start='2020-01-01', periods=28*2, freq='D'))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
out = util.check_time_periods(
geox_data=df,
start_date_eval=pd.Timestamp('2020-01-01'),
start_date_aa_test=pd.Timestamp('2020-01-29'),
experiment_duration_weeks=4,
frequency='D')
self.assertTrue(out)
def testCheckValidPeriodsInferredFrequency(self):
dates = list(pd.date_range(start='2020-01-01', periods=28*2, freq='D'))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
out = util.check_time_periods(
geox_data=df,
start_date_eval=pd.Timestamp('2020-01-01'),
start_date_aa_test=pd.Timestamp('2020-01-29'),
experiment_duration_weeks=4,
frequency='infer')
self.assertTrue(out)
def testCheckValidPeriodsWeekly(self):
dates = list(pd.date_range(start='2020-01-01', periods=4*2, freq='W'))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
out = util.check_time_periods(
geox_data=df,
start_date_eval=pd.Timestamp('2020-01-01'),
start_date_aa_test=pd.Timestamp('2020-01-29'),
experiment_duration_weeks=4,
frequency='W')
self.assertTrue(out)
out_infer = util.check_time_periods(
geox_data=df,
start_date_eval=pd.Timestamp('2020-01-01'),
start_date_aa_test=pd.Timestamp('2020-01-29'),
experiment_duration_weeks=4,
frequency='infer')
self.assertTrue(out_infer)
def testInvalidPeriods(self):
dates = list(pd.date_range(start='2020-01-01', periods=27 * 2, freq='D'))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
missing = ['2020-02-24', '2020-02-25']
freq_str = 'days'
with self.assertRaises(ValueError) as cm:
_ = util.check_time_periods(
geox_data=df,
start_date_eval=pd.Timestamp('2020-01-01'),
start_date_aa_test=pd.Timestamp('2020-01-29'),
experiment_duration_weeks=4,
frequency='D')
self.assertEqual(
str(cm.exception),
(f'The AA test period contains the following {freq_str} ' +
f'{missing} for which we do not have data.'))
def testInvalidPeriodsWeekly(self):
dates = list(pd.date_range(start='2020-01-01', periods=7, freq='7D'))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
missing = ['2020-02-19']
freq_str = 'weeks'
with self.assertRaises(ValueError) as cm:
_ = util.check_time_periods(
geox_data=df,
start_date_eval=pd.Timestamp('2020-01-01'),
start_date_aa_test=pd.Timestamp('2020-01-29'),
experiment_duration_weeks=4,
frequency='W')
self.assertEqual(
str(cm.exception),
(f'The AA test period contains the following {freq_str} ' +
f'{missing} for which we do not have data.'))
def testInvalidPeriodsWeeklyMiddle(self):
dates = list(pd.date_range(start='2020-01-01', periods=8, freq='7D'))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
df.drop(df[df['date'] == '2020-01-08'].index, inplace=True)
missing = ['2020-01-08']
freq_str = 'weeks'
with self.assertRaises(ValueError) as cm:
_ = util.check_time_periods(
geox_data=df,
start_date_eval=pd.Timestamp('2020-01-01'),
start_date_aa_test=pd.Timestamp('2020-01-29'),
experiment_duration_weeks=4,
frequency='W')
self.assertEqual(
str(cm.exception),
(f'The evaluation period contains the following {freq_str} ' +
f'{missing} for which we do not have data.'))
def testInvalidFrequency(self):
dates = list(pd.date_range(start='2020-01-01', periods=8, freq='7D'))
geos = [1, 2]
df = pd.DataFrame({
'date': dates * len(geos),
'geo': sorted(geos * len(dates))
})
with self.assertRaises(ValueError) as cm:
_ = util.check_time_periods(
geox_data=df,
start_date_eval=pd.Timestamp('2020-01-01'),
start_date_aa_test=pd.Timestamp('2020-01-29'),
experiment_duration_weeks=4,
frequency='invalid')
self.assertEqual(
str(cm.exception),
'frequency should be one of ["infer", "D", "W"], got invalid')
def testHumanReadableFormat(self):
numbers = [123, 10765, 13987482, 8927462746, 1020000000000]
numb_formatted = [
util.human_readable_number(num) for num in numbers
]
self.assertEqual(numb_formatted, ['123', '10.8K', '14M', '8.93B', '1.02tn'])
def testFlagPercentageValue(self):
output = util.flag_percentage_value(val='10 %', value=9.0, operation='>')
self.assertEqual(output, 'color: red')
output = util.flag_percentage_value(val='10 %', value=10.1, operation='>')
self.assertEqual(output, 'color: black')
output = util.flag_percentage_value(val='10 %', value=9.0, operation='<')
self.assertEqual(output, 'color: black')
output = util.flag_percentage_value(val='10 %', value=10.1, operation='<')
self.assertEqual(output, 'color: red')
def testCreateOutputTable(self):
results = pd.DataFrame({
'num_pairs': [5, 4, 5, 4],
'experiment_response': [200, 100, 200, 100],
'experiment_spend': [20, 10, 20, 10],
'spend_response_ratio': [0.1, 0.1, 0.1, 0.1],
'budget': [1000, 1000, 500, 500],
'iroas': [0, 0, 0, 0],
'rmse': [1, 0.5, 2, 1],
'rmse_cost_adjusted': [1, 0.625, 2, 1.25],
'proportion_cost_in_experiment': [1, 0.8, 1, 0.8]
})
budgets_for_design = [500, 1000]
total_response = 300
total_spend = 25
geo_treatment = pd.DataFrame({
'geo': [1, 2, 3, 4],
'pair': [1, 2, 3, 4],
'response': [10, 3, 1, 4],
'spend': [1, 1.5, 0.5, 4]
})
average_order_value = 1
num_geos = 8
output = util.create_output_table(results, total_response, total_spend,
geo_treatment, budgets_for_design,
average_order_value, num_geos)
rmse_multiplier = 2.123172
minimum_detectable_iroas = [rmse_multiplier * 1, rmse_multiplier * 0.5]
minimum_detectable_lift = [
minimum_detectable_iroas[x] * budgets_for_design[x] * 100 /
geo_treatment['response'].sum()
for x in range(len(minimum_detectable_iroas))
]
minimum_detectable_lift = [f'{x:.2f} %' for x in minimum_detectable_lift]
minimum_detectable_iroas = [f'{x:.3}' for x in minimum_detectable_iroas]
expected_output = pd.DataFrame({
'Budget': ['500', '1K'],
'Minimum detectable iROAS': minimum_detectable_iroas,
'Minimum detectable lift in response': minimum_detectable_lift,
'Treatment/control/excluded geos': ['4 / 4 / 0', '4 / 4 / 0'],
'Revenue covered by treatment group': ['6.00 %', '6.00 %'],
'Cost/baseline response': ['2777.78 %', '5555.56 %'],
'Cost if test budget is scaled nationally': ['1.79K', '3.57K']
})
for col in output.columns:
print(output[col])
print(expected_output[col])
self.assertTrue(output.equals(expected_output))
def testCheckInputData(self):
temp_df = self.df.copy()
# remove one observation for geo #2
temp_df = temp_df[~((temp_df['geo'] == 2) &
(temp_df['date'] == '2020-10-10'))]
geox_data = util.check_input_data(temp_df)
expected_df = pd.DataFrame({
'date': pd.to_datetime(['2020-10-09', '2020-10-10', '2020-10-11'] * 4),
'geo': [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
'response': [10, 10, 10, 20, 0.0, 20, 30, 30, 30, 40, 40, 40],
'cost': [1.0, 1.0, 1.0, 2.0, 0.0, 2.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0],
}).sort_values(by=['date', 'geo']).reset_index(drop=True)
self.assertTrue(geox_data.equals(expected_df))
def testCheckInputDataColumns(self):
temp_df = self.df.copy()
# remove the column date
temp_df.drop(columns='date', inplace=True)
with self.assertRaisesRegex(
ValueError,
'The mandatory columns {\'date\'} are missing from the input data'
):
util.check_input_data(temp_df)
def testCheckInputDataDuplicatedDateGeo(self):
temp_df = self.df.copy().append(pd.DataFrame(
{'date': pd.to_datetime(['2020-01-01', '2020-01-01']),
'geo': [1, 1],
'response': [0, 1],
'cost': [0, 1]}))
with self.assertRaisesRegex(
ValueError, 'There are duplicated date geo pairs.'
):
util.check_input_data(temp_df)
def testCheckInputDataUnableToConvertToNumeric(self):
temp_df = self.df.copy()
# change the column response to something which cannot be converted to
# numeric
temp_df['response'] = ['na', 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]
with self.assertRaisesRegex(
ValueError, 'Unable to convert column response to numeric.'):
util.check_input_data(temp_df)
def testCheckInputDataWithMultipleColumnsToImpute(self):
temp_df = self.df.copy()
# remove one observation for geo #2
temp_df = temp_df[~((temp_df['geo'] == 2) &
(temp_df['date'] == '2020-10-10'))]
temp_df['numeric_col'] = 1
geox_data = util.check_input_data(temp_df,
['response', 'cost', 'numeric_col'])
expected_df = pd.DataFrame({
'date': pd.to_datetime(['2020-10-09', '2020-10-10', '2020-10-11'] * 4),
'geo': [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
'response': [10, 10, 10, 20, 0.0, 20, 30, 30, 30, 40, 40, 40],
'cost': [1.0, 1.0, 1.0, 2.0, 0.0, 2.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0],
'numeric_col': [
1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
],
}).sort_values(by=['date', 'geo']).reset_index(drop=True)
self.assertTrue(geox_data.equals(expected_df))
def testPairingNotInAList(self):
"""Checks an error is raised if pairs are not passed in a list."""
    # pairs is passed as a single dataframe instead of a list of dataframes.
pairs = pd.DataFrame({
'geo1': [1, 2, 2],
'geo2': [3, 4, 1],
'pair': [1, 2, 3]
})
with self.assertRaisesRegex(ValueError,
r'pairs must be a list of dataframes.'):
util.check_pairs(
pretest_data=self.df,
pairs=pairs)
def testPairingWithDuplicatedGeos(self):
"""Checks an error is raised if a geo appears in multiple pairs."""
# geo 1 and 2 appear in two pairs.
pairs = [
pd.DataFrame({
'geo1': [1, 2, 2],
'geo2': [3, 4, 1],
'pair': [1, 2, 3]
})
]
with self.assertRaisesRegex(
ValueError, f'Some geos are duplicated in the pairing {pairs[0]}.'):
util.check_pairs(
pretest_data=self.df,
pairs=pairs)
def testPairingWithMoreThanTwoGeosPerPair(self):
"""Checks an error is raised if a pair appears multiple times."""
# geo 1 and 2 appear in two pairs.
pairs = [
pd.DataFrame({
'geo1': [1, 2],
'geo2': [3, 4],
'pair': [1, 1]
})
]
with self.assertRaisesRegex(
ValueError, r'a pair should only have two geos.'):
util.check_pairs(
pretest_data=self.df,
pairs=pairs)
def testPairingWithGeosNotInPretestData(self):
"""Raises an error if a geo appears in the pairs but not in the data."""
# geo 5 and 6 appear in the pairs but not in the pretest data.
pairs = [pd.DataFrame({
'geo1': [1, 2, 5],
'geo2': [3, 4, 6],
'pair': [1, 2, 3]
})]
with self.assertRaisesRegex(ValueError,
r'The geos ' +
r'{5, 6} appear ' +
r'in the pairs but not in the pretest data.'):
util.check_pairs(
pretest_data=self.df,
pairs=pairs)
if __name__ == '__main__':
unittest.main()
|
|
import logging
import aiohttp
from aiohttp import ClientError
from duniterpy.api import bma, errors
from duniterpy.documents import BMAEndpoint, SecuredBMAEndpoint
from sakia.errors import NoPeerAvailable
from pkg_resources import parse_version
from socket import gaierror
import asyncio
import random
import jsonschema
import attr
import copy
async def parse_responses(responses):
result = (False, "")
for r in responses:
try:
if not result[0]:
if isinstance(r, BaseException):
result = (False, str(r))
elif r.status == 400:
error = await r.text()
try:
result = (False, errors.DuniterError(bma.parse_error(error)).message)
except jsonschema.ValidationError:
result = (False, error)
elif r.status == 200:
result = (True, (await r.json()))
elif not result[0]:
result = (False, (await r.text()))
else:
await r.release()
except Exception as e:
if not result[0]:
result = (False, str(e))
return result
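# Illustrative sketch (not part of the original module): how parse_responses
# behaves on a mix of failures and one successful reply. _FakeReply is a
# hypothetical stand-in for an aiohttp response, providing only the attributes
# used above; it is not a real aiohttp class.
class _FakeReply:
    def __init__(self, status, payload):
        self.status = status
        self._payload = payload
    async def json(self):
        return self._payload
    async def text(self):
        return str(self._payload)
    async def release(self):
        pass
def _example_parse_responses():
    # The exception is recorded as a failure, then overridden by the 200 reply,
    # so the result is (True, {"ok": True}).
    replies = [ConnectionError("node down"), _FakeReply(200, {"ok": True})]
    return asyncio.run(parse_responses(replies))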
def filter_endpoints(request, nodes):
def compare_versions(node, version):
if node.version and node.version != '':
try:
return parse_version(node.version) >= parse_version(version)
except TypeError:
return False
else:
return True
filters = {
bma.ud.history: lambda n: compare_versions(n, "0.11.0"),
bma.tx.history: lambda n: compare_versions(n, "0.11.0"),
bma.blockchain.membership: lambda n: compare_versions(n, "0.14")
}
if request in filters:
nodes = [n for n in nodes if filters[request](n)]
endpoints = []
for n in nodes:
endpoints += [e for e in n.endpoints if type(e) in (BMAEndpoint, SecuredBMAEndpoint)]
return endpoints
def make_hash(o):
"""
Makes a hash from a dictionary, list, tuple or set to any level, that contains
only other hashable types (including any lists, tuples, sets, and
dictionaries).
"""
if isinstance(o, (set, tuple, list)):
return tuple(sorted([make_hash(e) for e in o]))
elif not isinstance(o, dict):
return hash(o)
new_o = copy.deepcopy(o)
for k, v in new_o.items():
new_o[k] = make_hash(v)
return hash(tuple(frozenset(sorted(new_o.items()))))
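# Illustrative sketch (not part of the original module): make_hash sorts keys
# and container elements, so two structurally equal replies hash to the same
# value regardless of ordering. The data below is made up.
def _example_make_hash():
    a = {"block": 100, "peers": ["AAA", "BBB"]}
    b = {"peers": ["BBB", "AAA"], "block": 100}
    assert make_hash(a) == make_hash(b)
    return make_hash(a)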
def _compare_json(first, second):
"""
Compare two json dicts
    :param first: the first dictionary
    :param second: the second dictionary
:return: True if the json dicts are the same
:rtype: bool
"""
def ordered(obj):
if isinstance(obj, dict):
try:
return sorted((k, ordered(v)) for k, v in obj.items())
except TypeError:
return obj
if isinstance(obj, list):
try:
return sorted(ordered(x) for x in obj)
except TypeError:
return obj
else:
return obj
return ordered(first) == ordered(second)
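# Illustrative sketch (not part of the original module): _compare_json ignores
# the ordering of dict keys and list elements when comparing two replies.
def _example_compare_json():
    first = {"peers": ["AAA", "BBB"], "block": 100}
    second = {"block": 100, "peers": ["BBB", "AAA"]}
    assert _compare_json(first, second)
    return True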
def _filter_data(request, data):
filtered = data
if request is bma.tx.history:
filtered = copy.deepcopy(data)
filtered["history"].pop("sending")
filtered["history"].pop("receiving")
filtered["history"].pop("pending")
elif request is bma.wot.requirements:
filtered = copy.deepcopy(data)
for idty in filtered["identities"]:
for c in idty["certifications"]:
c.pop("expiresIn")
idty.pop('membershipPendingExpiresIn')
return filtered
def _merge_lookups(answers_data):
if len(answers_data) == 1:
data = next((v for v in answers_data.values()))
if isinstance(data, errors.DuniterError):
raise data
lookup_data = {"partial": False,
"results": []}
for dict_hash in answers_data:
if not isinstance(answers_data[dict_hash], errors.DuniterError):
for data in answers_data[dict_hash]["results"]:
lookup_data["results"].append(data)
return lookup_data
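# Illustrative sketch (not part of the original module): _merge_lookups
# concatenates the "results" lists returned by different nodes (keyed by the
# hash of each answer). The lookup payloads below are made up.
def _example_merge_lookups():
    answers_data = {
        'hash-a': {"partial": False, "results": [{"pubkey": "AAA"}]},
        'hash-b': {"partial": False, "results": [{"pubkey": "BBB"}]},
    }
    merged = _merge_lookups(answers_data)
    assert merged == {"partial": False,
                      "results": [{"pubkey": "AAA"}, {"pubkey": "BBB"}]}
    return merged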
def _best_answer(answers, answers_data, nb_verification):
best_dict_hash = next(k for k in answers.keys())
best_dict_hash_score = len(answers[best_dict_hash])
for dict_hash in answers:
if len(answers[dict_hash]) > best_dict_hash_score:
best_dict_hash = dict_hash
best_dict_hash_score = len(answers[dict_hash])
if len(answers[dict_hash]) >= nb_verification:
if isinstance(answers_data[dict_hash], errors.DuniterError):
raise answers_data[dict_hash]
else:
return answers_data[dict_hash]
if isinstance(answers_data[best_dict_hash], errors.DuniterError):
raise answers_data[best_dict_hash]
else:
return answers_data[best_dict_hash]
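# Illustrative sketch (not part of the original module): _best_answer returns
# the data whose hash was reported by the largest number of nodes; here
# 'hash-b' wins with two agreeing nodes. Node names and payloads are made up.
def _example_best_answer():
    answers = {'hash-a': ['node1'], 'hash-b': ['node2', 'node3']}
    answers_data = {'hash-a': {"height": 99}, 'hash-b': {"height": 100}}
    return _best_answer(answers, answers_data, nb_verification=2)  # {"height": 100}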
@attr.s()
class BmaConnector:
"""
This class is used to access BMA API.
"""
_nodes_processor = attr.ib()
_user_parameters = attr.ib()
_logger = attr.ib(default=attr.Factory(lambda: logging.getLogger('sakia')))
async def _verified_request(self, node, request):
try:
res = await request
self._nodes_processor.handle_success(node)
return res
except errors.DuniterError as e:
if e.ucode == errors.HTTP_LIMITATION:
self._logger.debug("Exception in responses : " + str(e))
self._nodes_processor.handle_failure(node)
else:
return e
except BaseException as e:
self._logger.debug(str(e))
self._nodes_processor.handle_failure(node)
return e
async def verified_get(self, currency, request, req_args):
# If no node is known as a member, lookup synced nodes as a fallback
synced_nodes = self._nodes_processor.synced_nodes(currency)
offline_nodes = self._nodes_processor.offline_synced_nodes(currency)
random_offline_node = random.sample(offline_nodes, min(1, len(offline_nodes)))
nodes_generator = (n for n in synced_nodes)
answers = {}
answers_data = {}
nb_verification = min(max(1, 0.66 * len(synced_nodes)), 3)
        # We try to find agreeing nodes, from 1 up to 66% of the synced nodes, capped at 3
session = aiohttp.ClientSession()
filtered_data = {}
try:
while max([len(nodes) for nodes in answers.values()] + [0]) <= nb_verification:
futures = []
try:
for i in range(0, int(nb_verification*1.4)+1):
node = next(nodes_generator)
endpoints = filter_endpoints(request, [node])
if not endpoints:
continue
endpoint = random.choice(endpoints)
self._logger.debug(
"Requesting {0} on endpoint {1}".format(str(request.__name__), str(endpoint)))
futures.append(self._verified_request(node, request(next(
endpoint.conn_handler(session, proxy=self._user_parameters.proxy())),
**req_args)))
if random_offline_node:
futures.append(self._verified_request(random_offline_node[0], request(next(
endpoint.conn_handler(session, proxy=self._user_parameters.proxy())),
**req_args)))
except StopIteration:
                    # When no more nodes are available, we exit the while loop
break
finally:
                    # Every time we leave the inner try block (break or not), we gather the futures
if futures:
responses = await asyncio.gather(*futures, return_exceptions=True)
for r in responses:
if isinstance(r, errors.DuniterError):
if r.ucode == errors.HTTP_LIMITATION:
self._logger.debug("Exception in responses : " + r.message)
continue
else:
data_hash = hash(r.ucode)
elif isinstance(r, BaseException):
self._logger.debug("Exception in responses : " + str(r))
continue
else:
filtered_data = _filter_data(request, r)
data_hash = make_hash(filtered_data)
answers_data[data_hash] = r
if data_hash not in answers:
answers[data_hash] = [node]
else:
answers[data_hash].append(node)
finally:
await session.close()
if len(answers_data) > 0:
if request is bma.wot.lookup:
return _merge_lookups(answers_data)
else:
return _best_answer(answers, answers_data, nb_verification)
raise NoPeerAvailable("", len(synced_nodes))
async def simple_get(self, currency, request, req_args):
endpoints = filter_endpoints(request, self._nodes_processor.synced_nodes(currency))
tries = 0
while tries < 3 and endpoints:
endpoint = random.choice(endpoints)
endpoints.remove(endpoint)
try:
self._logger.debug("Requesting {0} on endpoint {1}".format(str(request.__name__), str(endpoint)))
async with aiohttp.ClientSession() as session:
                    json_data = await request(next(endpoint.conn_handler(session)), **req_args)
return json_data
except errors.DuniterError as e:
if e.ucode == errors.HTTP_LIMITATION:
self._logger.debug(str(e))
tries += 1
else:
raise
except (ClientError, gaierror, asyncio.TimeoutError,
ValueError, jsonschema.ValidationError) as e:
self._logger.debug(str(e))
tries += 1
except AttributeError as e:
if ("feed_appdata", "do_handshake") in str(e):
self._logger.debug(str(e))
raise NoPeerAvailable("", len(endpoints))
async def get(self, currency, request, req_args={}, verify=True):
"""
:param str currency: the currency requested
:param class request: A bma request class calling for data
:param dict req_args: Arguments to pass to the request constructor
:param bool verify: Verify returned value against multiple nodes
:return: The returned data
"""
if verify:
return await self.verified_get(currency, request, req_args)
else:
return await self.simple_get(currency, request, req_args)
async def broadcast(self, currency, request, req_args={}):
"""
Broadcast data to a network.
        Sends the data to a sample (at most 6) of the synced nodes' endpoints.
:param str currency: the currency target
:param request: A duniterpy bma request class
:param req_args: Arguments to pass to the request constructor
:return: All nodes replies
:rtype: tuple of aiohttp replies
        .. note:: If one node accepts the request (returns 200),
        the broadcast should be considered accepted by the network.
"""
filtered_endpoints = filter_endpoints(request, self._nodes_processor.synced_nodes(currency))
endpoints = random.sample(filtered_endpoints, 6) if len(filtered_endpoints) > 6 else filtered_endpoints
replies = []
if len(endpoints) > 0:
async with aiohttp.ClientSession() as session:
for endpoint in endpoints:
self._logger.debug("Trying to connect to : " + str(endpoint))
reply = asyncio.ensure_future(request(next(endpoint.conn_handler(session,
proxy=self._user_parameters.proxy())),
**req_args))
replies.append(reply)
result = await asyncio.gather(*replies, return_exceptions=True)
return tuple(result)
else:
raise NoPeerAvailable("", len(endpoints))
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from nova import exception
from nova import objects
from nova.objects import instance_mapping
from nova.tests.unit.objects import test_cell_mapping
from nova.tests.unit.objects import test_objects
def get_db_mapping(**updates):
db_mapping = {
'id': 1,
'instance_uuid': uuidutils.generate_uuid(),
'cell_id': None,
'project_id': 'fake-project',
'user_id': 'fake-user',
'created_at': None,
'updated_at': None,
'queued_for_delete': False,
}
db_mapping["cell_mapping"] = test_cell_mapping.get_db_mapping(id=42)
db_mapping['cell_id'] = db_mapping["cell_mapping"]["id"]
db_mapping.update(updates)
return db_mapping
class _TestInstanceMappingObject(object):
def _check_cell_map_value(self, db_val, cell_obj):
self.assertEqual(db_val, cell_obj.id)
@mock.patch.object(instance_mapping.InstanceMapping,
'_get_by_instance_uuid_from_db')
def test_get_by_instance_uuid(self, uuid_from_db):
db_mapping = get_db_mapping()
uuid_from_db.return_value = db_mapping
mapping_obj = objects.InstanceMapping().get_by_instance_uuid(
self.context, db_mapping['instance_uuid'])
uuid_from_db.assert_called_once_with(self.context,
db_mapping['instance_uuid'])
self.compare_obj(mapping_obj, db_mapping,
subs={'cell_mapping': 'cell_id'},
comparators={
'cell_mapping': self._check_cell_map_value})
@mock.patch.object(instance_mapping.InstanceMapping,
'_get_by_instance_uuid_from_db')
def test_get_by_instance_uuid_cell_mapping_none(self, uuid_from_db):
db_mapping = get_db_mapping(cell_mapping=None, cell_id=None)
uuid_from_db.return_value = db_mapping
mapping_obj = objects.InstanceMapping().get_by_instance_uuid(
self.context, db_mapping['instance_uuid'])
uuid_from_db.assert_called_once_with(self.context,
db_mapping['instance_uuid'])
self.compare_obj(mapping_obj, db_mapping,
subs={'cell_mapping': 'cell_id'})
@mock.patch.object(instance_mapping.InstanceMapping, '_create_in_db')
def test_create(self, create_in_db):
db_mapping = get_db_mapping()
uuid = db_mapping['instance_uuid']
create_in_db.return_value = db_mapping
mapping_obj = objects.InstanceMapping(self.context)
mapping_obj.instance_uuid = uuid
mapping_obj.cell_mapping = objects.CellMapping(self.context,
id=db_mapping['cell_mapping']['id'])
mapping_obj.project_id = db_mapping['project_id']
mapping_obj.user_id = db_mapping['user_id']
mapping_obj.create()
create_in_db.assert_called_once_with(self.context,
{'instance_uuid': uuid,
'queued_for_delete': False,
'cell_id': db_mapping['cell_mapping']['id'],
'project_id': db_mapping['project_id'],
'user_id': db_mapping['user_id']})
self.compare_obj(mapping_obj, db_mapping,
subs={'cell_mapping': 'cell_id'},
comparators={
'cell_mapping': self._check_cell_map_value})
@mock.patch.object(instance_mapping.InstanceMapping, '_create_in_db')
def test_create_cell_mapping_none(self, create_in_db):
db_mapping = get_db_mapping(cell_mapping=None, cell_id=None)
uuid = db_mapping['instance_uuid']
create_in_db.return_value = db_mapping
mapping_obj = objects.InstanceMapping(self.context)
mapping_obj.instance_uuid = uuid
mapping_obj.cell_mapping = None
mapping_obj.project_id = db_mapping['project_id']
mapping_obj.user_id = db_mapping['user_id']
mapping_obj.create()
create_in_db.assert_called_once_with(self.context,
{'instance_uuid': uuid,
'queued_for_delete': False,
'project_id': db_mapping['project_id'],
'user_id': db_mapping['user_id']})
self.compare_obj(mapping_obj, db_mapping,
subs={'cell_mapping': 'cell_id'})
self.assertIsNone(mapping_obj.cell_mapping)
@mock.patch.object(instance_mapping.InstanceMapping, '_create_in_db')
def test_create_cell_mapping_with_qfd_true(self, create_in_db):
db_mapping = get_db_mapping(cell_mapping=None, cell_id=None)
create_in_db.return_value = db_mapping
mapping_obj = objects.InstanceMapping(self.context)
mapping_obj.instance_uuid = db_mapping['instance_uuid']
mapping_obj.cell_mapping = None
mapping_obj.project_id = db_mapping['project_id']
mapping_obj.user_id = db_mapping['user_id']
mapping_obj.queued_for_delete = True
mapping_obj.create()
create_in_db.assert_called_once_with(self.context,
{'instance_uuid': db_mapping['instance_uuid'],
'queued_for_delete': True,
'project_id': db_mapping['project_id'],
'user_id': db_mapping['user_id']})
@mock.patch.object(instance_mapping.InstanceMapping, '_save_in_db')
def test_save(self, save_in_db):
db_mapping = get_db_mapping()
uuid = db_mapping['instance_uuid']
save_in_db.return_value = db_mapping
mapping_obj = objects.InstanceMapping(self.context)
mapping_obj.instance_uuid = uuid
mapping_obj.cell_mapping = objects.CellMapping(self.context, id=42)
mapping_obj.save()
save_in_db.assert_called_once_with(self.context,
db_mapping['instance_uuid'],
{'cell_id': mapping_obj.cell_mapping.id,
'instance_uuid': uuid})
self.compare_obj(mapping_obj, db_mapping,
subs={'cell_mapping': 'cell_id'},
comparators={
'cell_mapping': self._check_cell_map_value})
@mock.patch.object(instance_mapping.InstanceMapping, '_destroy_in_db')
def test_destroy(self, destroy_in_db):
uuid = uuidutils.generate_uuid()
mapping_obj = objects.InstanceMapping(self.context)
mapping_obj.instance_uuid = uuid
mapping_obj.destroy()
destroy_in_db.assert_called_once_with(self.context, uuid)
def test_cell_mapping_nullable(self):
mapping_obj = objects.InstanceMapping(self.context)
# Just ensure this doesn't raise an exception
mapping_obj.cell_mapping = None
def test_obj_make_compatible(self):
uuid = uuidutils.generate_uuid()
im_obj = instance_mapping.InstanceMapping(context=self.context)
fake_im_obj = instance_mapping.InstanceMapping(context=self.context,
instance_uuid=uuid,
queued_for_delete=False,
user_id='fake-user')
obj_primitive = fake_im_obj.obj_to_primitive('1.1')
obj = im_obj.obj_from_primitive(obj_primitive)
self.assertIn('queued_for_delete', obj)
self.assertNotIn('user_id', obj)
obj_primitive = fake_im_obj.obj_to_primitive('1.0')
obj = im_obj.obj_from_primitive(obj_primitive)
self.assertIn('instance_uuid', obj)
self.assertEqual(uuid, obj.instance_uuid)
self.assertNotIn('queued_for_delete', obj)
@mock.patch('nova.objects.instance_mapping.LOG.error')
def test_obj_load_attr(self, mock_log):
im_obj = instance_mapping.InstanceMapping()
# Access of unset user_id should have special handling
self.assertRaises(exception.ObjectActionError, im_obj.obj_load_attr,
'user_id')
msg = ('The unset user_id attribute of an unmigrated instance mapping '
'should not be accessed.')
mock_log.assert_called_once_with(msg)
# Access of any other unset attribute should fall back to base class
self.assertRaises(NotImplementedError, im_obj.obj_load_attr,
'project_id')
class TestInstanceMappingObject(test_objects._LocalTest,
_TestInstanceMappingObject):
pass
class TestRemoteInstanceMappingObject(test_objects._RemoteTest,
_TestInstanceMappingObject):
pass
class _TestInstanceMappingListObject(object):
def _check_cell_map_value(self, db_val, cell_obj):
self.assertEqual(db_val, cell_obj.id)
@mock.patch.object(instance_mapping.InstanceMappingList,
'_get_by_project_id_from_db')
def test_get_by_project_id(self, project_id_from_db):
db_mapping = get_db_mapping()
project_id_from_db.return_value = [db_mapping]
mapping_obj = objects.InstanceMappingList().get_by_project_id(
self.context, db_mapping['project_id'])
project_id_from_db.assert_called_once_with(self.context,
db_mapping['project_id'])
self.compare_obj(mapping_obj.objects[0], db_mapping,
subs={'cell_mapping': 'cell_id'},
comparators={
'cell_mapping': self._check_cell_map_value})
@mock.patch.object(instance_mapping.InstanceMappingList,
'_destroy_bulk_in_db')
def test_destroy_bulk(self, destroy_bulk_in_db):
uuids_to_be_deleted = []
for i in range(0, 5):
uuid = uuidutils.generate_uuid()
uuids_to_be_deleted.append(uuid)
destroy_bulk_in_db.return_value = 5
result = objects.InstanceMappingList.destroy_bulk(self.context,
uuids_to_be_deleted)
destroy_bulk_in_db.assert_called_once_with(self.context,
uuids_to_be_deleted)
self.assertEqual(5, result)
class TestInstanceMappingListObject(test_objects._LocalTest,
_TestInstanceMappingListObject):
pass
class TestRemoteInstanceMappingListObject(test_objects._RemoteTest,
_TestInstanceMappingListObject):
pass
|
|
import re
import copy
from collections import defaultdict
content = []
# where we keep copies
clipboard = {}
vulnerability = '00'
seat = '0'
# meta information about the BML-file, supported:
# TITLE = the name of the system
# DESCRIPTION = a short summary of the system
# AUTHOR = the system's author(s)
# data in meta is only set once, and isn't overwritten
meta = defaultdict(str)
class Diagram:
"""A structure for deal diagrams"""
# each hand can be None or a tuple of four strings (s, h, d, c)
north = None
east = None
south = None
west = None
dealer = None # can be None or "N", "E", "S", "W"
vul = None # can be None or "ALL", "NO", "NS", "EW"
board = None # can be None or a string
    lead = None # can be None or a tuple of strings ([shdc], [2-9AKQJT])
contract = None # can be None or a tuple of strings
def __init__(self, firstrow, hands):
for h in hands:
hand = h[0]
if hand == 'N':
self.north = h[1:]
elif hand == 'E':
self.east = h[1:]
elif hand == 'S':
self.south = h[1:]
elif hand == 'W':
self.west = h[1:]
dealer = re.search(r'(?:\A|\s)([NESW]),?(?:\Z|\s)', firstrow)
if dealer:
self.dealer = dealer.group(1)
vul = re.search(r'(?:\A|\s)(All|None|EW|NS),?(?:\Z|\s)', firstrow)
if vul:
self.vul = vul.group(1)
board = re.search(r'(?:\A|\s)#?(\d+),?(?:\Z|\s)', firstrow)
if board:
self.board = board.group(1)
lead = re.search(r'(?:\A|\s)([shdc])([2-9AKQJT]),?(?:\Z|\s)', firstrow)
if lead:
self.lead = lead.groups()
contract = re.search(r'(?:\A|\s)(PASS),?(?:\Z|\s)', firstrow)
if contract:
self.contract = ('P', None, None, None)
else:
contract = re.search(r'(?:\A|\s)(\d)([SHDCN])(XX?)?([NESW]),?(?:\Z|\s)', firstrow)
if contract:
self.contract = contract.groups()
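# Illustrative sketch (not part of the original parser): building a Diagram
# from a header row and one hand tuple. The deal below is made up.
def _example_diagram():
    hands = [('N', 'AKQ2', 'T98', '765', '432')]
    diag = Diagram('W All #7', hands)
    assert diag.dealer == 'W'
    assert diag.vul == 'All'
    assert diag.board == '7'
    assert diag.north == ('AKQ2', 'T98', '765', '432')
    return diag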
class Node:
"""A node in a bidding table"""
def __init__(self, bid, desc, indentation, parent=None, desc_indentation=-1):
self.vul = '00'
self.seat = '0'
self.export = True
self.bid = bid
self.desc = desc
self.indentation = indentation
self.desc_indentation = desc_indentation
self.children = []
self.parent = parent
bid = re.sub(r'[-;]', '', bid)
bid = bid.replace('NT', 'N')
self.bidrepr = bid
bids = re.findall(r'\d[A-Za-z]+', self.bidrepr)
if bids and not '(' in self.bidrepr:
self.bidrepr = 'P'.join(bids)
def add_child(self, bid, desc, indentation, desc_indentation):
"""appends a new child Node to the node"""
child = Node(bid, desc, indentation, self, desc_indentation)
child.vul = self.vul
child.seat = self.seat
self.children.append(child)
return self.children[-1]
def get_sequence(self):
"""List with all the parent, and the current, bids"""
if self.parent.bidrepr == 'root':
return [self.bidrepr]
if self.parent:
ps = self.parent.get_sequence()
ps.append(self.bidrepr)
return ps
def set_children(self, children):
"""Used when copying from another Node"""
self.children = copy.deepcopy(children)
for c in self.children:
c.parent = self
# c.vul = self.vul
# c.seat = self.vul
def __getitem__(self, arg):
return self.children[arg]
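# Illustrative sketch (not part of the original parser): how Node normalises a
# bid string into bidrepr and how a bid sequence is read back from the tree.
# The bid and description below are made up.
def _example_node_bidrepr():
    root = Node('root', 'root', -1)
    child = root.add_child('1NT-2C', 'Stayman', 0, 10)
    # '-' and ';' are stripped, 'NT' becomes 'N', and an implicit opponent
    # pass 'P' is inserted between consecutive own bids: '1NT-2C' -> '1NP2C'.
    assert child.bidrepr == '1NP2C'
    assert child.get_sequence() == ['1NP2C']
    return child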
def create_bidtree(text):
global clipboard, vulnerability, seat
root = Node('root', 'root', -1)
root.vul = vulnerability
root.seat = seat
lastnode = root
# breaks when no more CUT in bidtable
while True:
cut = re.search(r'^(\s*)#\s*CUT\s+(\S+)\s*\n(.*)#ENDCUT[ ]*\n?',
text, flags=re.DOTALL|re.MULTILINE)
if not cut:
break
value = cut.group(3).split('\n')
for i in range(len(value)):
value[i] = value[i][len(cut.group(1)):]
value = '\n'.join(value)
clipboard[cut.group(2)] = value # group2=key
text = text[:cut.start()]+text[cut.end():]
# breaks when no more COPY in bidtable
while True:
copy = re.search(r'^(\s*)#\s*COPY\s+(\S+)\s*\n(.*)#ENDCOPY[ ]*\n?',
text, flags=re.DOTALL|re.MULTILINE)
if not copy:
break
value = copy.group(3).split('\n')
for i in range(len(value)):
value[i] = value[i][len(copy.group(1)):]
value = '\n'.join(value)
clipboard[copy.group(2)] = value # group2=key
text = text[:copy.end(3)]+text[copy.end():]
text = text[:copy.start()]+text[copy.start(3):]
# breaks when no more PASTE in bidtable
while True:
paste = re.search(r'^(\s*)#\s*PASTE\s+(\S+)[^\S\n]*(.*)\n?', text, flags=re.MULTILINE)
if not paste:
break
indentation = paste.group(1)
lines = clipboard[paste.group(2)]
for r in paste.group(3).split():
target, replacement = r.split('=')
lines = lines.replace(target, replacement)
lines = lines.split('\n')
for l in range(len(lines)):
lines[l] = indentation + lines[l]
text = text[:paste.start()] + '\n'.join(lines) + '\n' + text[paste.end():]
hide = re.search(r'^\s*#\s*HIDE\s*\n', text, flags=re.MULTILINE)
if hide:
root.export = False
text = text[:hide.start()]+text[hide.end():]
text = re.sub(r'^\s*#\s*BIDTABLE\s*\n', '', text)
if text.strip() == '':
return None
for row in text.split('\n'):
original_row = row
if row.strip() == '':
continue # could perhaps be nicer by stripping spaces resulting from copy/paste
indentation = len(row) - len(row.lstrip())
        # If the indentation is at the same level as the last bid's
        # description indentation, the description should just
        # continue but with a line break
if indentation > 0 and indentation == lastnode.desc_indentation:
lastnode.desc += '\\n' + row.lstrip()
continue
row = row.strip()
bid = row.split(' ')[0]
desc = ' '.join(row.split(' ')[1:]).strip()
desc_indentation = original_row.find(desc)
# removes equal signs at the beginning of the description
new_desc = re.sub(r'^=\s*', '', desc)
desc_indentation += len(desc) - len(new_desc)
desc = new_desc
while indentation < lastnode.indentation:
lastnode = lastnode.parent
if indentation > lastnode.indentation:
lastnode = lastnode.add_child(bid, desc, indentation, desc_indentation)
elif indentation == lastnode.indentation:
lastnode = lastnode.parent.add_child(bid, desc, indentation, desc_indentation)
return root
class ContentType:
BIDTABLE = 1
PARAGRAPH = 2
H1 = 3
H2 = 4
H3 = 5
H4 = 6
LIST = 7
ENUM = 8
DIAGRAM = 9
TABLE = 10
DESCRIPTION = 11
BIDDING = 12
def get_content_type(text):
global meta, vulnerability, seat
if text.startswith('****'):
return (ContentType.H4, text[4:].lstrip())
if text.startswith('***'):
return (ContentType.H3, text[3:].lstrip())
if text.startswith('**'):
return (ContentType.H2, text[2:].lstrip())
if text.startswith('*'):
return (ContentType.H1, text[1:].lstrip())
# The first element is empty, therefore [1:]
if(re.match(r'^\s*-', text)):
if text.find(' :: ') >= 0:
return (ContentType.DESCRIPTION, re.split(r'^\s*-\s*', text, flags=re.MULTILINE)[1:])
return (ContentType.LIST, re.split(r'^\s*-\s*', text, flags=re.MULTILINE)[1:])
if(re.match(r'^\s*#VUL', text)):
vulnerability = text.split()[1]
return None
if(re.match(r'^\s*#SEAT', text)):
seat = text.split()[1]
return None
if(re.match(r'^\s*1\.', text)):
return (ContentType.ENUM, re.split(r'^\s*\d*\.\s*', text, flags=re.MULTILINE)[1:])
if(re.match(r'^\s*\(?[1-7]?[NTPDRCDHS]\)?\s+\(?[1-7]?[NTPDRCDHS]\)?\s+\(?[1-7]?[NTPDRCDHS]\)?\s+\(?[1-7]?[NTPDRCDHS]\)?\s*', text)):
table = []
for r in text.split('\n'):
if r:
table.append(r.split())
return (ContentType.BIDDING, table)
if(re.match(r'^\s*\(?\d[A-Za-z]+', text)):
bidtree = create_bidtree(text)
if bidtree:
return (ContentType.BIDTABLE, bidtree)
return None
# Tables
if(re.match(r'^\s*\|', text)):
table = []
rows = text.split('\n')
for r in rows:
table.append([c.strip() for c in re.findall(r'(?<=\|)[^\|]+', r)])
return (ContentType.TABLE, table)
# diagrams
hands = re.findall(r"""^\s*([NESW]):?\s*
([2-9AKQJTx-]+)\s+
([2-9AKQJTx-]+)\s+
([2-9AKQJTx-]+)\s+
([2-9AKQJTx-]+)""",
text, flags=re.MULTILINE|re.VERBOSE)
if hands and len(hands) + 2 >= len(text.split('\n')):
return (ContentType.DIAGRAM, Diagram(text.split('\n')[0], hands))
metamatch = re.match(r'^\s*#\+(\w+):\s*(.*)', text)
if(metamatch):
keyword = metamatch.group(1)
if keyword in meta:
return None
value = metamatch.group(2)
meta[keyword] = value
return None
if(re.match(r'^\s*#', text)):
bidtree = create_bidtree(text)
if bidtree:
return (ContentType.BIDTABLE, bidtree)
return None
if(re.search(r'\S', text)):
text = re.sub(r'\n +', '\n', text.strip())
return (ContentType.PARAGRAPH, text)
return None
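# Illustrative sketch (not part of the original parser): two simple dispatches
# of get_content_type. The heading text and TITLE value are made up.
def _example_get_content_type():
    # A leading '*' marks an H1 heading; the remaining text is the payload.
    kind, payload = get_content_type('*Opening bids')
    assert kind == ContentType.H1 and payload == 'Opening bids'
    # A meta line returns None but fills the module-level `meta` dict.
    assert get_content_type('#+TITLE: My system') is None
    assert meta['TITLE'] == 'My system'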
def include_file(matchobj):
filename = matchobj.group(1)
text = ''
with open(filename, 'r') as f:
text = f.read()
return '\n' + text + '\n'
def content_from_file(filename):
global content
paragraphs = []
with open(filename, 'r') as f:
text = f.read()
text = re.sub(r'^\s*#\s*INCLUDE\s*(\S+)\s*\n?', include_file, text, flags=re.MULTILINE)
text = re.sub(r'^//.*\n', '', text, flags=re.MULTILINE)
text = re.sub(r'//.*', '', text)
paragraphs = re.split(r'([ ]*\n){2,}', text)
for c in paragraphs:
content_type = get_content_type(c)
if content_type:
content.append(content_type)
if __name__ == '__main__':
# print('To use BML, use the subprograms bml2html, bml2latex or bml2bss')
content_from_file('test.bml')
|
|
'''
Module that does most of the heavy lifting for the ``conda build`` command.
'''
from __future__ import absolute_import, division, print_function
import io
import json
import os
import shutil
import stat
import subprocess
import sys
import tarfile
from os.path import exists, isdir, isfile, islink, join
import fnmatch
import conda.config as cc
import conda.plan as plan
from conda.api import get_index
from conda.compat import PY3
from conda.fetch import fetch_index
from conda.install import prefix_placeholder, linked
from conda.utils import url_path
from conda.resolve import Resolve, MatchSpec, NoPackagesFound
from conda_build import environ, source, tarcheck
from conda_build.config import config
from conda_build.scripts import create_entry_points, bin_dirname
from conda_build.post import (post_process, post_build,
fix_permissions, get_build_metadata)
from conda_build.utils import rm_rf, _check_call
from conda_build.index import update_index
from conda_build.create_test import (create_files, create_shell_files,
create_py_files, create_pl_files)
def prefix_files():
'''
Returns a set of all files in prefix.
'''
res = set()
for root, dirs, files in os.walk(config.build_prefix):
for fn in files:
res.add(join(root, fn)[len(config.build_prefix) + 1:])
for dn in dirs:
path = join(root, dn)
if islink(path):
res.add(path[len(config.build_prefix) + 1:])
return res
def create_post_scripts(m):
'''
Create scripts to run after build step
'''
recipe_dir = m.path
ext = '.bat' if sys.platform == 'win32' else '.sh'
for tp in 'pre-link', 'post-link', 'pre-unlink':
src = join(recipe_dir, tp + ext)
if not isfile(src):
continue
dst_dir = join(config.build_prefix,
'Scripts' if sys.platform == 'win32' else 'bin')
if not isdir(dst_dir):
os.makedirs(dst_dir, int('755', 8))
dst = join(dst_dir, '.%s-%s%s' % (m.name(), tp, ext))
shutil.copyfile(src, dst)
os.chmod(dst, int('755', 8))
def have_prefix_files(files):
'''
Yields files that contain the current prefix in them, and modifies them
to replace the prefix with a placeholder.
    :param files: Filenames to check for instances of prefix
    :type files: list of str
    :return: yields tuples of strings (prefix, mode, filename) for matching files
'''
prefix = config.build_prefix
prefix_bytes = prefix.encode('utf-8')
alt_prefix = prefix.replace('\\', '/')
alt_prefix_bytes = alt_prefix.encode('utf-8')
prefix_placeholder_bytes = prefix_placeholder.encode('utf-8')
for f in files:
if f.endswith(('.pyc', '.pyo', '.a')):
continue
path = join(prefix, f)
if isdir(path):
continue
if sys.platform != 'darwin' and islink(path):
# OSX does not allow hard-linking symbolic links, so we cannot
# skip symbolic links (as we can on Linux)
continue
with open(path, 'rb') as fi:
data = fi.read()
mode = 'binary' if b'\x00' in data else 'text'
if mode == 'text':
if not (sys.platform == 'win32' and alt_prefix_bytes in data):
# Use the placeholder for maximal backwards compatibility, and
# to minimize the occurrences of usernames appearing in built
# packages.
data = rewrite_file_with_new_prefix(path, data, prefix_bytes, prefix_placeholder_bytes)
if prefix_bytes in data:
yield (prefix, mode, f)
if (sys.platform == 'win32') and (alt_prefix_bytes in data):
# some windows libraries use unix-style path separators
yield (alt_prefix, mode, f)
if prefix_placeholder_bytes in data:
yield (prefix_placeholder, mode, f)
def rewrite_file_with_new_prefix(path, data, old_prefix, new_prefix):
# Old and new prefix should be bytes
data = data.replace(old_prefix, new_prefix)
st = os.stat(path)
# Save as
with open(path, 'wb') as fo:
fo.write(data)
os.chmod(path, stat.S_IMODE(st.st_mode) | stat.S_IWUSR) # chmod u+w
return data
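# Illustrative sketch (not part of conda-build): rewrite_file_with_new_prefix
# performs a plain byte substitution, writes the result back and keeps the file
# user-writable. The temporary file and the prefixes below are made up.
def _example_rewrite_prefix():
    import tempfile
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as fo:
        fo.write(b'#!/old/prefix/bin/python\n')
    with open(path, 'rb') as fi:
        data = fi.read()
    new_data = rewrite_file_with_new_prefix(path, data, b'/old/prefix', b'/new/prefix')
    os.remove(path)
    return new_data  # b'#!/new/prefix/bin/python\n'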
def create_info_files(m, files, include_recipe=True):
'''
Creates the metadata files that will be stored in the built package.
:param m: Package metadata
:type m: Metadata
:param files: Paths to files to include in package
:type files: list of str
:param include_recipe: Whether or not to include the recipe (True by default)
:type include_recipe: bool
'''
recipe_dir = join(config.info_dir, 'recipe')
os.makedirs(recipe_dir)
if include_recipe:
for fn in os.listdir(m.path):
if fn.startswith('.'):
continue
src_path = join(m.path, fn)
dst_path = join(recipe_dir, fn)
if isdir(src_path):
shutil.copytree(src_path, dst_path)
else:
shutil.copy(src_path, dst_path)
readme = m.get_value('about/readme')
if readme:
src = join(source.get_dir(), readme)
if not os.path.exists(src):
sys.exit("Error: Could not find the readme: %s" % readme)
dst = join(config.info_dir, readme)
shutil.copy(src, dst)
if os.path.split(readme)[1] not in {"README.md", "README.rst", "README"}:
print("WARNING: Binstar only recognizes about/readme as README.md and README.rst",
file=sys.stderr)
# Deal with Python 2 and 3's different json module type reqs
mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}
with open(join(config.info_dir, 'index.json'), **mode_dict) as fo:
json.dump(m.info_index(), fo, indent=2, sort_keys=True)
with open(join(config.info_dir, 'recipe.json'), **mode_dict) as fo:
json.dump(m.meta, fo, indent=2, sort_keys=True)
if sys.platform == 'win32':
# make sure we use '/' path separators in metadata
files = [f.replace('\\', '/') for f in files]
with open(join(config.info_dir, 'files'), 'w') as fo:
if m.get_value('build/noarch_python'):
fo.write('\n')
else:
for f in files:
fo.write(f + '\n')
files_with_prefix = sorted(have_prefix_files(files))
binary_has_prefix_files = m.binary_has_prefix_files()
text_has_prefix_files = m.has_prefix_files()
if files_with_prefix and not m.get_value('build/noarch_python'):
auto_detect = m.get_value('build/detect_binary_files_with_prefix')
if sys.platform == 'win32':
# Paths on Windows can contain spaces, so we need to quote the
# paths. Fortunately they can't contain quotes, so we don't have
# to worry about nested quotes.
fmt_str = '"%s" %s "%s"\n'
else:
# Don't do it everywhere because paths on Unix can contain quotes,
# and we don't have a good method of escaping, and because older
# versions of conda don't support quotes in has_prefix
fmt_str = '%s %s %s\n'
with open(join(config.info_dir, 'has_prefix'), 'w') as fo:
for pfix, mode, fn in files_with_prefix:
if (fn in text_has_prefix_files):
# register for text replacement, regardless of mode
fo.write(fmt_str % (pfix, 'text', fn))
text_has_prefix_files.remove(fn)
elif ((mode == 'binary') and (fn in binary_has_prefix_files)):
print("Detected hard-coded path in binary file %s" % fn)
fo.write(fmt_str % (pfix, mode, fn))
binary_has_prefix_files.remove(fn)
elif (auto_detect or (mode == 'text')):
print("Detected hard-coded path in %s file %s" % (mode, fn))
fo.write(fmt_str % (pfix, mode, fn))
else:
print("Ignored hard-coded path in %s" % fn)
# make sure we found all of the files expected
errstr = ""
for f in text_has_prefix_files:
errstr += "Did not detect hard-coded path in %s from has_prefix_files\n" % f
for f in binary_has_prefix_files:
errstr += "Did not detect hard-coded path in %s from binary_has_prefix_files\n" % f
if errstr:
raise RuntimeError(errstr)
no_link = m.get_value('build/no_link')
if no_link:
if not isinstance(no_link, list):
no_link = [no_link]
with open(join(config.info_dir, 'no_link'), 'w') as fo:
for f in files:
if any(fnmatch.fnmatch(f, p) for p in no_link):
fo.write(f + '\n')
if m.get_value('source/git_url'):
with io.open(join(config.info_dir, 'git'), 'w', encoding='utf-8') as fo:
source.git_info(fo)
if m.get_value('app/icon'):
shutil.copyfile(join(m.path, m.get_value('app/icon')),
join(config.info_dir, 'icon.png'))
def get_build_index(clear_cache=True, channel_urls=(), override_channels=False):
if clear_cache:
# remove the cache such that a refetch is made,
# this is necessary because we add the local build repo URL
fetch_index.cache = {}
return get_index(channel_urls=[url_path(config.croot)] + list(channel_urls),
prepend=not override_channels)
def create_env(prefix, specs, clear_cache=True, verbose=True, channel_urls=(),
override_channels=False):
'''
    Create a conda environment for the given prefix and specs.
'''
if not isdir(config.bldpkgs_dir):
os.makedirs(config.bldpkgs_dir)
update_index(config.bldpkgs_dir)
if specs: # Don't waste time if there is nothing to do
index = get_build_index(clear_cache=True, channel_urls=channel_urls,
override_channels=override_channels)
warn_on_old_conda_build(index)
cc.pkgs_dirs = cc.pkgs_dirs[:1]
actions = plan.install_actions(prefix, index, specs)
plan.display_actions(actions, index)
plan.execute_actions(actions, index, verbose=verbose)
# ensure prefix exists, even if empty, i.e. when specs are empty
if not isdir(prefix):
os.makedirs(prefix)
def warn_on_old_conda_build(index):
root_linked = linked(cc.root_dir)
vers_inst = [dist.rsplit('-', 2)[1] for dist in root_linked
if dist.rsplit('-', 2)[0] == 'conda-build']
if not len(vers_inst) == 1:
print("WARNING: Could not detect installed version of conda-build", file=sys.stderr)
return
r = Resolve(index)
try:
pkgs = sorted(r.get_pkgs(MatchSpec('conda-build')))
except NoPackagesFound:
print("WARNING: Could not find any versions of conda-build in the channels", file=sys.stderr)
return
if pkgs[-1].version != vers_inst[0]:
print("""
WARNING: conda-build appears to be out of date. You have version %s but the
latest version is %s. Run
conda update -n root conda-build
to get the latest version.
""" % (vers_inst[0], pkgs[-1].version), file=sys.stderr)
def rm_pkgs_cache(dist):
'''
Removes dist from the package cache.
'''
cc.pkgs_dirs = cc.pkgs_dirs[:1]
rmplan = ['RM_FETCHED %s' % dist,
'RM_EXTRACTED %s' % dist]
plan.execute_plan(rmplan)
def bldpkg_path(m):
'''
Returns path to built package's tarball given its ``Metadata``.
'''
return join(config.bldpkgs_dir, '%s.tar.bz2' % m.dist())
def build(m, get_src=True, verbose=True, post=None, channel_urls=(), override_channels=False):
'''
Build the package with the specified metadata.
:param m: Package metadata
:type m: Metadata
:param get_src: Should we download the source?
:type get_src: bool
:type post: bool or None. None means run the whole build. True means run
post only. False means stop just before the post.
'''
if (m.get_value('build/detect_binary_files_with_prefix')
or m.binary_has_prefix_files()):
# We must use a long prefix here as the package will only be
# installable into prefixes shorter than this one.
config.use_long_build_prefix = True
else:
# In case there are multiple builds in the same process
config.use_long_build_prefix = False
if post in [False, None]:
print("Removing old build directory")
rm_rf(config.short_build_prefix)
rm_rf(config.long_build_prefix)
print("Removing old work directory")
rm_rf(source.WORK_DIR)
# Display the name only
# Version number could be missing due to dependency on source info.
print("BUILD START:", m.dist())
create_env(config.build_prefix,
[ms.spec for ms in m.ms_depends('build')],
verbose=verbose, channel_urls=channel_urls,
override_channels=override_channels)
if m.name() in [i.rsplit('-', 2)[0] for i in linked(config.build_prefix)]:
print("%s is installed as a build dependency. Removing." %
m.name())
index = get_build_index(clear_cache=False, channel_urls=channel_urls, override_channels=override_channels)
actions = plan.remove_actions(config.build_prefix, [m.name()], index=index)
assert not plan.nothing_to_do(actions), actions
plan.display_actions(actions, index)
plan.execute_actions(actions, index)
if get_src:
source.provide(m.path, m.get_section('source'))
# Parse our metadata again because we did not initialize the source
# information before.
m.parse_again()
print("Package:", m.dist())
assert isdir(source.WORK_DIR)
src_dir = source.get_dir()
contents = os.listdir(src_dir)
if contents:
print("source tree in:", src_dir)
else:
print("no source")
rm_rf(config.info_dir)
files1 = prefix_files()
for pat in m.always_include_files():
has_matches = False
for f in set(files1):
if fnmatch.fnmatch(f, pat):
print("Including in package existing file", f)
files1.discard(f)
has_matches = True
if not has_matches:
sys.exit("Error: Glob %s from always_include_files does not match any files" % pat)
# Save this for later
with open(join(config.croot, 'prefix_files.txt'), 'w') as f:
f.write(u'\n'.join(sorted(list(files1))))
f.write(u'\n')
if sys.platform == 'win32':
import conda_build.windows as windows
windows.build(m)
else:
env = environ.get_dict(m)
build_file = join(m.path, 'build.sh')
script = m.get_value('build/script', None)
if script:
if isinstance(script, list):
script = '\n'.join(script)
build_file = join(source.get_dir(), 'conda_build.sh')
with open(build_file, 'w') as bf:
bf.write(script)
os.chmod(build_file, 0o766)
if exists(build_file):
cmd = ['/bin/bash', '-x', '-e', build_file]
_check_call(cmd, env=env, cwd=src_dir)
if post in [True, None]:
if post == True:
with open(join(config.croot, 'prefix_files.txt'), 'r') as f:
files1 = set(f.read().splitlines())
get_build_metadata(m)
create_post_scripts(m)
create_entry_points(m.get_value('build/entry_points'))
assert not exists(config.info_dir)
files2 = prefix_files()
post_process(sorted(files2 - files1), preserve_egg_dir=bool(m.get_value('build/preserve_egg_dir')))
# The post processing may have deleted some files (like easy-install.pth)
files2 = prefix_files()
assert not any(config.meta_dir in join(config.build_prefix, f) for f in files2 - files1)
post_build(m, sorted(files2 - files1))
create_info_files(m, sorted(files2 - files1),
include_recipe=bool(m.path))
if m.get_value('build/noarch_python'):
import conda_build.noarch_python as noarch_python
noarch_python.transform(m, sorted(files2 - files1))
files3 = prefix_files()
fix_permissions(files3 - files1)
path = bldpkg_path(m)
t = tarfile.open(path, 'w:bz2')
for f in sorted(files3 - files1):
t.add(join(config.build_prefix, f), f)
t.close()
print("BUILD END:", m.dist())
# we're done building, perform some checks
tarcheck.check_all(path)
update_index(config.bldpkgs_dir)
else:
print("STOPPING BUILD BEFORE POST:", m.dist())
def test(m, verbose=True, channel_urls=(), override_channels=False):
'''
Execute any test scripts for the given package.
:param m: Package's metadata.
:type m: Metadata
'''
# remove from package cache
rm_pkgs_cache(m.dist())
tmp_dir = join(config.croot, 'test-tmp_dir')
rm_rf(tmp_dir)
os.makedirs(tmp_dir)
create_files(tmp_dir, m)
# Make Perl or Python-specific test files
if m.name().startswith('perl-'):
pl_files = create_pl_files(tmp_dir, m)
py_files = False
else:
py_files = create_py_files(tmp_dir, m)
pl_files = False
shell_files = create_shell_files(tmp_dir, m)
if not (py_files or shell_files or pl_files):
print("Nothing to test for:", m.dist())
return
print("TEST START:", m.dist())
rm_rf(config.build_prefix)
rm_rf(config.test_prefix)
specs = ['%s %s %s' % (m.name(), m.version(), m.build_id())]
# add packages listed in test/requires
specs_include_python = False
for spec in m.get_value('test/requires', []):
specs.append(spec)
if spec.startswith('python ') or spec == 'python':
specs_include_python = True
if py_files and not specs_include_python:
# as the tests are run by python, we need to specify it
specs += ['python %s*' % environ.get_py_ver()]
if pl_files:
# as the tests are run by perl, we need to specify it
specs += ['perl %s*' % environ.get_perl_ver()]
create_env(config.test_prefix, specs, verbose=verbose,
channel_urls=channel_urls, override_channels=override_channels)
env = dict(os.environ)
# TODO: Include all the same environment variables that are used in
# building.
env.update(environ.get_dict(m, prefix=config.test_prefix))
# prepend bin (or Scripts) directory
env['PATH'] = (join(config.test_prefix, bin_dirname) + os.pathsep +
os.getenv('PATH'))
for varname in 'CONDA_PY', 'CONDA_NPY', 'CONDA_PERL':
env[varname] = str(getattr(config, varname))
env['PREFIX'] = config.test_prefix
    # Python 2 on Windows requires that env variables be str, not unicode
env = {str(key): str(value) for key, value in env.items()}
if py_files:
try:
subprocess.check_call([config.test_python, '-s',
join(tmp_dir, 'run_test.py')],
env=env, cwd=tmp_dir)
except subprocess.CalledProcessError:
tests_failed(m)
if pl_files:
try:
subprocess.check_call([config.test_perl,
join(tmp_dir, 'run_test.pl')],
env=env, cwd=tmp_dir)
except subprocess.CalledProcessError:
tests_failed(m)
if shell_files:
if sys.platform == 'win32':
test_file = join(tmp_dir, 'run_test.bat')
cmd = [os.environ['COMSPEC'], '/c', 'call', test_file]
try:
subprocess.check_call(cmd, env=env, cwd=tmp_dir)
except subprocess.CalledProcessError:
tests_failed(m)
else:
test_file = join(tmp_dir, 'run_test.sh')
# TODO: Run the test/commands here instead of in run_test.py
cmd = ['/bin/bash', '-x', '-e', test_file]
try:
subprocess.check_call(cmd, env=env, cwd=tmp_dir)
except subprocess.CalledProcessError:
tests_failed(m)
print("TEST END:", m.dist())
def tests_failed(m):
'''
Causes conda to exit if any of the given package's tests failed.
:param m: Package's metadata
:type m: Metadata
'''
if not isdir(config.broken_dir):
os.makedirs(config.broken_dir)
shutil.move(bldpkg_path(m), join(config.broken_dir, "%s.tar.bz2" % m.dist()))
sys.exit("TESTS FAILED: " + m.dist())
|
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 30 16:22:29 2014
Author: Josef Perktold
License: BSD-3
"""
from io import StringIO
import numpy as np
from numpy.testing import assert_, assert_allclose, assert_equal
import pandas as pd
import patsy
import pytest
from statsmodels import datasets
from statsmodels.base._constraints import fit_constrained
from statsmodels.discrete.discrete_model import Poisson, Logit
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.tools.tools import add_constant
from .results import (
results_glm_logit_constrained as reslogit,
results_poisson_constrained as results,
)
spector_data = datasets.spector.load()
spector_data.endog = np.asarray(spector_data.endog)
spector_data.exog = np.asarray(spector_data.exog)
spector_data.exog = add_constant(spector_data.exog, prepend=False)
DEBUG = False
ss = '''\
agecat smokes deaths pyears
1 1 32 52407
2 1 104 43248
3 1 206 28612
4 1 186 12663
5 1 102 5317
1 0 2 18790
2 0 12 10673
3 0 28 5710
4 0 28 2585
5 0 31 1462'''
data = pd.read_csv(StringIO(ss), delimiter='\t')
data = data.astype(int)
data['logpyears'] = np.log(data['pyears'])
class CheckPoissonConstrainedMixin(object):
def test_basic(self):
res1 = self.res1
res2 = self.res2
assert_allclose(res1[0], res2.params[self.idx], rtol=1e-6)
# see below: Stata has nan, we have zero
bse1 = np.sqrt(np.diag(res1[1]))
mask = (bse1 == 0) & np.isnan(res2.bse[self.idx])
assert_allclose(bse1[~mask], res2.bse[self.idx][~mask], rtol=1e-6)
def test_basic_method(self):
if hasattr(self, 'res1m'):
res1 = (self.res1m if not hasattr(self.res1m, '_results')
else self.res1m._results)
res2 = self.res2
assert_allclose(res1.params, res2.params[self.idx], rtol=1e-6)
# when a parameter is fixed, Stata has bse=nan, we have bse=0
mask = (res1.bse == 0) & np.isnan(res2.bse[self.idx])
assert_allclose(res1.bse[~mask], res2.bse[self.idx][~mask],
rtol=1e-6)
tvalues = res2.params_table[self.idx, 2]
# when a parameter is fixed, Stata has tvalue=nan,
# we have tvalue=inf
mask = np.isinf(res1.tvalues) & np.isnan(tvalues)
assert_allclose(res1.tvalues[~mask], tvalues[~mask], rtol=1e-6)
pvalues = res2.params_table[self.idx, 3]
# note most pvalues are very small
# examples so far agree at 8 or more decimal, but rtol is stricter
# see above
mask = (res1.pvalues == 0) & np.isnan(pvalues)
assert_allclose(res1.pvalues[~mask], pvalues[~mask], rtol=5e-5)
ci_low = res2.params_table[self.idx, 4]
ci_upp = res2.params_table[self.idx, 5]
ci = np.column_stack((ci_low, ci_upp))
# note most pvalues are very small
# examples so far agree at 8 or more decimal, but rtol is stricter
# see above: nan versus value
assert_allclose(res1.conf_int()[~np.isnan(ci)], ci[~np.isnan(ci)],
rtol=5e-5)
# other
assert_allclose(res1.llf, res2.ll, rtol=1e-6)
assert_equal(res1.df_model, res2.df_m)
# Stata does not have df_resid
df_r = res2.N - res2.df_m - 1
assert_equal(res1.df_resid, df_r)
else:
pytest.skip("not available yet")
def test_other(self):
# some results may not be valid or available for all models
if hasattr(self, 'res1m'):
res1 = self.res1m
res2 = self.res2
if hasattr(res2, 'll_0'):
assert_allclose(res1.llnull, res2.ll_0, rtol=1e-6)
else:
if DEBUG:
import warnings
message = ('test: ll_0 not available, llnull=%6.4F'
% res1.llnull)
warnings.warn(message)
else:
pytest.skip("not available yet")
class TestPoissonConstrained1a(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_noexposure_constraint
# 2 is dropped baseline for categorical
cls.idx = [7, 3, 4, 5, 6, 0, 1]
# example without offset
formula = 'deaths ~ logpyears + smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data)
# get start_params, example fails to converge on one CI run
k_vars = len(mod.exog_names)
start_params = np.zeros(k_vars)
start_params[0] = np.log(mod.endog.mean())
# if we need it, this is desired params
# p = np.array([-3.93478643, 1.37276214, 2.33077032, 2.71338891,
# 2.71338891, 0.57966535, 0.97254074])
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
start_params=start_params,
fit_kwds={'method': 'bfgs', 'disp': 0})
# TODO: Newton fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, start_params=start_params,
method='bfgs', disp=0)
@pytest.mark.smoke
def test_summary(self):
# trailing text in summary, assumes it's the first extra string
# NOTE: see comment about convergence in llnull for self.res1m
summ = self.res1m.summary()
assert_('linear equality constraints' in summ.extra_txt)
@pytest.mark.smoke
def test_summary2(self):
# trailing text in summary, assumes it's the first extra string
# NOTE: see comment about convergence in llnull for self.res1m
summ = self.res1m.summary2()
assert_('linear equality constraints' in summ.extra_txt[0])
class TestPoissonConstrained1b(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
exposure=data['pyears'].values)
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method': 'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, method='newton',
disp=0)
class TestPoissonConstrained1c(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
offset=np.log(data['pyears'].values))
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method': 'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, method='newton', disp=0)
class TestPoissonNoConstrained(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_noconstraint
cls.idx = [6, 2, 3, 4, 5, 0] # 1 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
offset=np.log(data['pyears'].values))
res1 = mod.fit(disp=0)._results
# res1 duplicates res1m so we can follow the same test pattern
cls.res1 = (res1.params, res1.cov_params())
cls.res1m = res1
class TestPoissonConstrained2a(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_noexposure_constraint2
# 2 is dropped baseline for categorical
cls.idx = [7, 3, 4, 5, 6, 0, 1]
# example without offset
formula = 'deaths ~ logpyears + smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data)
# get start_params, example fails to converge on one CI run
k_vars = len(mod.exog_names)
start_params = np.zeros(k_vars)
start_params[0] = np.log(mod.endog.mean())
# if we need it, this is desired params
# p = np.array([-9.43762015, 1.52762442, 2.74155711, 3.58730007,
# 4.08730007, 1.15987869, 0.12111539])
constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
start_params=start_params,
fit_kwds={'method': 'bfgs', 'disp': 0})
# TODO: Newton fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, start_params=start_params,
method='bfgs', disp=0)
class TestPoissonConstrained2b(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint2
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
exposure=data['pyears'].values)
constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method': 'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails to converge. overflow somewhere?
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, method='bfgs', disp=0,
start_params=cls.res1[0])
class TestPoissonConstrained2c(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint2
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
offset=np.log(data['pyears'].values))
constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method': 'newton', 'disp': 0})
cls.constraints = lc
# TODO: bfgs fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr,
method='bfgs', disp=0,
start_params=cls.res1[0])
class TestGLMPoissonConstrained1a(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
from statsmodels.base._constraints import fit_constrained
cls.res2 = results.results_noexposure_constraint
# 2 is dropped baseline for categorical
cls.idx = [7, 3, 4, 5, 6, 0, 1]
# example without offset
formula = 'deaths ~ logpyears + smokes + C(agecat)'
mod = GLM.from_formula(formula, data=data,
family=families.Poisson())
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'atol': 1e-10})
cls.constraints = lc
cls.res1m = mod.fit_constrained(constr, atol=1e-10)
class TestGLMPoissonConstrained1b(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
from statsmodels.base._constraints import fit_constrained
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
cls.res2 = results.results_exposure_constraint
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example with offset
formula = 'deaths ~ smokes + C(agecat)'
mod = GLM.from_formula(formula, data=data,
family=families.Poisson(),
offset=np.log(data['pyears'].values))
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'atol': 1e-10})
cls.constraints = lc
cls.res1m = mod.fit_constrained(constr, atol=1e-10)._results
def test_compare_glm_poisson(self):
res1 = self.res1m
res2 = self.res2
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
exposure=data['pyears'].values)
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
res2 = mod.fit_constrained(constr, start_params=self.res1m.params,
method='newton', warn_convergence=False,
disp=0)
# we get high precision because we use the params as start_params
# basic, just as check that we have the same model
assert_allclose(res1.params, res2.params, rtol=1e-12)
assert_allclose(res1.bse, res2.bse, rtol=1e-11)
# check predict, fitted, ...
predicted = res1.predict()
assert_allclose(predicted, res2.predict(), rtol=1e-10)
assert_allclose(res1.mu, predicted, rtol=1e-10)
assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)
assert_allclose(res1.predict(which="linear"),
res2.predict(which="linear"),
rtol=1e-10)
class CheckGLMConstrainedMixin(CheckPoissonConstrainedMixin):
# add tests for some GLM specific attributes
def test_glm(self):
res2 = self.res2 # reference results
res1 = self.res1m
# assert_allclose(res1.aic, res2.aic, rtol=1e-10) # far away
# Stata aic in ereturn and in estat ic are very different
# we have the same as estat ic
# see issue GH#1733
assert_allclose(res1.aic, res2.infocrit[4], rtol=1e-10)
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
# FutureWarning for BIC changes
assert_allclose(res1.bic, res2.bic, rtol=1e-10)
# bic is deviance based
# assert_allclose(res1.bic, res2.infocrit[5], rtol=1e-10)
assert_allclose(res1.deviance, res2.deviance, rtol=1e-10)
# TODO: which chi2 are these
# assert_allclose(res1.pearson_chi2, res2.chi2, rtol=1e-10)
class TestGLMLogitConstrained1(CheckGLMConstrainedMixin):
@classmethod
def setup_class(cls):
cls.idx = slice(None)
# params sequence same as Stata, but Stata reports param = nan
# and we have param = value = 0
cls.res2 = reslogit.results_constraint1
mod1 = GLM(spector_data.endog, spector_data.exog,
family=families.Binomial())
constr = 'x1 = 2.8'
cls.res1m = mod1.fit_constrained(constr)
R, q = cls.res1m.constraints
cls.res1 = fit_constrained(mod1, R, q)
class TestLogitConstrained1(CheckGLMConstrainedMixin):
@classmethod
def setup_class(cls):
cls.idx = slice(None)
# params sequence same as Stata, but Stata reports param = nan
# and we have param = value = 0
# res1ul = Logit(data.endog, data.exog).fit(method="newton", disp=0)
cls.res2 = reslogit.results_constraint1
mod1 = Logit(spector_data.endog, spector_data.exog)
constr = 'x1 = 2.8'
# newton doesn't work, raises hessian singular
cls.res1m = mod1.fit_constrained(constr, method='bfgs')
R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants
cls.res1 = fit_constrained(mod1, R, q, fit_kwds={'method': 'bfgs'})
@pytest.mark.skip(reason='not a GLM')
def test_glm(self):
return
class TestGLMLogitConstrained2(CheckGLMConstrainedMixin):
@classmethod
def setup_class(cls):
cls.idx = slice(None) # params sequence same as Stata
cls.res2 = reslogit.results_constraint2
mod1 = GLM(spector_data.endog, spector_data.exog,
family=families.Binomial())
constr = 'x1 - x3 = 0'
cls.res1m = mod1.fit_constrained(constr, atol=1e-10)
# patsy compatible constraints
R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants
cls.res1 = fit_constrained(mod1, R, q, fit_kwds={'atol': 1e-10})
cls.constraints_rq = (R, q)
def test_predict(self):
# results only available for this case
res2 = self.res2 # reference results
res1 = self.res1m
predicted = res1.predict()
assert_allclose(predicted, res2.predict_mu, atol=1e-7)
assert_allclose(res1.mu, predicted, rtol=1e-10)
assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)
@pytest.mark.smoke
def test_summary(self):
# trailing text in summary, assumes it's the first extra string
summ = self.res1m.summary()
assert_('linear equality constraints' in summ.extra_txt)
lc_string = str(self.res1m.constraints)
assert lc_string == "x1 - x3 = 0.0"
@pytest.mark.smoke
def test_summary2(self):
# trailing text in summary, assumes it's the first extra string
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
# FutureWarning for BIC changes
summ = self.res1m.summary2()
assert_('linear equality constraints' in summ.extra_txt[0])
def test_fit_constrained_wrap(self):
# minimal test
res2 = self.res2 # reference results
from statsmodels.base._constraints import fit_constrained_wrap
res_wrap = fit_constrained_wrap(self.res1m.model, self.constraints_rq)
assert_allclose(res_wrap.params, res2.params, rtol=1e-6)
assert_allclose(res_wrap.params, res2.params, rtol=1e-6)
class TestGLMLogitConstrained2HC(CheckGLMConstrainedMixin):
@classmethod
def setup_class(cls):
cls.idx = slice(None) # params sequence same as Stata
cls.res2 = reslogit.results_constraint2_robust
mod1 = GLM(spector_data.endog, spector_data.exog,
family=families.Binomial())
# not used to match Stata for HC
# nobs, k_params = mod1.exog.shape
# k_params -= 1 # one constraint
cov_type = 'HC0'
cov_kwds = {'scaling_factor': 32/31}
# looks like nobs / (nobs - 1) and not (nobs - 1.) / (nobs - k_params)
constr = 'x1 - x3 = 0'
cls.res1m = mod1.fit_constrained(constr, cov_type=cov_type,
cov_kwds=cov_kwds, atol=1e-10)
R, q = cls.res1m.constraints
cls.res1 = fit_constrained(mod1, R, q, fit_kwds={'atol': 1e-10,
'cov_type': cov_type,
'cov_kwds': cov_kwds})
cls.constraints_rq = (R, q)
class TestLogitConstrained2HC(CheckGLMConstrainedMixin):
@classmethod
def setup_class(cls):
cls.idx = slice(None) # params sequence same as Stata
# res1ul = Logit(data.endog, data.exog).fit(method="newton", disp=0)
cls.res2 = reslogit.results_constraint2_robust
mod1 = Logit(spector_data.endog, spector_data.exog)
# not used to match Stata for HC
# nobs, k_params = mod1.exog.shape
# k_params -= 1 # one constraint
cov_type = 'HC0'
cov_kwds = {'scaling_factor': 32/31}
# looks like nobs / (nobs - 1) and not (nobs - 1.) / (nobs - k_params)
constr = 'x1 - x3 = 0'
cls.res1m = mod1.fit_constrained(constr, cov_type=cov_type,
cov_kwds=cov_kwds, tol=1e-10,
)
R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants
cls.res1 = fit_constrained(mod1, R, q, fit_kwds={'tol': 1e-10,
'cov_type': cov_type,
'cov_kwds': cov_kwds})
cls.constraints_rq = (R, q)
@pytest.mark.skip(reason='not a GLM')
def test_glm(self):
return
def junk(): # FIXME: make this into a test, or move/remove
# Singular Matrix in mod1a.fit()
# same as Stata default
formula2 = 'deaths ~ C(agecat) + C(smokes) : C(agecat)'
mod = Poisson.from_formula(formula2, data=data,
exposure=data['pyears'].values)
mod.fit()
constraints = 'C(smokes)[T.1]:C(agecat)[3] = C(smokes)[T.1]:C(agecat)[4]'
import patsy
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constraints)
R, q = lc.coefs, lc.constants
mod.fit_constrained(R, q, fit_kwds={'method': 'bfgs'})
# example without offset
formula1a = 'deaths ~ logpyears + smokes + C(agecat)'
mod1a = Poisson.from_formula(formula1a, data=data)
mod1a.fit()
lc_1a = patsy.DesignInfo(mod1a.exog_names).linear_constraint(
'C(agecat)[T.4] = C(agecat)[T.5]')
mod1a.fit_constrained(lc_1a.coefs, lc_1a.constants,
fit_kwds={'method': 'newton'})
|
|
import time
from urlparse import parse_qs, urlparse
from urllib import quote, urlencode
from mailpile.commands import Command
from mailpile.crypto.gpgi import GnuPG
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.plugins import PluginManager
from mailpile.util import *
class UserSession(object):
EXPIRE_AFTER = 7 * 24 * 3600
def __init__(self, ts=None, auth=None, data=None):
self.ts = ts or time.time()
self.auth = auth
self.data = data or {}
def is_expired(self, now=None):
return (self.ts < (now or time.time()) - self.EXPIRE_AFTER)
def update_ts(self):
self.ts = time.time()
class UserSessionCache(dict):
def delete_expired(self, now=None):
now = now or time.time()
for k in self.keys():
if self[k].is_expired(now=now):
del self[k]
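# Rough worked example of the expiry rule above (illustrative only, kept as a
# comment so nothing runs at import time): EXPIRE_AFTER is 7 * 24 * 3600 =
# 604800 seconds, so a session whose ts is more than a week old satisfies
# ts < now - EXPIRE_AFTER and is removed by delete_expired().
#
#     cache = UserSessionCache()
#     cache['old'] = UserSession(ts=time.time() - 8 * 24 * 3600)  # ~8 days old
#     cache['new'] = UserSession()
#     cache.delete_expired()  # only 'new' survives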
def VerifyAndStorePassphrase(config, passphrase=None, sps=None,
key=None):
if passphrase and not sps:
from mailpile.config import SecurePassphraseStorage
sps = SecurePassphraseStorage(passphrase)
passphrase = 'this probably does not really overwrite :-( '
assert(sps is not None)
# Note: Must use GnuPG without a config, otherwise bad things happen.
gpg = GnuPG(None, use_agent=False, debug=('gnupg' in config.sys.debug))
if gpg.is_available():
gpg.passphrase = sps.get_reader()
gpgr = config.prefs.gpg_recipient
gpgr = key or (gpgr if (gpgr not in (None, '', '!CREATE')) else None)
assert(gpg.sign('Sign This!', fromkey=gpgr)[0] == 0)
# Fun side effect: changing the passphrase invalidates the message cache
import mailpile.mailutils
mailpile.mailutils.ClearParseCache(full=True)
return sps
def SetLoggedIn(cmd, user=None, redirect=False, session_id=None):
user = user or 'DEFAULT'
sid = session_id or cmd.session.ui.html_variables.get('http_session')
if sid:
if cmd:
cmd.session.ui.debug('Logged in %s as %s' % (sid, user))
SESSION_CACHE[sid] = UserSession(auth=user, data={
't': '%x' % int(time.time()),
})
if cmd:
if redirect:
return cmd._do_redirect()
else:
return True
def CheckPassword(config, username, password):
# FIXME: Do something with the username
return (config.gnupg_passphrase and
config.gnupg_passphrase.compare(password)) and 'DEFAULT'
SESSION_CACHE = UserSessionCache()
class Authenticate(Command):
"""Authenticate a user (log in)"""
SYNOPSIS = (None, 'login', 'auth/login', None)
ORDER = ('Internals', 5)
SPLIT_ARG = False
IS_INTERACTIVE = True
CONFIG_REQUIRED = False
HTTP_AUTH_REQUIRED = False
HTTP_STRICT_VARS = False
HTTP_CALLABLE = ('GET', 'POST')
HTTP_POST_VARS = {
'user': 'User to authenticate as',
'pass': 'Password or passphrase'
}
@classmethod
def RedirectBack(cls, url, data):
qs = [(k, v.encode('utf-8')) for k, vl in data.iteritems() for v in vl
if k not in ['_method', '_path'] + cls.HTTP_POST_VARS.keys()]
qs = urlencode(qs)
url = ''.join([url, '?%s' % qs if qs else ''])
raise UrlRedirectException(url)
def _result(self, result=None):
result = result or {}
result['login_banner'] = self.session.config.sys.login_banner
return result
def _error(self, message, info=None, result=None):
return Command._error(self, message,
info=info, result=self._result(result))
def _success(self, message, result=None):
return Command._success(self, message, result=self._result(result))
def _do_redirect(self):
path = self.data.get('_path', [None])[0]
if (path and
not path[1:].startswith(DeAuthenticate.SYNOPSIS[2] or '!') and
not path[1:].startswith(self.SYNOPSIS[2] or '!')):
self.RedirectBack(path, self.data)
else:
raise UrlRedirectException('/')
def _do_login(self, user, password, load_index=False, redirect=False):
session, config = self.session, self.session.config
session_id = self.session.ui.html_variables.get('http_session')
# This prevents folks from sending us a DEFAULT user (upper case),
# which is an internal security bypass below.
user = user and user.lower()
if not user:
try:
# Verify the passphrase
if CheckPassword(config, None, password):
sps = config.gnupg_passphrase
else:
sps = VerifyAndStorePassphrase(config, passphrase=password)
if sps:
# Store the verified passphrase
config.gnupg_passphrase.data = sps.data
# Load the config and index, if necessary
config = self._config()
self._idx(wait=False)
if load_index:
try:
while not config.index:
time.sleep(1)
except KeyboardInterrupt:
pass
session.ui.debug('Good passphrase for %s' % session_id)
return self._success(_('Hello world, welcome!'), result={
'authenticated': SetLoggedIn(self, redirect=redirect)
})
else:
session.ui.debug('No GnuPG, checking DEFAULT user')
# No GnuPG, see if there is a DEFAULT user in the config
user = 'DEFAULT'
except (AssertionError, IOError):
session.ui.debug('Bad passphrase for %s' % session_id)
return self._error(_('Invalid passphrase, please try again'))
if user in config.logins or user == 'DEFAULT':
# FIXME: Salt and hash the password, check if it matches
# the entry in our user/password list (TODO).
# NOTE: This hack effectively disables auth without GnuPG
if user == 'DEFAULT':
session.ui.debug('FIXME: Unauthorized login allowed')
return self._logged_in(redirect=redirect)
raise Exception('FIXME')
return self._error(_('Incorrect username or password'))
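# Outline of the login flow above (a restatement of the code): when no username
# is supplied, the password is treated as the GnuPG passphrase -- first checked
# against the cached passphrase via CheckPassword(), then verified by signing a
# test message in VerifyAndStorePassphrase(); on success the passphrase is
# stored, the config/index are loaded, and the HTTP session is marked as
# authenticated via SetLoggedIn(). If the passphrase path yields nothing, the
# code falls back to the DEFAULT user, which is currently let through without a
# password check (see the FIXME above).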
def command(self):
session_id = self.session.ui.html_variables.get('http_session')
if self.data.get('_method', '') == 'POST':
if 'pass' in self.data:
return self._do_login(self.data.get('user', [None])[0],
self.data['pass'][0],
redirect=True)
elif not self.data:
password = self.session.ui.get_password(_('Your password: '))
return self._do_login(None, password, load_index=True)
elif (session_id in SESSION_CACHE and
SESSION_CACHE[session_id].auth and
'_method' in self.data):
self._do_redirect()
return self._success(_('Please log in'))
class DeAuthenticate(Command):
"""De-authenticate a user (log out)"""
SYNOPSIS = (None, 'logout', 'auth/logout', '[<session ID>]')
ORDER = ('Internals', 5)
SPLIT_ARG = False
IS_INTERACTIVE = True
CONFIG_REQUIRED = False
HTTP_AUTH_REQUIRED = False
HTTP_CALLABLE = ('GET', 'POST')
def command(self):
# FIXME: Should this only be a POST request?
# FIXME: This needs CSRF protection.
session_id = self.session.ui.html_variables.get('http_session')
if self.args and not session_id:
session_id = self.args[0]
if session_id:
try:
self.session.ui.debug('Logging out %s' % session_id)
del SESSION_CACHE[session_id]
return self._success(_('Goodbye!'))
except KeyError:
pass
return self._error(_('No session found!'))
plugin_manager = PluginManager(builtin=True)
plugin_manager.register_commands(Authenticate, DeAuthenticate)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from freezegun import freeze_time
from CTFd.models import Challenges, Flags, Hints, Solves, Tags, Users
from CTFd.utils import set_config
from tests.helpers import (
create_ctfd,
destroy_ctfd,
gen_challenge,
gen_fail,
gen_flag,
gen_hint,
gen_solve,
gen_tag,
gen_team,
gen_user,
login_as_user,
register_user,
)
def test_api_challenges_get_visibility_public():
"""Can a public user get /api/v1/challenges if challenge_visibility is private/public"""
app = create_ctfd()
with app.app_context():
set_config("challenge_visibility", "public")
with app.test_client() as client:
r = client.get("/api/v1/challenges")
assert r.status_code == 200
set_config("challenge_visibility", "private")
r = client.get("/api/v1/challenges", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenges_get_ctftime_public():
"""Can a public user get /api/v1/challenges if ctftime is over"""
app = create_ctfd()
with app.app_context(), freeze_time("2017-10-7"):
set_config("challenge_visibility", "public")
with app.test_client() as client:
r = client.get("/api/v1/challenges")
assert r.status_code == 200
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
r = client.get("/api/v1/challenges")
assert r.status_code == 403
destroy_ctfd(app)
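# Note on the ctftime fixtures used throughout this file (an inference from the
# timestamps, not CTFd documentation): requests made under
# freeze_time("2017-10-7") fall after the configured "end" time 1507262400
# (Oct 6, 2017), so the CTF is treated as over and non-admin access is expected
# to return 403; tests frozen at "2017-10-5" fall inside the start/end window
# and expect 200.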
def test_api_challenges_get_visibility_private():
"""Can a private user get /api/v1/challenges if challenge_visibility is private/public"""
app = create_ctfd()
with app.app_context():
register_user(app)
client = login_as_user(app)
r = client.get("/api/v1/challenges")
assert r.status_code == 200
set_config("challenge_visibility", "public")
r = client.get("/api/v1/challenges")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenges_get_ctftime_private():
"""Can a private user get /api/v1/challenges if ctftime is over"""
app = create_ctfd()
with app.app_context(), freeze_time("2017-10-7"):
register_user(app)
client = login_as_user(app)
r = client.get("/api/v1/challenges")
assert r.status_code == 200
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
r = client.get("/api/v1/challenges")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenges_get_verified_emails():
"""Can a verified email user get /api/v1/challenges"""
app = create_ctfd()
with app.app_context():
set_config("verify_emails", True)
register_user(app)
client = login_as_user(app)
r = client.get("/api/v1/challenges", json="")
assert r.status_code == 403
gen_user(
app.db,
name="user_name",
email="verified_user@ctfd.io",
password="password",
verified=True,
)
registered_client = login_as_user(app, "user_name", "password")
r = registered_client.get("/api/v1/challenges")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenges_post_non_admin():
"""Can a user post /api/v1/challenges if not admin"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.post("/api/v1/challenges", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenges_get_admin():
"""Can a user GET /api/v1/challenges if admin without team"""
app = create_ctfd(user_mode="teams")
with app.app_context():
gen_challenge(app.db)
# Admin does not have a team but should still be able to see challenges
user = Users.query.filter_by(id=1).first()
assert user.team_id is None
with login_as_user(app, "admin") as admin:
r = admin.get("/api/v1/challenges", json="")
assert r.status_code == 200
r = admin.get("/api/v1/challenges/1", json="")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenges_get_hidden_admin():
"""Can an admin see hidden challenges in API list response"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db, state="hidden")
gen_challenge(app.db)
with login_as_user(app, "admin") as admin:
challenges_list = admin.get("/api/v1/challenges", json="").get_json()[
"data"
]
assert len(challenges_list) == 1
challenges_list = admin.get(
"/api/v1/challenges?view=admin", json=""
).get_json()["data"]
assert len(challenges_list) == 2
destroy_ctfd(app)
def test_api_challenges_post_admin():
"""Can a user post /api/v1/challenges if admin"""
app = create_ctfd()
with app.app_context():
with login_as_user(app, "admin") as client:
r = client.post(
"/api/v1/challenges",
json={
"name": "chal",
"category": "cate",
"description": "desc",
"value": "100",
"state": "hidden",
"type": "standard",
},
)
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenge_types_post_non_admin():
"""Can a non-admin get /api/v1/challenges/types if not admin"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get("/api/v1/challenges/types", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_types_post_admin():
"""Can an admin get /api/v1/challenges/types if admin"""
app = create_ctfd()
with app.app_context():
with login_as_user(app, "admin") as client:
r = client.get("/api/v1/challenges/types", json="")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenge_get_visibility_public():
"""Can a public user get /api/v1/challenges/<challenge_id> if challenge_visibility is private/public"""
app = create_ctfd()
with app.app_context():
set_config("challenge_visibility", "public")
with app.test_client() as client:
gen_challenge(app.db)
r = client.get("/api/v1/challenges/1")
assert r.status_code == 200
set_config("challenge_visibility", "private")
r = client.get("/api/v1/challenges/1", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_get_ctftime_public():
"""Can a public user get /api/v1/challenges/<challenge_id> if ctftime is over"""
app = create_ctfd()
with app.app_context(), freeze_time("2017-10-7"):
set_config("challenge_visibility", "public")
gen_challenge(app.db)
with app.test_client() as client:
r = client.get("/api/v1/challenges/1")
assert r.status_code == 200
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
r = client.get("/api/v1/challenges/1")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_get_visibility_private():
"""Can a private user get /api/v1/challenges/<challenge_id> if challenge_visibility is private/public"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
register_user(app)
client = login_as_user(app)
r = client.get("/api/v1/challenges/1")
assert r.status_code == 200
set_config("challenge_visibility", "public")
r = client.get("/api/v1/challenges/1")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenge_get_with_admin_only_account_visibility():
"""Can a private user get /api/v1/challenges/<challenge_id> if account_visibility is admins_only"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
register_user(app)
client = login_as_user(app)
r = client.get("/api/v1/challenges/1")
assert r.status_code == 200
set_config("account_visibility", "admins")
r = client.get("/api/v1/challenges/1")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenge_get_ctftime_private():
"""Can a private user get /api/v1/challenges/<challenge_id> if ctftime is over"""
app = create_ctfd()
with app.app_context(), freeze_time("2017-10-7"):
gen_challenge(app.db)
register_user(app)
client = login_as_user(app)
r = client.get("/api/v1/challenges/1")
assert r.status_code == 200
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
r = client.get("/api/v1/challenges/1")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_get_verified_emails():
"""Can a verified email load /api/v1/challenges/<challenge_id>"""
app = create_ctfd()
with app.app_context(), freeze_time("2017-10-5"):
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
set_config("verify_emails", True)
gen_challenge(app.db)
gen_user(
app.db,
name="user_name",
email="verified_user@ctfd.io",
password="password",
verified=True,
)
register_user(app)
client = login_as_user(app)
registered_client = login_as_user(app, "user_name", "password")
r = client.get("/api/v1/challenges/1", json="")
assert r.status_code == 403
r = registered_client.get("/api/v1/challenges/1")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenge_get_non_existing():
"""Will a bad <challenge_id> at /api/v1/challenges/<challenge_id> 404"""
app = create_ctfd()
with app.app_context(), freeze_time("2017-10-5"):
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
register_user(app)
client = login_as_user(app)
r = client.get("/api/v1/challenges/1")
assert r.status_code == 404
destroy_ctfd(app)
def test_api_challenge_patch_non_admin():
"""Can a user patch /api/v1/challenges/<challenge_id> if not admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with app.test_client() as client:
r = client.patch("/api/v1/challenges/1", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_patch_admin():
"""Can a user patch /api/v1/challenges/<challenge_id> if admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with login_as_user(app, "admin") as client:
r = client.patch(
"/api/v1/challenges/1", json={"name": "chal_name", "value": "200"}
)
assert r.status_code == 200
assert r.get_json()["data"]["value"] == 200
destroy_ctfd(app)
def test_api_challenge_delete_non_admin():
"""Can a user delete /api/v1/challenges/<challenge_id> if not admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with app.test_client() as client:
r = client.delete("/api/v1/challenges/1", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_delete_admin():
"""Can a user delete /api/v1/challenges/<challenge_id> if admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with login_as_user(app, "admin") as client:
r = client.delete("/api/v1/challenges/1", json="")
assert r.status_code == 200
assert r.get_json().get("data") is None
destroy_ctfd(app)
def test_api_challenge_with_properties_delete_admin():
"""Can a user delete /api/v1/challenges/<challenge_id> if the challenge has other properties"""
app = create_ctfd()
with app.app_context():
challenge = gen_challenge(app.db)
gen_hint(app.db, challenge_id=challenge.id)
gen_tag(app.db, challenge_id=challenge.id)
gen_flag(app.db, challenge_id=challenge.id)
challenge = Challenges.query.filter_by(id=1).first()
assert len(challenge.hints) == 1
assert len(challenge.tags) == 1
assert len(challenge.flags) == 1
with login_as_user(app, "admin") as client:
r = client.delete("/api/v1/challenges/1", json="")
assert r.status_code == 200
assert r.get_json().get("data") is None
assert Tags.query.count() == 0
assert Hints.query.count() == 0
assert Flags.query.count() == 0
destroy_ctfd(app)
def test_api_challenge_attempt_post_public():
"""Can a public user post /api/v1/challenges/attempt"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with app.test_client() as client:
r = client.post("/api/v1/challenges/attempt", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_attempt_post_private():
"""Can an private user post /api/v1/challenges/attempt"""
app = create_ctfd()
with app.app_context():
challenge_id = gen_challenge(app.db).id
gen_flag(app.db, challenge_id)
register_user(app)
with login_as_user(app) as client:
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": challenge_id, "submission": "wrong_flag"},
)
assert r.status_code == 200
assert r.get_json()["data"]["status"] == "incorrect"
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": challenge_id, "submission": "flag"},
)
assert r.status_code == 200
assert r.get_json()["data"]["status"] == "correct"
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": challenge_id, "submission": "flag"},
)
assert r.status_code == 200
assert r.get_json()["data"]["status"] == "already_solved"
challenge_id = gen_challenge(app.db).id
gen_flag(app.db, challenge_id)
with login_as_user(app) as client:
for i in range(10):
gen_fail(app.db, user_id=2, challenge_id=challenge_id)
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": challenge_id, "submission": "flag"},
)
assert r.status_code == 429
assert r.get_json()["data"]["status"] == "ratelimited"
destroy_ctfd(app)
app = create_ctfd(user_mode="teams")
with app.app_context():
challenge_id = gen_challenge(app.db).id
gen_flag(app.db, challenge_id)
register_user(app)
team_id = gen_team(app.db).id
user = Users.query.filter_by(id=2).first()
user.team_id = team_id
app.db.session.commit()
with login_as_user(app) as client:
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": challenge_id, "submission": "wrong_flag"},
)
assert r.status_code == 200
assert r.get_json()["data"]["status"] == "incorrect"
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": challenge_id, "submission": "flag"},
)
assert r.status_code == 200
assert r.get_json()["data"]["status"] == "correct"
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": challenge_id, "submission": "flag"},
)
assert r.status_code == 200
assert r.get_json()["data"]["status"] == "already_solved"
challenge_id = gen_challenge(app.db).id
gen_flag(app.db, challenge_id)
with login_as_user(app) as client:
for i in range(10):
gen_fail(app.db, user_id=2, team_id=team_id, challenge_id=challenge_id)
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": challenge_id, "submission": "flag"},
)
assert r.status_code == 429
assert r.get_json()["data"]["status"] == "ratelimited"
destroy_ctfd(app)
def test_api_challenge_attempt_post_admin():
"""Can an admin user post /api/v1/challenges/attempt"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
gen_flag(app.db, 1)
with login_as_user(app, "admin") as client:
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": 1, "submission": "wrong_flag"},
)
assert r.status_code == 200
assert r.get_json()["data"]["status"] == "incorrect"
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": 1, "submission": "flag"},
)
assert r.status_code == 200
assert r.get_json()["data"]["status"] == "correct"
r = client.post(
"/api/v1/challenges/attempt",
json={"challenge_id": 1, "submission": "flag"},
)
assert r.status_code == 200
assert r.get_json()["data"]["status"] == "already_solved"
destroy_ctfd(app)
def test_api_challenge_get_solves_visibility_public():
"""Can a public user get /api/v1/challenges/<challenge_id>/solves if challenge_visibility is private/public"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with app.test_client() as client:
set_config("challenge_visibility", "public")
r = client.get("/api/v1/challenges/1/solves", json="")
assert r.status_code == 200
set_config("challenge_visibility", "private")
r = client.get("/api/v1/challenges/1/solves", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_get_solves_ctftime_public():
"""Can a public user get /api/v1/challenges/<challenge_id>/solves if ctftime is over"""
app = create_ctfd()
with app.app_context(), freeze_time("2017-10-7"):
set_config("challenge_visibility", "public")
gen_challenge(app.db)
with app.test_client() as client:
r = client.get("/api/v1/challenges/1/solves")
assert r.status_code == 200
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
r = client.get("/api/v1/challenges/1/solves", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_get_solves_ctf_frozen():
"""Test users can only see challenge solves that happened before freeze time"""
app = create_ctfd()
with app.app_context():
register_user(app, name="user1", email="user1@ctfd.io")
register_user(app, name="user2", email="user2@ctfd.io")
# Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
set_config("freeze", "1507262400")
with freeze_time("2017-10-4"):
chal = gen_challenge(app.db)
chal_id = chal.id
gen_solve(app.db, user_id=2, challenge_id=chal_id)
chal2 = gen_challenge(app.db)
chal2_id = chal2.id
with freeze_time("2017-10-8"):
# User ID 2 solves Challenge ID 2
gen_solve(app.db, user_id=2, challenge_id=chal2_id)
# User ID 3 solves Challenge ID 1
gen_solve(app.db, user_id=3, challenge_id=chal_id)
# Challenge 1 has 2 solves
# Challenge 2 has 1 solve
# There should now be two solves assigned to the same user.
assert Solves.query.count() == 3
client = login_as_user(app, name="user2")
# Challenge 1 should have one solve (after freeze)
r = client.get("/api/v1/challenges/1")
data = r.get_json()["data"]
assert data["solves"] == 1
# Challenge 1 should have one solve (after freeze)
r = client.get("/api/v1/challenges/1/solves")
data = r.get_json()["data"]
assert len(data) == 1
# Challenge 2 has a solve, but it shouldn't be shown to the user
r = client.get("/api/v1/challenges/2/solves")
data = r.get_json()["data"]
assert len(data) == 0
# Admins should see the solve data unmodified
admin = login_as_user(app, name="admin")
r = admin.get("/api/v1/challenges/2/solves")
data = r.get_json()["data"]
assert len(data) == 1
# But should see it as a regular user would if the preview param is passed
r = admin.get("/api/v1/challenges/2/solves?preview=true")
data = r.get_json()["data"]
assert len(data) == 0
destroy_ctfd(app)
def test_api_challenge_get_solves_visibility_private():
"""Can a private user get /api/v1/challenges/<challenge_id>/solves if challenge_visibility is private/public"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
register_user(app)
client = login_as_user(app)
r = client.get("/api/v1/challenges/1/solves")
assert r.status_code == 200
set_config("challenge_visibility", "public")
r = client.get("/api/v1/challenges/1/solves")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenge_get_solves_ctftime_private():
"""Can a private user get /api/v1/challenges/<challenge_id>/solves if ctftime is over"""
app = create_ctfd()
with app.app_context(), freeze_time("2017-10-7"):
gen_challenge(app.db)
register_user(app)
client = login_as_user(app)
r = client.get("/api/v1/challenges/1/solves")
assert r.status_code == 200
set_config(
"start", "1507089600"
) # Wednesday, October 4, 2017 12:00:00 AM GMT-04:00 DST
set_config(
"end", "1507262400"
) # Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
r = client.get("/api/v1/challenges/1/solves")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_get_solves_verified_emails():
"""Can a verified email get /api/v1/challenges/<challenge_id>/solves"""
app = create_ctfd()
with app.app_context():
set_config("verify_emails", True)
gen_challenge(app.db)
gen_user(
app.db,
name="user_name",
email="verified_user@ctfd.io",
password="password",
verified=True,
)
register_user(app)
client = login_as_user(app)
registered_client = login_as_user(app, "user_name", "password")
r = client.get("/api/v1/challenges/1/solves", json="")
assert r.status_code == 403
r = registered_client.get("/api/v1/challenges/1/solves")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenges_get_solves_score_visibility():
"""Can a user get /api/v1/challenges/<challenge_id>/solves if score_visibility is public/private/admin"""
app = create_ctfd()
with app.app_context():
set_config("challenge_visibility", "public")
set_config("score_visibility", "public")
gen_challenge(app.db)
with app.test_client() as client:
r = client.get("/api/v1/challenges/1/solves")
assert r.status_code == 200
set_config("challenge_visibility", "private")
set_config("score_visibility", "private")
register_user(app)
private_client = login_as_user(app)
r = private_client.get("/api/v1/challenges/1/solves")
assert r.status_code == 200
set_config("score_visibility", "admins")
admin = login_as_user(app, "admin", "password")
r = admin.get("/api/v1/challenges/1/solves")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenge_get_solves_404():
"""Will a bad <challenge_id> at /api/v1/challenges/<challenge_id>/solves 404"""
app = create_ctfd()
with app.app_context():
register_user(app)
client = login_as_user(app)
r = client.get("/api/v1/challenges/1/solves")
assert r.status_code == 404
destroy_ctfd(app)
def test_api_challenge_solves_returns_correct_data():
"""Test that /api/v1/<challenge_id>/solves returns expected data"""
app = create_ctfd()
with app.app_context():
register_user(app)
client = login_as_user(app)
chal = gen_challenge(app.db)
gen_solve(app.db, user_id=2, challenge_id=chal.id)
r = client.get("/api/v1/challenges/1/solves")
resp = r.get_json()["data"]
solve = resp[0]
assert r.status_code == 200
assert solve.get("account_id") == 2
assert solve.get("name") == "user"
assert solve.get("date") is not None
assert solve.get("account_url") == "/users/2"
destroy_ctfd(app)
app = create_ctfd(user_mode="teams")
with app.app_context():
register_user(app)
client = login_as_user(app)
team = gen_team(app.db)
user = Users.query.filter_by(id=2).first()
user.team_id = team.id
app.db.session.commit()
chal = gen_challenge(app.db)
gen_solve(app.db, user_id=2, team_id=1, challenge_id=chal.id)
r = client.get("/api/v1/challenges/1/solves")
resp = r.get_json()["data"]
solve = resp[0]
assert r.status_code == 200
assert solve.get("account_id") == 1
assert solve.get("name") == "team_name"
assert solve.get("date") is not None
assert solve.get("account_url") == "/teams/1"
destroy_ctfd(app)
app = create_ctfd(application_root="/ctf")
with app.app_context():
register_user(app)
client = login_as_user(app)
chal = gen_challenge(app.db)
gen_solve(app.db, user_id=2, challenge_id=chal.id)
r = client.get("/api/v1/challenges/1/solves")
resp = r.get_json()["data"]
solve = resp[0]
assert r.status_code == 200
assert solve.get("account_id") == 2
assert solve.get("name") == "user"
assert solve.get("date") is not None
assert solve.get("account_url") == "/ctf/users/2"
destroy_ctfd(app)
def test_api_challenge_get_files_non_admin():
"""Can a user get /api/v1/challenges/<challenge_id>/files if not admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with app.test_client() as client:
r = client.get("/api/v1/challenges/1/files", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_get_files_admin():
"""Can a user get /api/v1/challenges/<challenge_id>/files if admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with login_as_user(app, "admin") as client:
r = client.get("/api/v1/challenges/1/files")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenge_get_tags_non_admin():
"""Can a user get /api/v1/challenges/<challenge_id>/tags if not admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with app.test_client() as client:
r = client.get("/api/v1/challenges/1/tags", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_get_tags_admin():
"""Can a user get /api/v1/challenges/<challenge_id>/tags if admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with login_as_user(app, "admin") as client:
r = client.get("/api/v1/challenges/1/tags")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenge_get_hints_non_admin():
"""Can a user get /api/v1/challenges/<challenge_id>/hints if not admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with app.test_client() as client:
r = client.get("/api/v1/challenges/1/hints", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_get_hints_admin():
"""Can a user get /api/v1/challenges/<challenge_id>/hints if admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with login_as_user(app, "admin") as client:
r = client.get("/api/v1/challenges/1/hints")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_challenge_get_flags_non_admin():
"""Can a user get /api/v1/challenges/<challenge_id>/flags if not admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with app.test_client() as client:
r = client.get("/api/v1/challenges/1/flags", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_challenge_get_flags_admin():
"""Can a user get /api/v1/challenges/<challenge_id>/flags if admin"""
app = create_ctfd()
with app.app_context():
gen_challenge(app.db)
with login_as_user(app, "admin") as client:
r = client.get("/api/v1/challenges/1/flags")
assert r.status_code == 200
destroy_ctfd(app)
|
|
import os
import sys
import pandas as pd
import numpy as np
from numpy.random import poisson, uniform
from numpy import mean
import time
import math
po = False
team_homes = pd.read_csv(os.path.join(os.path.split(__file__)[0], 'TeamHomes.csv'), header = None, index_col = 0)
stadium_locs = pd.read_csv(os.path.join(os.path.split(__file__)[0], 'StadiumLocs.csv'), index_col = 0)
teamsheetpath = os.path.join(os.path.split(__file__)[0], 'teamcsvs')
compstat = {'TF': 'TA', 'TA': 'TF', #Dictionary to use to compare team stats with opponent stats
'CF': 'CA', 'CA': 'CF',
'CON%F': 'CON%A', 'CON%A': 'CON%F',
'PF': 'PA', 'PA': 'PF',
'DGF': 'DGA', 'DGA': 'DGF'}
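# A guess at the stat abbreviations, based on how they are used below: T* are
# tries, C* conversions, P* penalties and DG* drop goals, with the F/A suffix
# meaning "for"/"against"; CON% is the conversion rate C/T. compstat pairs each
# "for" stat with the opponent's corresponding "against" stat and vice versa.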
def get_opponent_stats(opponent, venue): #Gets summaries of statistics for opponent each week
opponent_stats = {}
global teamsheetpath, stadium_locs, team_homes
opp_stats = pd.DataFrame.from_csv(os.path.join(teamsheetpath, opponent + '.csv'))
opponent_home = team_homes[1][opponent]
(venue_lat, venue_lng) = stadium_locs.loc[venue, ['Lat', 'Long']]
(opponent_home_lat, opponent_home_lng) = stadium_locs.loc[opponent_home, ['Lat', 'Long']]
opponent_reference_distance = geodesic_distance(opponent_home_lat, opponent_home_lng, venue_lat, venue_lng)
def get_opponent_weight(location):
return get_travel_weight(location, opponent_home_lat, opponent_home_lng, opponent_reference_distance)
opp_stats['Weight'] = opp_stats['VENUE'].apply(get_opponent_weight)
for stat in opp_stats.columns:
if stat != 'VENUE':
if stat != 'OPP':
opponent_stats.update({stat: np.average(opp_stats[stat], weights = opp_stats['Weight'])})
opponent_stats.update({'CON%F': float((opp_stats['CF']*opp_stats['Weight']).sum())/(opp_stats['TF']*opp_stats['Weight']).sum()})
opponent_stats.update({'CON%A': float((opp_stats['CA']*opp_stats['Weight']).sum())/(opp_stats['TA']*opp_stats['Weight']).sum()})
return opponent_stats
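# Restating the averaging above: every opponent stat is a travel-weighted
# average over that opponent's season, and the conversion rates are weighted
# ratios rather than averages of per-match ratios:
#     CON%F = sum(CF * Weight) / sum(TF * Weight)
#     CON%A = sum(CA * Weight) / sum(TA * Weight)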
def get_residual_performance(score_df): #Get how each team has done compared to the average performance of their opponents
global teamsheetpath, team_homes, stadium_locs
#score_df = pd.DataFrame.from_csv(os.path.join(teamsheetpath, team + '.csv'))
residual_stats = {}
score_df['CON%F'] = np.nan
score_df['CON%A'] = np.nan
for week in score_df.index:
opponent_stats = get_opponent_stats(score_df['OPP'][week], score_df['VENUE'][week])
for stat in opponent_stats:
if week == score_df.index.tolist()[0]:
score_df['OPP_' + stat] = np.nan
score_df['OPP_' + stat][week] = opponent_stats[stat]
score_df['CON%F'][week] = float(score_df['CF'][week]) / score_df['TF'][week]
score_df['CON%A'][week] = float(score_df['CA'][week]) / score_df['TA'][week]
for stat in opponent_stats:
if stat == 'Weight':
continue
score_df['R_' + stat] = score_df[stat] - score_df['OPP_' + compstat[stat]]
if stat in ['TF', 'PF', 'DGF', 'TA', 'PA', 'DGA']:
residual_stats.update({stat: np.average(score_df['R_' + stat], weights = score_df['Weight'])})
elif stat == 'CON%F':
residual_stats.update({stat: (score_df['R_CON%F'].multiply(score_df['TF'])*score_df['Weight']).sum() / (score_df['TF']*score_df['Weight']).sum()})
elif stat == 'CON%A':
residual_stats.update({stat: (score_df['R_CON%A'].multiply(score_df['TA'])*score_df['Weight']).sum() / (score_df['TA']*score_df['Weight']).sum()})
return residual_stats
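# Restating the residual calculation above: for each stat, R_stat is the team's
# own value minus the opponent's weighted-average value of the complementary
# stat from compstat (e.g. R_TF = TF - OPP_TA), and the residual summaries are
# travel-weighted averages (or weighted, try-count-scaled ratios for the CON%
# stats).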
def get_score(expected_scores): #Get the score for a team based on expected scores
score = 0
if expected_scores['T'] > 0:
tries = poisson(expected_scores['T'])
else:
tries = poisson(0.01)
score = score + 6 * tries
if expected_scores['P'] > 0:
fgs = poisson(expected_scores['P'])
else:
fgs = poisson(0.01)
score = score + 3 * fgs
if expected_scores['DG'] > 0:
sfs = poisson(expected_scores['DG'])
else:
sfs = poisson(0.01)
score = score + 2 * sfs
for t in range(tries):
successful_con_determinant = uniform(0, 1)
if successful_con_determinant <= expected_scores['CONPROB']:
score += 2
else:
continue
#if tries >= 4:
# bp = True
#else:
# bp = False
return (score, tries)
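# As implemented above, a simulated score is
#     6*T + 3*P + 2*DG + 2*(successful conversions),
# where T, P and DG are Poisson draws with means expected_scores['T'], ['P']
# and ['DG'] (0.01 is used when an expected value is non-positive) and each try
# is converted with probability expected_scores['CONPROB'], so the expected
# score is roughly 6*E[T] + 3*E[P] + 2*E[DG] + 2*E[T]*CONPROB.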
def game(team_1, team_2,
expected_scores_1, expected_scores_2,
playoff = False): #Get two scores and determine a winner
(score_1, tries_1) = get_score(expected_scores_1)
(score_2, tries_2) = get_score(expected_scores_2)
if tries_1 - tries_2 >= 3:
bp1 = True
bp2 = False
elif tries_2 - tries_1 >= 3:
bp1 = False
bp2 = True
else:
bp1 = False
bp2 = False
if score_1 > score_2:
win_1 = 1
win_2 = 0
draw_1 = 0
draw_2 = 0
if bp1:
bpw1 = 1
else:
bpw1 = 0
if bp2:
bpl2 = 1
else:
bpl2 = 0
bpl1 = 0
bpw2 = 0
bpd1 = 0
bpd2 = 0
lbp1 = 0
if score_1 - score_2 <= 7:
lbp2 = 1
else:
lbp2 = 0
elif score_2 > score_1:
win_1 = 0
win_2 = 1
draw_1 = 0
draw_2 = 0
if bp1:
bpl1 = 1
else:
bpl1 = 0
if bp2:
bpw2 = 1
else:
bpw2 = 0
bpw1 = 0
bpl2 = 0
bpd1 = 0
bpd2 = 0
lbp2 = 0
if score_2 - score_1 <= 7:
lbp1 = 1
else:
lbp1 = 0
else:
if playoff:
win_1 = 0.5
win_2 = 0.5
draw_1 = 0
draw_2 = 0
bpw1 = 0
bpw2 = 0
bpd1 = 0
bpd2 = 0
bpl1 = 0
bpl2 = 0
lbp1 = 0
lbp2 = 0
else:
win_1 = 0
win_2 = 0
draw_1 = 1
draw_2 = 1
bpw1 = 0
bpw2 = 0
bpl1 = 0
bpl2 = 0
lbp1 = 0
lbp2 = 0
if bp1:
bpd1 = 1
else:
bpd1 = 0
if bp2:
bpd2 = 1
else:
bpd2 = 0
summary = {team_1: [win_1, draw_1, score_1, bpw1, bpd1, bpl1, lbp1]}
summary.update({team_2: [win_2, draw_2, score_2, bpw2, bpd2, bpl2, lbp2]})
return summary
def get_expected_scores(team_1_stats, team_2_stats, team_1_df, team_2_df): #Get the expected scores for a matchup based on the previous teams' performances
expected_scores = {}
for stat in team_1_stats:
expected_scores.update({'T': mean([team_1_stats['TF'] + np.average(team_2_df['TA'], weights = team_2_df['Weight']),
team_2_stats['TA'] + np.average(team_1_df['TF'], weights = team_1_df['Weight'])])})
expected_scores.update({'P': mean([team_1_stats['PF'] + np.average(team_2_df['PA'], weights = team_2_df['Weight']),
team_2_stats['PA'] + np.average(team_1_df['PF'], weights = team_1_df['Weight'])])})
expected_scores.update({'DG': mean([team_1_stats['DGF'] + np.average(team_2_df['DGA'], weights = team_2_df['Weight']),
team_2_stats['DGA'] + np.average(team_1_df['DGF'], weights = team_1_df['Weight'])])})
#print mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
# team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
conprob = mean([team_1_stats['CON%F'] + (team_2_df['CA']*team_2_df['Weight']).sum() / (team_2_df['TA']*team_2_df['Weight']).sum(),
team_2_stats['CON%A'] + (team_1_df['CF']*team_1_df['Weight']).sum() / (team_1_df['TF']*team_1_df['Weight']).sum()])
if not math.isnan(conprob):
expected_scores.update({'CONPROB': conprob})
else:
expected_scores.update({'CONPROB': 0.75})
#print(expected_scores['PAT1PROB'])
#print(expected_scores)
return expected_scores
def geodesic_distance(olat, olng, dlat, dlng):
'''
Returns the geodesic distance between two points on the earth's surface, expressed as a fraction of half the earth's circumference
'''
scale = math.tau/360
olat *= scale
olng *= scale
dlat *= scale
dlng *= scale
delta_lat = (dlat - olat)
delta_lng = (dlng - olng)
a = math.sin(delta_lat/2)**2 + math.cos(olat)*math.cos(dlat)*math.sin(delta_lng/2)**2
return 4*math.atan2(math.sqrt(a), math.sqrt(1-a))/math.tau
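# Hand-computed sanity checks for the haversine-style formula above (kept as
# comments so nothing runs at import time):
#     geodesic_distance(0, 0, 0, 180)  ->  1.0   # antipodal points
#     geodesic_distance(0, 0, 0, 90)   ->  0.5   # a quarter of the way around
#     geodesic_distance(0, 0, 0, 0)    ->  0.0   # same point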
def get_travel_weight(venue, home_lat, home_lng, reference_distance):
'''
Gets the travel weight based on a venue, a team's home lat/long coordinates, and a reference distance
'''
global stadium_locs
(venue_lat, venue_lng) = stadium_locs.loc[venue, ['Lat', 'Long']]
travel_distance = geodesic_distance(home_lat, home_lng, venue_lat, venue_lng)
return 1 - abs(travel_distance - reference_distance)
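# Restating the weighting above: a past fixture whose travel distance matches
# the reference distance for the upcoming match gets weight 1.0, and the weight
# falls off linearly as the two distances diverge (e.g. a difference of 0.25 of
# a half-circumference gives weight 0.75).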
def matchup(team_1, team_2, venue = None):
ts = time.time()
global team_homes, stadium_locs
team_1_home = team_homes[1][team_1]
team_2_home = team_homes[1][team_2]
if venue is None:
venue = team_homes[1][team_1]
(venue_lat, venue_lng) = stadium_locs.loc[venue, ['Lat', 'Long']]
(team_1_home_lat, team_1_home_lng) = stadium_locs.loc[team_1_home, ['Lat', 'Long']]
(team_2_home_lat, team_2_home_lng) = stadium_locs.loc[team_2_home, ['Lat', 'Long']]
team_1_reference_distance = geodesic_distance(team_1_home_lat, team_1_home_lng, venue_lat, venue_lng)
team_2_reference_distance = geodesic_distance(team_2_home_lat, team_2_home_lng, venue_lat, venue_lng)
def get_team_1_weight(location):
return get_travel_weight(location, team_1_home_lat, team_1_home_lng, team_1_reference_distance)
def get_team_2_weight(location):
return get_travel_weight(location, team_2_home_lat, team_2_home_lng, team_2_reference_distance)
team_1_season = pd.DataFrame.from_csv(os.path.join(teamsheetpath, team_1 + '.csv'))
team_2_season = pd.DataFrame.from_csv(os.path.join(teamsheetpath, team_2 + '.csv'))
team_1_season['Weight'] = team_1_season['VENUE'].apply(get_team_1_weight)
team_2_season['Weight'] = team_2_season['VENUE'].apply(get_team_2_weight)
stats_1 = get_residual_performance(team_1_season)
stats_2 = get_residual_performance(team_2_season)
expected_scores_1 = get_expected_scores(stats_1, stats_2, team_1_season, team_2_season)
expected_scores_2 = get_expected_scores(stats_2, stats_1, team_2_season, team_1_season)
team_1_wins = 0
team_2_wins = 0
team_1_draws = 0
team_2_draws = 0
team_1_bpw = 0
team_2_bpw = 0
team_1_bpd = 0
team_2_bpd = 0
team_1_bpl = 0
team_2_bpl = 0
team_1_lbp = 0
team_2_lbp = 0
team_1_scores = []
team_2_scores = []
i = 0
error = 1
    while error > 0.000001 or i < 5000000: #Run at least 5 million iterations, then continue until the win probability converges
summary = game(team_1, team_2,
expected_scores_1, expected_scores_2,
playoff = po)
team_1_prev_wins = team_1_wins
team_1_wins += summary[team_1][0]
team_2_wins += summary[team_2][0]
team_1_draws += summary[team_1][1]
team_2_draws += summary[team_2][1]
team_1_scores.append(summary[team_1][2])
team_2_scores.append(summary[team_2][2])
team_1_bpw += summary[team_1][3]
team_2_bpw += summary[team_2][3]
team_1_bpd += summary[team_1][4]
team_2_bpd += summary[team_2][4]
team_1_bpl += summary[team_1][5]
team_2_bpl += summary[team_2][5]
team_1_lbp += summary[team_1][6]
team_2_lbp += summary[team_2][6]
team_1_prob = float(team_1_wins) / len(team_1_scores)
team_2_prob = float(team_2_wins) / len(team_2_scores)
team_1_bpw_prob = float(team_1_bpw) / len(team_1_scores)
team_2_bpw_prob = float(team_2_bpw) / len(team_2_scores)
team_1_bpd_prob = float(team_1_bpd) / len(team_1_scores)
team_2_bpd_prob = float(team_2_bpd) / len(team_2_scores)
team_1_bpl_prob = float(team_1_bpl) / len(team_1_scores)
team_2_bpl_prob = float(team_2_bpl) / len(team_2_scores)
team_1_lbp_prob = float(team_1_lbp) / len(team_1_scores)
team_2_lbp_prob = float(team_2_lbp) / len(team_2_scores)
if i > 0:
team_1_prev_prob = float(team_1_prev_wins) / i
            error = abs(team_1_prob - team_1_prev_prob)
i = i + 1
if i == 5000000:
print('Probability converged within 5 million iterations')
else:
print('Probability converged after ' + str(i) + ' iterations')
games = pd.DataFrame.from_items([(team_1, team_1_scores), (team_2, team_2_scores)])
pre_summaries = games.describe(percentiles = list(np.linspace(0.05, 0.95, 19)))
summaries = pd.DataFrame(columns = pre_summaries.columns)
summaries.loc['mean'] = pre_summaries.loc['mean']
for i in pre_summaries.index:
try:
percentile = int(round(float(i[:-1])))
summaries.loc['{}%'.format(percentile)] = pre_summaries.loc[i]
except ValueError:
continue
summaries = summaries.reset_index()
for item in summaries.index:
try:
summaries['index'][item] = str(int(float(summaries['index'][item][:-1]))) + '%'
except ValueError:
continue
bonus_points = pd.DataFrame(index = ['4-Try Bonus Point with Win',
'4-Try Bonus Point with Draw',
'4-Try Bonus Point with Loss',
'Losing Bonus Point'])
bonus_points[team_1] = [team_1_bpw_prob, team_1_bpd_prob, team_1_bpl_prob, team_1_lbp_prob]
bonus_points[team_2] = [team_2_bpw_prob, team_2_bpd_prob, team_2_bpl_prob, team_2_lbp_prob]
summaries = summaries.set_index('index')
summaries = summaries.groupby(level = 0).last()
output = {'ProbWin': {team_1: team_1_prob, team_2: team_2_prob}, 'Scores': summaries, 'Bonus Points': bonus_points}
print(team_1 + '/' + team_2 + ' score distributions computed in ' + str(round(time.time() - ts, 1)) + ' seconds')
return output
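# Hedged usage sketch (not part of the original script): matchup() relies on the
# module-level teamsheetpath, stadium_locs, team_homes, and po globals set up earlier in
# the file, plus one CSV teamsheet per team. With hypothetical team names:
#
#     results = matchup('Team A', 'Team B', venue='Neutral Ground')
#     print(results['ProbWin'])
#     print(results['Bonus Points'])
#     results['Scores'].to_csv('team_a_vs_team_b_scores.csv')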
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import logging
import traceback
from . import Command
from ..benchmarks import Benchmarks
from ..console import log
from ..machine import Machine
from ..repo import get_repo
from ..results import (Results, find_latest_result_hash, get_existing_hashes,
iter_results_for_machine_and_hash)
from ..branch_cache import BranchCache
from .. import environment
from .. import util
from .setup import Setup
from . import common_args
def _do_build(args):
env, conf, commit_hash = args
try:
with log.set_level(logging.WARN):
env.install_project(conf, commit_hash)
except util.ProcessError:
return False
return True
def _do_build_multiprocess(args):
"""
multiprocessing callback to build the project in one particular
environment.
"""
try:
return _do_build(args)
except BaseException as exc:
raise util.ParallelFailure(str(exc), exc.__class__, traceback.format_exc())
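# Hedged note (not part of asv itself): _do_build_multiprocess is kept at module level so
# that multiprocessing can pickle it when it is handed to Pool.map() in Run.run() below;
# util.ParallelFailure carries the worker's traceback back to the parent, where
# exc.reraise() re-raises it. A minimal standalone sketch of the same pattern, with a
# hypothetical worker function:
#
#     import multiprocessing
#
#     def work(item):                     # must be a module-level (picklable) callable
#         return item * item
#
#     if __name__ == '__main__':
#         pool = multiprocessing.Pool(2)
#         try:
#             print(pool.map(work, [1, 2, 3]))
#         finally:
#             pool.close()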
class Run(Command):
@classmethod
def setup_arguments(cls, subparsers):
parser = subparsers.add_parser(
"run", help="Run a benchmark suite",
description="Run a benchmark suite.")
parser.add_argument(
'range', nargs='?', default=None,
help="""Range of commits to benchmark. For a git
repository, this is passed as the first argument to ``git
log``. See 'specifying ranges' section of the
`gitrevisions` manpage for more info. Also accepts the
special values 'NEW', 'ALL', and 'EXISTING'. 'NEW' will
benchmark all commits since the latest benchmarked on this
machine. 'ALL' will benchmark all commits in the project.
'EXISTING' will benchmark against all commits for which
there are existing benchmarks on any machine. By default,
will benchmark the head of the current master branch.""")
parser.add_argument(
"--steps", "-s", type=common_args.positive_int, default=None,
help="""Maximum number of steps to benchmark. This is
used to subsample the commits determined by range to a
reasonable number.""")
common_args.add_bench(parser)
parser.add_argument(
"--profile", "-p", action="store_true",
help="""In addition to timing, run the benchmarks through
the `cProfile` profiler and store the results.""")
common_args.add_parallel(parser)
common_args.add_show_stderr(parser)
parser.add_argument(
"--quick", "-q", action="store_true",
help="""Do a "quick" run, where each benchmark function is
run only once. This is useful to find basic errors in the
benchmark functions faster. The results are unlikely to
be useful, and thus are not saved.""")
common_args.add_environment(parser)
parser.add_argument(
"--dry-run", "-n", action="store_true",
default=None,
help="""Do not save any results to disk.""")
common_args.add_machine(parser)
parser.add_argument(
"--skip-existing-successful", action="store_true",
help="""Skip running benchmarks that have previous successful
results""")
parser.add_argument(
"--skip-existing-failed", action="store_true",
help="""Skip running benchmarks that have previous failed
results""")
parser.add_argument(
"--skip-existing-commits", action="store_true",
help="""Skip running benchmarks for commits that have existing
results""")
parser.add_argument(
"--skip-existing", "-k", action="store_true",
help="""Skip running benchmarks that have previous successful
or failed results""")
parser.set_defaults(func=cls.run_from_args)
return parser
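    # Hedged usage sketch (not part of asv itself), derived from the arguments defined
    # above; a few example invocations:
    #
    #     asv run                              # head of the current master branch
    #     asv run NEW                          # commits newer than the latest benchmarked one
    #     asv run "master~10..master" --steps 5 --quick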
@classmethod
def run_from_conf_args(cls, conf, args, **kwargs):
return cls.run(
conf=conf, range_spec=args.range, steps=args.steps,
bench=args.bench, parallel=args.parallel,
show_stderr=args.show_stderr, quick=args.quick,
profile=args.profile, env_spec=args.env_spec,
dry_run=args.dry_run, machine=args.machine,
skip_successful=args.skip_existing_successful or args.skip_existing,
skip_failed=args.skip_existing_failed or args.skip_existing,
skip_existing_commits=args.skip_existing_commits,
**kwargs
)
@classmethod
def run(cls, conf, range_spec=None, steps=None, bench=None, parallel=1,
show_stderr=False, quick=False, profile=False, env_spec=None,
dry_run=False, machine=None, _machine_file=None, skip_successful=False,
skip_failed=False, skip_existing_commits=False, _returns={}):
params = {}
machine_params = Machine.load(
machine_name=machine,
_path=_machine_file, interactive=True)
params.update(machine_params.__dict__)
machine_params.save(conf.results_dir)
environments = list(environment.get_environments(conf, env_spec))
if environment.is_existing_only(environments):
# No repository required, so skip using it
conf.dvcs = "none"
repo = get_repo(conf)
repo.pull()
if range_spec is None:
commit_hashes = [repo.get_hash_from_master()]
elif range_spec == 'EXISTING':
commit_hashes = [h for h, d in get_existing_hashes(
conf.results_dir)]
elif range_spec in ('NEW', 'ALL'):
branch_cache = BranchCache(conf, repo)
commit_hashes = []
seen = set()
for branch in conf.branches:
if range_spec == 'NEW':
branch_hashes = branch_cache.get_branch_commits(branch)
latest_result = find_latest_result_hash(
machine_params.machine, conf.results_dir,
hashes=branch_hashes)
spec = repo.get_new_range_spec(latest_result, branch)
else:
spec = repo.get_branch_range_spec(branch)
new_hashes = repo.get_hashes_from_range(spec)
for commit_hash in new_hashes:
if commit_hash not in seen:
seen.add(commit_hash)
commit_hashes.append(commit_hash)
elif isinstance(range_spec, list):
commit_hashes = range_spec
else:
commit_hashes = repo.get_hashes_from_range(range_spec)
if len(commit_hashes) == 0:
log.error("No commit hashes selected")
return 1
if steps is not None:
commit_hashes = util.pick_n(commit_hashes, steps)
Setup.perform_setup(environments, parallel=parallel)
if len(environments) == 0:
log.error("No environments selected")
return 1
if range_spec is not None:
for env in environments:
if not env.can_install_project():
raise util.UserError(
"No range spec may be specified if benchmarking in "
"an existing environment")
benchmarks = Benchmarks(conf, environments, regex=bench)
if len(benchmarks) == 0:
log.error("No benchmarks selected")
return 1
benchmarks.save()
steps = len(commit_hashes) * len(benchmarks) * len(environments)
log.info(
"Running {0} total benchmarks "
"({1} commits * {2} environments * {3} benchmarks)".format(
steps, len(commit_hashes),
len(environments), len(benchmarks)), "green")
log.set_nitems(steps)
parallel, multiprocessing = util.get_multiprocessing(parallel)
_returns['benchmarks'] = benchmarks
_returns['environments'] = environments
_returns['machine_params'] = machine_params.__dict__
for commit_hash in commit_hashes:
skipped_benchmarks = set()
if skip_successful or skip_failed or skip_existing_commits:
try:
for result in iter_results_for_machine_and_hash(
conf.results_dir, machine_params.machine, commit_hash):
if skip_existing_commits:
skipped_benchmarks.update(benchmarks)
break
for key, value in six.iteritems(result.results):
failed = value is None or (isinstance(value, dict) and None in value['result'])
if skip_failed and failed:
skipped_benchmarks.add(key)
if skip_successful and not failed:
skipped_benchmarks.add(key)
except IOError:
pass
for env in environments:
for bench in benchmarks:
if bench in skipped_benchmarks:
log.step()
if not set(six.iterkeys(benchmarks)).difference(skipped_benchmarks):
continue
if commit_hash:
log.info(
"For {0} commit hash {1}:".format(
conf.project, commit_hash[:8]))
with log.indent():
for subenv in util.iter_chunks(environments, parallel):
log.info("Building for {0}".format(
', '.join([x.name for x in subenv])))
with log.indent():
args = [(env, conf, commit_hash) for env in subenv]
if parallel != 1:
pool = multiprocessing.Pool(parallel)
try:
successes = pool.map(_do_build_multiprocess, args)
except util.ParallelFailure as exc:
exc.reraise()
finally:
pool.close()
else:
successes = map(_do_build, args)
for env, success in zip(subenv, successes):
if success:
params['python'] = env.python
params.update(env.requirements)
results = benchmarks.run_benchmarks(
env, show_stderr=show_stderr, quick=quick,
profile=profile, skip=skipped_benchmarks)
else:
results = benchmarks.skip_benchmarks(env)
if dry_run or isinstance(env, environment.ExistingEnvironment):
continue
result = Results(
params,
env.requirements,
commit_hash,
repo.get_date(commit_hash),
env.python,
env.name)
for benchmark_name, d in six.iteritems(results):
result.add_time(benchmark_name, d['result'])
if 'profile' in d:
result.add_profile(
benchmark_name,
d['profile'])
result.update_save(conf.results_dir)
|
|
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=W0613,no-member,attribute-defined-outside-init
"""
Some "standard" instruments to collect additional info about workload execution.
.. note:: The run() method of a Workload may perform some "boilerplate" as well as
the actual execution of the workload (e.g. it may contain UI automation
needed to start the workload). This "boilerplate" execution will also
          be measured by these instruments. As such, they are not suitable for collecting
precise data about specific operations.
"""
import os
import re
import logging
import time
import tarfile
from itertools import izip, izip_longest
from subprocess import CalledProcessError
from wlauto import Instrument, Parameter
from wlauto.core import signal
from wlauto.exceptions import DeviceError, ConfigError
from wlauto.utils.misc import diff_tokens, write_table, check_output, as_relative
from wlauto.utils.misc import ensure_file_directory_exists as _f
from wlauto.utils.misc import ensure_directory_exists as _d
from wlauto.utils.android import ApkInfo
from wlauto.utils.types import list_of_strings
logger = logging.getLogger(__name__)
class FsExtractor(Instrument):
mount_command = 'mount -t tmpfs -o size={} tmpfs {}'
extract_timeout = 30
tarname = 'sysfs.tar'
DEVICE_PATH = 0
BEFORE_PATH = 1
AFTER_PATH = 2
DIFF_PATH = 3
parameters = [
Parameter('paths', kind=list_of_strings, mandatory=True,
description="""A list of paths to be pulled from the device. These could be directories
as well as files.""",
global_alias='sysfs_extract_dirs'),
Parameter('use_tmpfs', kind=bool, default=None,
description="""
Specifies whether tmpfs should be used to cache sysfile trees and then pull them down
                      as a tarball. This is significantly faster than just copying the directory trees from
                      the device directly, but requires root and may not work on all devices. Defaults to
``True`` if the device is rooted and ``False`` if it is not.
"""),
Parameter('tmpfs_mount_point', default=None,
description="""Mount point for tmpfs partition used to store snapshots of paths."""),
Parameter('tmpfs_size', default='32m',
description="""Size of the tempfs partition."""),
]
def initialize_tmpfs(self, context):
if not self.device.is_rooted and self.use_tmpfs: # pylint: disable=access-member-before-definition
            raise ConfigError('use_tmpfs must be False for an unrooted device.')
elif self.use_tmpfs is None: # pylint: disable=access-member-before-definition
self.use_tmpfs = self.device.is_rooted
if self.use_tmpfs:
self.on_device_before = self.device.path.join(self.tmpfs_mount_point, 'before')
self.on_device_after = self.device.path.join(self.tmpfs_mount_point, 'after')
if not self.device.file_exists(self.tmpfs_mount_point):
self.device.execute('mkdir -p {}'.format(self.tmpfs_mount_point), as_root=True)
self.device.execute(self.mount_command.format(self.tmpfs_size, self.tmpfs_mount_point),
as_root=True)
def setup(self, context):
before_dirs = [
_d(os.path.join(context.output_directory, 'before', self._local_dir(d)))
for d in self.paths
]
after_dirs = [
_d(os.path.join(context.output_directory, 'after', self._local_dir(d)))
for d in self.paths
]
diff_dirs = [
_d(os.path.join(context.output_directory, 'diff', self._local_dir(d)))
for d in self.paths
]
self.device_and_host_paths = zip(self.paths, before_dirs, after_dirs, diff_dirs)
if self.use_tmpfs:
for d in self.paths:
before_dir = self.device.path.join(self.on_device_before,
self.device.path.dirname(as_relative(d)))
after_dir = self.device.path.join(self.on_device_after,
self.device.path.dirname(as_relative(d)))
if self.device.file_exists(before_dir):
self.device.execute('rm -rf {}'.format(before_dir), as_root=True)
self.device.execute('mkdir -p {}'.format(before_dir), as_root=True)
if self.device.file_exists(after_dir):
self.device.execute('rm -rf {}'.format(after_dir), as_root=True)
self.device.execute('mkdir -p {}'.format(after_dir), as_root=True)
def slow_start(self, context):
if self.use_tmpfs:
for d in self.paths:
dest_dir = self.device.path.join(self.on_device_before, as_relative(d))
if '*' in dest_dir:
dest_dir = self.device.path.dirname(dest_dir)
self.device.execute('{} cp -Hr {} {}'.format(self.device.busybox, d, dest_dir),
as_root=True, check_exit_code=False)
else: # not rooted
for dev_dir, before_dir, _, _ in self.device_and_host_paths:
self.device.pull_file(dev_dir, before_dir)
def slow_stop(self, context):
if self.use_tmpfs:
for d in self.paths:
dest_dir = self.device.path.join(self.on_device_after, as_relative(d))
if '*' in dest_dir:
dest_dir = self.device.path.dirname(dest_dir)
self.device.execute('{} cp -Hr {} {}'.format(self.device.busybox, d, dest_dir),
as_root=True, check_exit_code=False)
else: # not using tmpfs
for dev_dir, _, after_dir, _ in self.device_and_host_paths:
self.device.pull_file(dev_dir, after_dir)
def update_result(self, context):
if self.use_tmpfs:
on_device_tarball = self.device.path.join(self.device.working_directory, self.tarname)
on_host_tarball = self.device.path.join(context.output_directory, self.tarname + ".gz")
self.device.execute('{} tar cf {} -C {} .'.format(self.device.busybox,
on_device_tarball,
self.tmpfs_mount_point),
as_root=True)
self.device.execute('chmod 0777 {}'.format(on_device_tarball), as_root=True)
self.device.execute('{} gzip -f {}'.format(self.device.busybox,
on_device_tarball))
self.device.pull_file(on_device_tarball + ".gz", on_host_tarball)
with tarfile.open(on_host_tarball, 'r:gz') as tf:
tf.extractall(context.output_directory)
self.device.delete_file(on_device_tarball + ".gz")
os.remove(on_host_tarball)
for paths in self.device_and_host_paths:
after_dir = paths[self.AFTER_PATH]
dev_dir = paths[self.DEVICE_PATH].strip('*') # remove potential trailing '*'
if (not os.listdir(after_dir) and
self.device.file_exists(dev_dir) and
self.device.listdir(dev_dir)):
self.logger.error('sysfs files were not pulled from the device.')
self.device_and_host_paths.remove(paths) # Path is removed to skip diffing it
for _, before_dir, after_dir, diff_dir in self.device_and_host_paths:
_diff_sysfs_dirs(before_dir, after_dir, diff_dir)
def teardown(self, context):
self._one_time_setup_done = []
def finalize(self, context):
if self.use_tmpfs:
try:
self.device.execute('umount {}'.format(self.tmpfs_mount_point), as_root=True)
except (DeviceError, CalledProcessError):
# assume a directory but not mount point
pass
self.device.execute('rm -rf {}'.format(self.tmpfs_mount_point),
as_root=True, check_exit_code=False)
def validate(self):
if not self.tmpfs_mount_point: # pylint: disable=access-member-before-definition
self.tmpfs_mount_point = self.device.path.join(self.device.working_directory, 'temp-fs')
def _local_dir(self, directory):
return os.path.dirname(as_relative(directory).replace(self.device.path.sep, os.sep))
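# Hedged summary (not part of the original module) of the on-host layout produced by
# FsExtractor, based on setup() and update_result() above: for every entry in `paths` a
# mirrored copy ends up under the run's output directory, e.g.
#
#     <output_directory>/before/<path>/...   snapshot taken before the workload runs
#     <output_directory>/after/<path>/...    snapshot taken after the workload runs
#     <output_directory>/diff/<path>/...     per-file token diffs of the two snapshots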
class SysfsExtractor(FsExtractor):
name = 'sysfs_extractor'
description = """
    Collects the contents of a set of directories, before and after workload execution
and diffs the result.
"""
def initialize(self, context):
self.initialize_tmpfs(context)
class ExecutionTimeInstrument(Instrument):
name = 'execution_time'
description = """
    Measure how long it took to execute the run() method of a Workload.
"""
priority = 15
def __init__(self, device, **kwargs):
super(ExecutionTimeInstrument, self).__init__(device, **kwargs)
self.start_time = None
self.end_time = None
def on_run_start(self, context):
signal.connect(self.get_start_time, signal.BEFORE_WORKLOAD_EXECUTION, priority=self.priority)
signal.connect(self.get_stop_time, signal.AFTER_WORKLOAD_EXECUTION, priority=self.priority)
def get_start_time(self, context):
self.start_time = time.time()
def get_stop_time(self, context):
self.end_time = time.time()
def update_result(self, context):
execution_time = self.end_time - self.start_time
context.result.add_metric('execution_time', execution_time, 'seconds')
class InterruptStatsInstrument(Instrument):
name = 'interrupts'
description = """
Pulls the ``/proc/interrupts`` file before and after workload execution and diffs them
to show what interrupts occurred during that time.
"""
def __init__(self, device, **kwargs):
super(InterruptStatsInstrument, self).__init__(device, **kwargs)
self.before_file = None
self.after_file = None
self.diff_file = None
def setup(self, context):
self.before_file = os.path.join(context.output_directory, 'before', 'proc', 'interrupts')
self.after_file = os.path.join(context.output_directory, 'after', 'proc', 'interrupts')
self.diff_file = os.path.join(context.output_directory, 'diff', 'proc', 'interrupts')
def start(self, context):
with open(_f(self.before_file), 'w') as wfh:
wfh.write(self.device.execute('cat /proc/interrupts'))
def stop(self, context):
with open(_f(self.after_file), 'w') as wfh:
wfh.write(self.device.execute('cat /proc/interrupts'))
def update_result(self, context):
# If workload execution failed, the after_file may not have been created.
if os.path.isfile(self.after_file):
_diff_interrupt_files(self.before_file, self.after_file, _f(self.diff_file))
class DynamicFrequencyInstrument(FsExtractor):
name = 'cpufreq'
description = """
Collects dynamic frequency (DVFS) settings before and after workload execution.
"""
tarname = 'cpufreq.tar'
parameters = [
Parameter('paths', mandatory=False, override=True),
]
def initialize(self, context):
self.initialize_tmpfs(context)
def setup(self, context):
self.paths = ['/sys/devices/system/cpu']
if self.use_tmpfs:
self.paths.append('/sys/class/devfreq/*') # the '*' would cause problems for adb pull.
super(DynamicFrequencyInstrument, self).setup(context)
    def validate(self):
        super(DynamicFrequencyInstrument, self).validate()
# temp-fs would have been set in super's validate, if not explicitly specified.
if not self.tmpfs_mount_point.endswith('-cpufreq'): # pylint: disable=access-member-before-definition
self.tmpfs_mount_point += '-cpufreq'
def _diff_interrupt_files(before, after, result): # pylint: disable=R0914
output_lines = []
with open(before) as bfh:
with open(after) as ofh:
for bline, aline in izip(bfh, ofh):
bchunks = bline.strip().split()
while True:
achunks = aline.strip().split()
if achunks[0] == bchunks[0]:
diffchunks = ['']
diffchunks.append(achunks[0])
diffchunks.extend([diff_tokens(b, a) for b, a
in zip(bchunks[1:], achunks[1:])])
output_lines.append(diffchunks)
break
else: # new category appeared in the after file
diffchunks = ['>'] + achunks
output_lines.append(diffchunks)
try:
aline = ofh.next()
except StopIteration:
break
# Offset heading columns by one to allow for row labels on subsequent
# lines.
output_lines[0].insert(0, '')
# Any "columns" that do not have headings in the first row are not actually
    # columns -- they are a single column where space-separated words got
# split. Merge them back together to prevent them from being
# column-aligned by write_table.
table_rows = [output_lines[0]]
num_cols = len(output_lines[0])
for row in output_lines[1:]:
table_row = row[:num_cols]
table_row.append(' '.join(row[num_cols:]))
table_rows.append(table_row)
with open(result, 'w') as wfh:
write_table(table_rows, wfh)
def _diff_sysfs_dirs(before, after, result): # pylint: disable=R0914
before_files = []
os.path.walk(before,
lambda arg, dirname, names: arg.extend([os.path.join(dirname, f) for f in names]),
before_files
)
before_files = filter(os.path.isfile, before_files)
files = [os.path.relpath(f, before) for f in before_files]
after_files = [os.path.join(after, f) for f in files]
diff_files = [os.path.join(result, f) for f in files]
for bfile, afile, dfile in zip(before_files, after_files, diff_files):
if not os.path.isfile(afile):
logger.debug('sysfs_diff: {} does not exist or is not a file'.format(afile))
continue
with open(bfile) as bfh, open(afile) as afh: # pylint: disable=C0321
with open(_f(dfile), 'w') as dfh:
for i, (bline, aline) in enumerate(izip_longest(bfh, afh), 1):
if aline is None:
logger.debug('Lines missing from {}'.format(afile))
break
bchunks = re.split(r'(\W+)', bline)
achunks = re.split(r'(\W+)', aline)
if len(bchunks) != len(achunks):
logger.debug('Token length mismatch in {} on line {}'.format(bfile, i))
dfh.write('xxx ' + bline)
continue
if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and
(bchunks[0] == achunks[0])):
# if there are only two columns and the first column is the
# same, assume it's a "header" column and do not diff it.
dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])]
else:
dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)]
dfh.write(''.join(dchunks))
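# Hedged illustration (not part of the original module): re.split() with a capturing
# group keeps the separators in the result, which is what allows the diff above to be
# reassembled with ''.join() while preserving the original spacing. Guarded so it only
# runs when the module is executed directly.
if __name__ == '__main__':
    _before_line = 'cpu0:  1500000 kHz\n'
    _after_line = 'cpu0:  1800000 kHz\n'
    _b = re.split(r'(\W+)', _before_line)
    _a = re.split(r'(\W+)', _after_line)
    print(''.join(a if a == b else '{}->{}'.format(b, a) for b, a in zip(_b, _a)))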
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type
from unittest import mock
from urllib.parse import ParseResult, urlparse
import pytest
import yaml
from _pytest._code import ExceptionInfo
from botocore.exceptions import ClientError
from freezegun import freeze_time
from moto.core import ACCOUNT_ID
from moto.core.exceptions import AWSError
from moto.eks.exceptions import (
InvalidParameterException,
InvalidRequestException,
ResourceInUseException,
ResourceNotFoundException,
)
from moto.eks.models import (
CLUSTER_EXISTS_MSG,
CLUSTER_IN_USE_MSG,
CLUSTER_NOT_FOUND_MSG,
CLUSTER_NOT_READY_MSG,
FARGATE_PROFILE_EXISTS_MSG,
FARGATE_PROFILE_NEEDS_SELECTOR_MSG,
FARGATE_PROFILE_NOT_FOUND_MSG,
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
FARGATE_PROFILE_TOO_MANY_LABELS,
LAUNCH_TEMPLATE_WITH_DISK_SIZE_MSG,
LAUNCH_TEMPLATE_WITH_REMOTE_ACCESS_MSG,
NODEGROUP_EXISTS_MSG,
NODEGROUP_NOT_FOUND_MSG,
)
from airflow.providers.amazon.aws.hooks.eks import EksHook
from ..utils.eks_test_constants import (
DEFAULT_CONN_ID,
DEFAULT_NAMESPACE,
DISK_SIZE,
FROZEN_TIME,
INSTANCE_TYPES,
LAUNCH_TEMPLATE,
MAX_FARGATE_LABELS,
NODEGROUP_OWNERSHIP_TAG_DEFAULT_VALUE,
NODEGROUP_OWNERSHIP_TAG_KEY,
NON_EXISTING_CLUSTER_NAME,
NON_EXISTING_FARGATE_PROFILE_NAME,
NON_EXISTING_NODEGROUP_NAME,
PACKAGE_NOT_PRESENT_MSG,
PARTITION,
POD_EXECUTION_ROLE_ARN,
REGION,
REMOTE_ACCESS,
BatchCountSize,
ClusterAttributes,
ClusterInputs,
ErrorAttributes,
FargateProfileAttributes,
FargateProfileInputs,
NodegroupAttributes,
NodegroupInputs,
PossibleTestResults,
RegExTemplates,
ResponseAttributes,
)
from ..utils.eks_test_utils import (
attributes_to_test,
generate_clusters,
generate_dict,
generate_fargate_profiles,
generate_nodegroups,
iso_date,
region_matches_partition,
)
try:
from moto import mock_eks
except ImportError:
mock_eks = None
@pytest.fixture(scope="function")
def cluster_builder():
"""A fixture to generate a batch of EKS Clusters on the mocked backend for testing."""
class ClusterTestDataFactory:
"""A Factory class for building the Cluster objects."""
def __init__(self, count: int, minimal: bool) -> None:
# Generate 'count' number of Cluster objects.
self.cluster_names: List[str] = generate_clusters(
eks_hook=eks_hook, num_clusters=count, minimal=minimal
)
self.existing_cluster_name: str = self.cluster_names[0]
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_cluster() for the first Cluster.
self.cluster_describe_output: Dict = eks_hook.describe_cluster(name=self.existing_cluster_name)[
ResponseAttributes.CLUSTER
]
# Generate a list of the Cluster attributes to be tested when validating results.
self.attributes_to_test: List[Tuple] = attributes_to_test(
inputs=ClusterInputs, cluster_name=self.existing_cluster_name
)
def _execute(count: int = 1, minimal: bool = True) -> Tuple[EksHook, ClusterTestDataFactory]:
return eks_hook, ClusterTestDataFactory(count=count, minimal=minimal)
mock_eks().start()
eks_hook = EksHook(
aws_conn_id=DEFAULT_CONN_ID,
region_name=REGION,
)
yield _execute
mock_eks().stop()
@pytest.fixture(scope="function")
def fargate_profile_builder(cluster_builder):
"""A fixture to generate a batch of EKS Fargate profiles on the mocked backend for testing."""
class FargateProfileTestDataFactory:
"""A Factory class for building the Fargate profile objects."""
def __init__(self, count: int, minimal: bool) -> None:
self.cluster_name = cluster.existing_cluster_name
# Generate 'count' number of FargateProfile objects.
self.fargate_profile_names = generate_fargate_profiles(
eks_hook=eks_hook,
cluster_name=self.cluster_name,
num_profiles=count,
minimal=minimal,
)
# Get the name of the first generated profile.
self.existing_fargate_profile_name: str = self.fargate_profile_names[0]
self.nonexistent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_fargate_profiles() for the first profile.
self.fargate_describe_output: Dict = eks_hook.describe_fargate_profile(
clusterName=self.cluster_name, fargateProfileName=self.existing_fargate_profile_name
)[ResponseAttributes.FARGATE_PROFILE]
# Generate a list of the Fargate Profile attributes to be tested when validating results.
self.attributes_to_test: List[Tuple] = attributes_to_test(
inputs=FargateProfileInputs,
cluster_name=self.cluster_name,
fargate_profile_name=self.existing_fargate_profile_name,
)
def _execute(count: int = 1, minimal: bool = True) -> Tuple[EksHook, FargateProfileTestDataFactory]:
return eks_hook, FargateProfileTestDataFactory(count=count, minimal=minimal)
eks_hook, cluster = cluster_builder()
return _execute
@pytest.fixture(scope="function")
def nodegroup_builder(cluster_builder):
"""A fixture to generate a batch of EKS Managed Nodegroups on the mocked backend for testing."""
class NodegroupTestDataFactory:
"""A Factory class for building the Nodegroup objects."""
def __init__(self, count: int, minimal: bool) -> None:
self.cluster_name: str = cluster.existing_cluster_name
# Generate 'count' number of Nodegroup objects.
self.nodegroup_names: List[str] = generate_nodegroups(
eks_hook=eks_hook,
cluster_name=self.cluster_name,
num_nodegroups=count,
minimal=minimal,
)
# Get the name of the first generated Nodegroup.
self.existing_nodegroup_name: str = self.nodegroup_names[0]
self.nonexistent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
self.nonexistent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
# Collect the output of describe_nodegroup() for the first Nodegroup.
self.nodegroup_describe_output: Dict = eks_hook.describe_nodegroup(
clusterName=self.cluster_name, nodegroupName=self.existing_nodegroup_name
)[ResponseAttributes.NODEGROUP]
# Generate a list of the Nodegroup attributes to be tested when validating results.
self.attributes_to_test: List[Tuple] = attributes_to_test(
inputs=NodegroupInputs,
cluster_name=self.cluster_name,
nodegroup_name=self.existing_nodegroup_name,
)
def _execute(count: int = 1, minimal: bool = True) -> Tuple[EksHook, NodegroupTestDataFactory]:
return eks_hook, NodegroupTestDataFactory(count=count, minimal=minimal)
eks_hook, cluster = cluster_builder()
return _execute
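# Hedged usage note (not part of the original tests): each builder fixture returns a
# callable, so the tests below unpack it as, for example:
#
#     eks_hook, generated_test_data = nodegroup_builder(count=BatchCountSize.SMALL, minimal=False)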
@pytest.mark.skipif(mock_eks is None, reason=PACKAGE_NOT_PRESENT_MSG)
class TestEksHooks:
def test_hook(self, cluster_builder) -> None:
eks_hook, _ = cluster_builder()
assert eks_hook.get_conn() is not None
assert eks_hook.aws_conn_id == DEFAULT_CONN_ID
assert eks_hook.region_name == REGION
###
# This specific test does not use the fixture since
# it is intended to verify that there are no clusters
# in the list at initialization, which means the mock
# decorator must be used manually in this one case.
###
@mock_eks
def test_list_clusters_returns_empty_by_default(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
result: List = eks_hook.list_clusters()
assert isinstance(result, list)
assert len(result) == 0
def test_list_clusters_returns_sorted_cluster_names(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.cluster_names)
result: List = eks_hook.list_clusters()
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_clusters_returns_all_results(
self, cluster_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.cluster_names)
result: List = eks_hook.list_clusters()
assert_result_matches_expected_list(result, expected_result)
def test_create_cluster_throws_exception_when_cluster_exists(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = CLUSTER_EXISTS_MSG.format(
clusterName=generated_test_data.existing_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_cluster(
name=generated_test_data.existing_cluster_name, **dict(ClusterInputs.REQUIRED) # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new cluster was created.
len_after_test: int = len(eks_hook.list_clusters())
assert len_after_test == initial_batch_size
def test_create_cluster_generates_valid_cluster_arn(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
expected_arn_values: List = [
PARTITION,
REGION,
ACCOUNT_ID,
generated_test_data.cluster_names,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.CLUSTER_ARN,
arn_under_test=generated_test_data.cluster_describe_output[ClusterAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_cluster_generates_valid_cluster_created_timestamp(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_time: datetime = generated_test_data.cluster_describe_output[ClusterAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_cluster_generates_valid_cluster_endpoint(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_endpoint: str = generated_test_data.cluster_describe_output[ClusterAttributes.ENDPOINT]
assert_is_valid_uri(result_endpoint)
def test_create_cluster_generates_valid_oidc_identity(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder()
result_issuer: str = generated_test_data.cluster_describe_output[ClusterAttributes.IDENTITY][
ClusterAttributes.OIDC
][ClusterAttributes.ISSUER]
assert_is_valid_uri(result_issuer)
def test_create_cluster_saves_provided_parameters(self, cluster_builder) -> None:
_, generated_test_data = cluster_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.cluster_describe_output[key] == expected_value
def test_describe_cluster_throws_exception_when_cluster_not_found(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_cluster(name=generated_test_data.nonexistent_cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_cluster_returns_deleted_cluster(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size, minimal=False)
result: Dict = eks_hook.delete_cluster(name=generated_test_data.existing_cluster_name)[
ResponseAttributes.CLUSTER
]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_cluster_removes_deleted_cluster(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size, minimal=False)
eks_hook.delete_cluster(name=generated_test_data.existing_cluster_name)
result_cluster_list: List = eks_hook.list_clusters()
assert len(result_cluster_list) == (initial_batch_size - 1)
assert generated_test_data.existing_cluster_name not in result_cluster_list
def test_delete_cluster_throws_exception_when_cluster_not_found(
self, cluster_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = cluster_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_cluster(name=generated_test_data.nonexistent_cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify nothing was deleted.
cluster_count_after_test: int = len(eks_hook.list_clusters())
assert cluster_count_after_test == initial_batch_size
def test_list_nodegroups_returns_empty_by_default(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
result: List = eks_hook.list_nodegroups(clusterName=generated_test_data.existing_cluster_name)
assert isinstance(result, list)
assert len(result) == 0
def test_list_nodegroups_returns_sorted_nodegroup_names(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.nodegroup_names)
result: List = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_nodegroups_returns_all_results(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.nodegroup_names)
result: List = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result)
@mock_eks
def test_create_nodegroup_throws_exception_when_cluster_not_found(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
non_existent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
non_existent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=non_existent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=non_existent_cluster_name,
nodegroupName=non_existent_nodegroup_name,
**dict(NodegroupInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_create_nodegroup_throws_exception_when_nodegroup_already_exists(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = NODEGROUP_EXISTS_MSG.format(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
**dict(NodegroupInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
def test_create_nodegroup_throws_exception_when_cluster_not_active(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
non_existent_nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: Type[AWSError] = InvalidRequestException
expected_msg: str = CLUSTER_NOT_READY_MSG.format(
clusterName=generated_test_data.cluster_name,
)
with mock.patch("moto.eks.models.Cluster.isActive", return_value=False):
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=non_existent_nodegroup_name,
**dict(NodegroupInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
def test_create_nodegroup_generates_valid_nodegroup_arn(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
expected_arn_values: List = [
PARTITION,
REGION,
ACCOUNT_ID,
generated_test_data.cluster_name,
generated_test_data.nodegroup_names,
None,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.NODEGROUP_ARN,
arn_under_test=generated_test_data.nodegroup_describe_output[NodegroupAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_nodegroup_generates_valid_nodegroup_created_timestamp(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_time: datetime = generated_test_data.nodegroup_describe_output[NodegroupAttributes.CREATED_AT]
assert iso_date(result_time) == FROZEN_TIME
@freeze_time(FROZEN_TIME)
def test_create_nodegroup_generates_valid_nodegroup_modified_timestamp(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_time: datetime = generated_test_data.nodegroup_describe_output[NodegroupAttributes.MODIFIED_AT]
assert iso_date(result_time) == FROZEN_TIME
def test_create_nodegroup_generates_valid_autoscaling_group_name(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_resources: Dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.RESOURCES]
result_asg_name: str = result_resources[NodegroupAttributes.AUTOSCALING_GROUPS][0][
NodegroupAttributes.NAME
]
assert RegExTemplates.NODEGROUP_ASG_NAME_PATTERN.match(result_asg_name)
def test_create_nodegroup_generates_valid_security_group_name(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
result_resources: Dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.RESOURCES]
result_security_group: str = result_resources[NodegroupAttributes.REMOTE_ACCESS_SG]
assert RegExTemplates.NODEGROUP_SECURITY_GROUP_NAME_PATTERN.match(result_security_group)
def test_create_nodegroup_saves_provided_parameters(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.nodegroup_describe_output[key] == expected_value
def test_create_nodegroup_without_tags_uses_default(self, nodegroup_builder) -> None:
_, generated_test_data = nodegroup_builder()
tag_list: Dict = generated_test_data.nodegroup_describe_output[NodegroupAttributes.TAGS]
ownership_tag_key: str = NODEGROUP_OWNERSHIP_TAG_KEY.format(
cluster_name=generated_test_data.cluster_name
)
assert tag_list.get(ownership_tag_key) == NODEGROUP_OWNERSHIP_TAG_DEFAULT_VALUE
def test_create_nodegroup_with_ownership_tag_uses_provided_value(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
cluster_name: str = generated_test_data.existing_cluster_name
ownership_tag_key: str = NODEGROUP_OWNERSHIP_TAG_KEY.format(cluster_name=cluster_name)
provided_tag_value: str = "shared"
created_nodegroup: Dict = eks_hook.create_nodegroup(
clusterName=cluster_name,
nodegroupName="nodegroup",
tags={ownership_tag_key: provided_tag_value},
**dict(deepcopy(NodegroupInputs.REQUIRED)),
)[ResponseAttributes.NODEGROUP]
assert created_nodegroup.get(NodegroupAttributes.TAGS).get(ownership_tag_key) == provided_tag_value
def test_describe_nodegroup_throws_exception_when_cluster_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_nodegroup(
clusterName=generated_test_data.nonexistent_cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_describe_nodegroup_throws_exception_when_nodegroup_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = NODEGROUP_NOT_FOUND_MSG.format(
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_cluster_throws_exception_when_nodegroups_exist(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = CLUSTER_IN_USE_MSG
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_cluster(name=generated_test_data.cluster_name)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no clusters were deleted.
cluster_count_after_test: int = len(eks_hook.list_clusters())
assert cluster_count_after_test == BatchCountSize.SINGLE
def test_delete_nodegroup_removes_deleted_nodegroup(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
result_nodegroup_list: List = eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
assert len(result_nodegroup_list) == (initial_batch_size - 1)
assert generated_test_data.existing_nodegroup_name not in result_nodegroup_list
def test_delete_nodegroup_returns_deleted_nodegroup(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size, minimal=False)
result: Dict = eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)[ResponseAttributes.NODEGROUP]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_nodegroup_throws_exception_when_cluster_not_found(self, nodegroup_builder) -> None:
eks_hook, generated_test_data = nodegroup_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_nodegroup(
clusterName=generated_test_data.nonexistent_cluster_name,
nodegroupName=generated_test_data.existing_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_nodegroup_throws_exception_when_nodegroup_not_found(
self, nodegroup_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = nodegroup_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = NODEGROUP_NOT_FOUND_MSG.format(
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_nodegroup(
clusterName=generated_test_data.cluster_name,
nodegroupName=generated_test_data.nonexistent_nodegroup_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new nodegroup was created.
nodegroup_count_after_test: int = len(
eks_hook.list_nodegroups(clusterName=generated_test_data.cluster_name)
)
assert nodegroup_count_after_test == initial_batch_size
# If launch_template is specified, you can not specify instanceTypes, diskSize, or remoteAccess.
test_cases = [
# Happy Paths
(LAUNCH_TEMPLATE, None, None, None, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, None, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, None, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, DISK_SIZE, None, PossibleTestResults.SUCCESS),
(None, INSTANCE_TYPES, None, None, PossibleTestResults.SUCCESS),
(None, None, DISK_SIZE, None, PossibleTestResults.SUCCESS),
(None, None, None, REMOTE_ACCESS, PossibleTestResults.SUCCESS),
(None, None, None, None, PossibleTestResults.SUCCESS),
# Unhappy Paths
(LAUNCH_TEMPLATE, INSTANCE_TYPES, None, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, DISK_SIZE, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, None, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, DISK_SIZE, None, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, None, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, None, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.FAILURE),
(LAUNCH_TEMPLATE, INSTANCE_TYPES, DISK_SIZE, REMOTE_ACCESS, PossibleTestResults.FAILURE),
]
@pytest.mark.parametrize(
"launch_template, instance_types, disk_size, remote_access, expected_result",
test_cases,
)
def test_create_nodegroup_handles_launch_template_combinations(
self,
cluster_builder,
launch_template,
instance_types,
disk_size,
remote_access,
expected_result,
):
eks_hook, generated_test_data = cluster_builder()
nodegroup_name: str = NON_EXISTING_NODEGROUP_NAME
expected_exception: Type[AWSError] = InvalidParameterException
expected_message: str = ""
test_inputs = dict(
deepcopy(
# Required Constants
NodegroupInputs.REQUIRED
# Required Variables
+ [
(
ClusterAttributes.CLUSTER_NAME,
generated_test_data.existing_cluster_name,
),
(NodegroupAttributes.NODEGROUP_NAME, nodegroup_name),
]
# Test Case Values
+ [_ for _ in [launch_template, instance_types, disk_size, remote_access] if _]
)
)
if expected_result == PossibleTestResults.SUCCESS:
result: Dict = eks_hook.create_nodegroup(**test_inputs)[ResponseAttributes.NODEGROUP]
expected_output = deepcopy(test_inputs)
# The Create Nodegroup hook magically adds the required
# cluster/owned tag, so add that to the expected outputs.
expected_output['tags'] = {
f'kubernetes.io/cluster/{generated_test_data.existing_cluster_name}': 'owned'
}
for key, expected_value in expected_output.items():
assert result[key] == expected_value
else:
if launch_template and disk_size:
expected_message = LAUNCH_TEMPLATE_WITH_DISK_SIZE_MSG
elif launch_template and remote_access:
expected_message = LAUNCH_TEMPLATE_WITH_REMOTE_ACCESS_MSG
# Docs say this combination throws an exception but testing shows that
# instanceTypes overrides the launchTemplate instance values instead.
# Leaving here for easier correction if/when that gets fixed.
elif launch_template and instance_types:
pass
if expected_message:
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_nodegroup(**test_inputs)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_message,
raised_exception=raised_exception,
)
def test_list_fargate_profiles_returns_empty_by_default(self, cluster_builder) -> None:
eks_hook, generated_test_data = cluster_builder()
result: List = eks_hook.list_fargate_profiles(clusterName=generated_test_data.existing_cluster_name)
assert isinstance(result, list)
assert len(result) == 0
def test_list_fargate_profiles_returns_sorted_profile_names(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.fargate_profile_names)
result: List = eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result, initial_batch_size)
def test_list_fargate_profiles_returns_all_results(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.LARGE
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_result: List = sorted(generated_test_data.fargate_profile_names)
result: List = eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
assert_result_matches_expected_list(result, expected_result)
@mock_eks
def test_create_fargate_profile_throws_exception_when_cluster_not_found(self) -> None:
eks_hook: EksHook = EksHook(aws_conn_id=DEFAULT_CONN_ID, region_name=REGION)
non_existent_cluster_name: str = NON_EXISTING_CLUSTER_NAME
non_existent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(clusterName=non_existent_cluster_name)
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=non_existent_cluster_name,
fargateProfileName=non_existent_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_create_fargate_profile_throws_exception_when_fargate_profile_already_exists(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceInUseException
expected_msg: str = FARGATE_PROFILE_EXISTS_MSG
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
def test_create_fargate_profile_throws_exception_when_cluster_not_active(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
non_existent_fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: Type[AWSError] = InvalidRequestException
expected_msg: str = CLUSTER_NOT_READY_MSG.format(
clusterName=generated_test_data.cluster_name,
)
with mock.patch("moto.eks.models.Cluster.isActive", return_value=False):
with pytest.raises(ClientError) as raised_exception:
eks_hook.create_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=non_existent_fargate_profile_name,
**dict(FargateProfileInputs.REQUIRED), # type: ignore
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
# Verify no new Fargate profile was created.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
def test_create_fargate_profile_generates_valid_profile_arn(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder()
expected_arn_values: List = [
PARTITION,
REGION,
ACCOUNT_ID,
generated_test_data.cluster_name,
generated_test_data.fargate_profile_names,
None,
]
assert_all_arn_values_are_valid(
expected_arn_values=expected_arn_values,
pattern=RegExTemplates.FARGATE_PROFILE_ARN,
arn_under_test=generated_test_data.fargate_describe_output[FargateProfileAttributes.ARN],
)
@freeze_time(FROZEN_TIME)
def test_create_fargate_profile_generates_valid_created_timestamp(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder()
result_time: datetime = generated_test_data.fargate_describe_output[
FargateProfileAttributes.CREATED_AT
]
assert iso_date(result_time) == FROZEN_TIME
def test_create_fargate_profile_saves_provided_parameters(self, fargate_profile_builder) -> None:
_, generated_test_data = fargate_profile_builder(minimal=False)
for key, expected_value in generated_test_data.attributes_to_test:
assert generated_test_data.fargate_describe_output[key] == expected_value
def test_describe_fargate_profile_throws_exception_when_cluster_not_found(
self, fargate_profile_builder
) -> None:
eks_hook, generated_test_data = fargate_profile_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.describe_fargate_profile(
clusterName=generated_test_data.nonexistent_cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_describe_fargate_profile_throws_exception_when_profile_not_found(
self, fargate_profile_builder
) -> None:
client, generated_test_data = fargate_profile_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = FARGATE_PROFILE_NOT_FOUND_MSG.format(
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
with pytest.raises(ClientError) as raised_exception:
client.describe_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_fargate_profile_removes_deleted_fargate_profile(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(initial_batch_size)
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
result_fargate_profile_list: List = eks_hook.list_fargate_profiles(
clusterName=generated_test_data.cluster_name
)
assert len(result_fargate_profile_list) == (initial_batch_size - 1)
assert generated_test_data.existing_fargate_profile_name not in result_fargate_profile_list
def test_delete_fargate_profile_returns_deleted_fargate_profile(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size, minimal=False)
result: Dict = eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)[ResponseAttributes.FARGATE_PROFILE]
for key, expected_value in generated_test_data.attributes_to_test:
assert result[key] == expected_value
def test_delete_fargate_profile_throws_exception_when_cluster_not_found(
self, fargate_profile_builder
) -> None:
eks_hook, generated_test_data = fargate_profile_builder()
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = CLUSTER_NOT_FOUND_MSG.format(
clusterName=generated_test_data.nonexistent_cluster_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.nonexistent_cluster_name,
fargateProfileName=generated_test_data.existing_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
def test_delete_fargate_profile_throws_exception_when_fargate_profile_not_found(
self, fargate_profile_builder, initial_batch_size: int = BatchCountSize.SMALL
) -> None:
eks_hook, generated_test_data = fargate_profile_builder(count=initial_batch_size)
expected_exception: Type[AWSError] = ResourceNotFoundException
expected_msg: str = FARGATE_PROFILE_NOT_FOUND_MSG.format(
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
with pytest.raises(ClientError) as raised_exception:
eks_hook.delete_fargate_profile(
clusterName=generated_test_data.cluster_name,
fargateProfileName=generated_test_data.nonexistent_fargate_profile_name,
)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_msg,
raised_exception=raised_exception,
)
        # Verify no Fargate profile was deleted.
fargate_profile_count_after_test: int = len(
eks_hook.list_fargate_profiles(clusterName=generated_test_data.cluster_name)
)
assert fargate_profile_count_after_test == initial_batch_size
# The following Selector test cases have all been verified against the AWS API using cURL.
selector_formatting_test_cases = [
# Format is ([Selector(s), expected_message, expected_result])
# Happy Paths
# Selector with a Namespace and no Labels
(
[{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and an empty collection of Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", 0),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and one valid Label
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", 1),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Selector with a Namespace and the maximum number of Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS),
}
],
None,
PossibleTestResults.SUCCESS,
),
# Two valid Selectors
(
[
{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE},
{FargateProfileAttributes.NAMESPACE: f'{DEFAULT_NAMESPACE}_2'},
],
None,
PossibleTestResults.SUCCESS,
),
# Unhappy Cases
# No Selectors provided
([], FARGATE_PROFILE_NEEDS_SELECTOR_MSG, PossibleTestResults.FAILURE),
# Empty Selector / Selector without a Namespace or Labels
([{}], FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE, PossibleTestResults.FAILURE),
# Selector with labels but no Namespace
(
[{FargateProfileAttributes.LABELS: generate_dict("label", 1)}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Selector with Namespace but too many Labels
(
[
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS + 1),
}
],
FARGATE_PROFILE_TOO_MANY_LABELS,
PossibleTestResults.FAILURE,
),
# Valid Selector followed by Empty Selector
(
[{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}, {}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Empty Selector followed by Valid Selector
(
[{}, {FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE}],
FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE,
PossibleTestResults.FAILURE,
),
# Empty Selector followed by Empty Selector
([{}, {}], FARGATE_PROFILE_SELECTOR_NEEDS_NAMESPACE, PossibleTestResults.FAILURE),
# Valid Selector followed by Selector with Namespace but too many Labels
(
[
{FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE},
{
FargateProfileAttributes.NAMESPACE: DEFAULT_NAMESPACE,
FargateProfileAttributes.LABELS: generate_dict("label", MAX_FARGATE_LABELS + 1),
},
],
FARGATE_PROFILE_TOO_MANY_LABELS,
PossibleTestResults.FAILURE,
),
]
@pytest.mark.parametrize(
"selectors, expected_message, expected_result",
selector_formatting_test_cases,
)
@mock_eks
def test_create_fargate_selectors(self, cluster_builder, selectors, expected_message, expected_result):
client, generated_test_data = cluster_builder()
cluster_name: str = generated_test_data.existing_cluster_name
fargate_profile_name: str = NON_EXISTING_FARGATE_PROFILE_NAME
expected_exception: Type[AWSError] = InvalidParameterException
test_inputs = dict(
deepcopy(
# Required Constants
[POD_EXECUTION_ROLE_ARN]
# Required Variables
+ [
(ClusterAttributes.CLUSTER_NAME, cluster_name),
(FargateProfileAttributes.FARGATE_PROFILE_NAME, fargate_profile_name),
]
# Test Case Values
+ [(FargateProfileAttributes.SELECTORS, selectors)]
)
)
if expected_result == PossibleTestResults.SUCCESS:
result: List = client.create_fargate_profile(**test_inputs)[ResponseAttributes.FARGATE_PROFILE]
for key, expected_value in test_inputs.items():
assert result[key] == expected_value
else:
with pytest.raises(ClientError) as raised_exception:
client.create_fargate_profile(**test_inputs)
assert_client_error_exception_thrown(
expected_exception=expected_exception,
expected_msg=expected_message,
raised_exception=raised_exception,
)
class TestEksHook:
@mock.patch('airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn')
@pytest.mark.parametrize(
"aws_conn_id, region_name, expected_args",
[
[
'test-id',
'test-region',
[
'-m',
'airflow.providers.amazon.aws.utils.eks_get_token',
'--region-name',
'test-region',
'--aws-conn-id',
'test-id',
'--cluster-name',
'test-cluster',
],
],
[
None,
'test-region',
[
'-m',
'airflow.providers.amazon.aws.utils.eks_get_token',
'--region-name',
'test-region',
'--cluster-name',
'test-cluster',
],
],
[
None,
None,
['-m', 'airflow.providers.amazon.aws.utils.eks_get_token', '--cluster-name', 'test-cluster'],
],
],
)
def test_generate_config_file(self, mock_conn, aws_conn_id, region_name, expected_args):
mock_conn.describe_cluster.return_value = {
'cluster': {'certificateAuthority': {'data': 'test-cert'}, 'endpoint': 'test-endpoint'}
}
hook = EksHook(aws_conn_id=aws_conn_id, region_name=region_name)
with hook.generate_config_file(
eks_cluster_name='test-cluster', pod_namespace='k8s-namespace'
) as config_file:
config = yaml.safe_load(Path(config_file).read_text())
assert config == {
'apiVersion': 'v1',
'kind': 'Config',
'clusters': [
{
'cluster': {'server': 'test-endpoint', 'certificate-authority-data': 'test-cert'},
'name': 'test-cluster',
}
],
'contexts': [
{
'context': {'cluster': 'test-cluster', 'namespace': 'k8s-namespace', 'user': 'aws'},
'name': 'aws',
}
],
'current-context': 'aws',
'preferences': {},
'users': [
{
'name': 'aws',
'user': {
'exec': {
'apiVersion': 'client.authentication.k8s.io/v1alpha1',
'args': expected_args,
'command': sys.executable,
'env': [{'name': 'AIRFLOW__LOGGING__LOGGING_LEVEL', 'value': 'fatal'}],
'interactiveMode': 'Never',
}
},
}
],
}
@mock.patch('airflow.providers.amazon.aws.hooks.eks.RequestSigner')
@mock.patch('airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn')
@mock.patch('airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.get_session')
def test_fetch_access_token_for_cluster(self, mock_get_session, mock_conn, mock_signer):
mock_signer.return_value.generate_presigned_url.return_value = 'http://example.com'
mock_get_session.return_value.region_name = 'us-east-1'
hook = EksHook()
token = hook.fetch_access_token_for_cluster(eks_cluster_name='test-cluster')
mock_signer.assert_called_once_with(
service_id=mock_conn.meta.service_model.service_id,
region_name='us-east-1',
signing_name='sts',
signature_version='v4',
credentials=mock_get_session.return_value.get_credentials.return_value,
event_emitter=mock_get_session.return_value.events,
)
mock_signer.return_value.generate_presigned_url.assert_called_once_with(
request_dict={
'method': 'GET',
'url': 'https://sts.us-east-1.amazonaws.com/?Action=GetCallerIdentity&Version=2011-06-15',
'body': {},
'headers': {'x-k8s-aws-id': 'test-cluster'},
'context': {},
},
region_name='us-east-1',
expires_in=60,
operation_name='',
)
assert token == 'k8s-aws-v1.aHR0cDovL2V4YW1wbGUuY29t'
# Helper methods for repeated assert combinations.
def assert_all_arn_values_are_valid(expected_arn_values, pattern, arn_under_test) -> None:
"""
Applies regex `pattern` to `arn_under_test` and asserts
that each group matches the provided expected value.
A list entry of None in the 'expected_arn_values' will
assert that the value exists but not match a specific value.
"""
findall: List = pattern.findall(arn_under_test)[0]
# findall() returns a list of matches from right to left so it must be reversed
# in order to match the logical order of the 'expected_arn_values' list.
for value in reversed(findall):
expected_value = expected_arn_values.pop()
if expected_value:
assert value in expected_value
else:
assert value
assert region_matches_partition(findall[1], findall[0])
def assert_client_error_exception_thrown(
expected_exception: Type[AWSError], expected_msg: str, raised_exception: ExceptionInfo
) -> None:
"""
Asserts that the raised exception is of the expected type
and the resulting message matches the expected format.
"""
response = raised_exception.value.response[ErrorAttributes.ERROR]
assert response[ErrorAttributes.CODE] == expected_exception.TYPE
assert response[ErrorAttributes.MESSAGE] == expected_msg
def assert_result_matches_expected_list(
result: List, expected_result: List, expected_len: Optional[int] = None
) -> None:
assert result == expected_result
    assert len(result) == (expected_len or len(expected_result))
def assert_is_valid_uri(value: str) -> None:
result: ParseResult = urlparse(value)
assert all([result.scheme, result.netloc, result.path])
assert REGION in value
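# A minimal sketch (not part of the test suite above) of how the expected value in
# test_fetch_access_token_for_cluster can be reproduced: the EKS bearer token is the
# base64url-encoded STS presigned URL, with padding stripped, behind a "k8s-aws-v1."
# prefix. The helper name is illustrative only.
import base64
def _expected_eks_token(presigned_url: str) -> str:
    encoded = base64.urlsafe_b64encode(presigned_url.encode('utf-8')).decode('utf-8')
    return 'k8s-aws-v1.' + encoded.rstrip('=')
# _expected_eks_token('http://example.com') == 'k8s-aws-v1.aHR0cDovL2V4YW1wbGUuY29t'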
|
|
#!/usr/bin/env python
import datetime
import argparse
from egcg_core import util
from cached_property import cached_property
from EPPs.common import StepEPP, RestCommunicationEPP
reporting_app_date_format = '%d_%m_%Y_%H:%M:%S'
class StepPopulator(StepEPP, RestCommunicationEPP):
metrics_mapping = {}
endpoint = None
def output_artifacts_per_sample(self, sample_name):
return [
io[1]['uri']
for io in self.process.input_output_maps
if io[0]['uri'].samples[0].name == sample_name and io[1]['output-type'] == 'ResultFile'
]
def check_rest_data_and_artifacts(self, sample_name):
query_args = {'where': {'sample_id': sample_name}}
rest_entities = self.get_documents(self.endpoint, **query_args)
artifacts = self.output_artifacts_per_sample(sample_name=sample_name)
if len(rest_entities) != len(artifacts): # in sample review this will be 1, in run review this will be more
raise AssertionError(
'Data mismatch for sample %s: got %s Rest entities, %s output artifacts' % (
sample_name, len(rest_entities), len(artifacts)
)
)
return rest_entities, artifacts
def delivered(self, sample_name):
d = {'yes': True, 'no': False}
query_args = {'where': {'sample_id': sample_name}}
sample = self.get_documents('samples', **query_args)[0]
return d.get(sample.get('delivered'))
def processed(self, sample_name):
query_args = {'where': {'sample_id': sample_name}}
sample = self.get_documents('samples', **query_args)[0]
processing_status = util.query_dict(sample, 'aggregated.most_recent_proc.status')
return processing_status == 'finished'
def _run(self):
raise NotImplementedError
class PullInfo(StepPopulator):
def __init__(self, argv=None):
super().__init__(argv)
self.pull_data = not self.cmd_args.assess_only
@staticmethod
def add_args(argparser):
argparser.add_argument('--assess_only', action='store_true')
def _run(self):
artifacts_to_upload = set()
_ = self.output_artifacts # batch retrieve input and output artifacts along with samples
for sample in self.samples:
if self.pull_data:
self.debug('Adding artifact info for %s', sample.name)
artifacts_to_upload.update(self.add_artifact_info(sample))
self.debug('Assessing sample %s', sample.name)
artifacts_to_upload.update(self.assess_sample(sample))
self.lims.put_batch(artifacts_to_upload)
def add_artifact_info(self, sample):
rest_entities, artifacts = self.check_rest_data_and_artifacts(sample.name)
artifacts_to_upload = set()
for i in range(len(rest_entities)):
for art_field, api_field in self.metrics_mapping:
value = self.field_from_entity(rest_entities[i], api_field)
if value is not None:
if api_field.endswith('date'):
value = datetime.datetime.strptime(value, reporting_app_date_format).strftime('%Y-%m-%d')
artifacts[i].udf[art_field] = value
artifacts_to_upload.add(artifacts[i])
return artifacts_to_upload
def field_from_entity(self, entity, api_field):
return util.query_dict(entity, api_field)
def assess_sample(self, sample):
raise NotImplementedError
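# Illustrative only: the date normalisation performed in PullInfo.add_artifact_info,
# which converts the reporting app's '%d_%m_%Y_%H:%M:%S' timestamps into ISO dates
# before writing them to artifact UDFs. The input value below is made up.
def _example_reporting_date_to_iso(value='25_03_2021_14:05:00'):
    return datetime.datetime.strptime(value, reporting_app_date_format).strftime('%Y-%m-%d')  # -> '2021-03-25'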
class PullRunElementInfo(PullInfo):
endpoint = 'run_elements'
metrics_mapping = [
('RE Id', 'run_element_id'),
('RE Nb Reads', 'passing_filter_reads'),
('RE Yield', 'aggregated.clean_yield_in_gb'),
('RE Yield Q30', 'aggregated.clean_yield_q30_in_gb'),
('RE %Q30', 'aggregated.clean_pc_q30'),
('RE Coverage', 'coverage.mean'),
('RE Estimated Duplicate Rate', 'lane_pc_optical_dups'),
('RE %Adapter', 'aggregated.pc_adaptor'),
('RE Review status', 'reviewed'),
('RE Review Comment', 'review_comments'),
('RE Review date', 'review_date'),
('RE previous Useable', 'useable'),
('RE previous Useable Comment', 'useable_comments'),
('RE previous Useable date', 'useable_date')
]
def assess_sample(self, sample):
artifacts_to_upload = set()
artifacts = self.output_artifacts_per_sample(sample_name=sample.name)
un_reviewed_artifacts = [a for a in artifacts if a.udf.get('RE Review status') not in ['pass', 'fail']]
if un_reviewed_artifacts:
# Skip samples that have un-reviewed run elements - could still be sequencing and change review outcome
return artifacts_to_upload
# Artifacts that pass the review
pass_artifacts = [a for a in artifacts if a.udf.get('RE Review status') == 'pass']
# Artifacts that fail the review
fail_artifacts = [a for a in artifacts if a.udf.get('RE Review status') == 'fail']
# Artifacts that are new
new_artifacts = [a for a in artifacts if a.udf.get('RE previous Useable') not in ['yes', 'no']]
# skip samples which have been delivered, mark any new REs as such, not changing older RE comments
if self.delivered(sample.name):
for a in new_artifacts:
a.udf['RE Useable Comment'] = 'AR: Delivered'
a.udf['RE Useable'] = 'no'
for a in pass_artifacts + fail_artifacts:
if a.udf.get('RE previous Useable Comment') and a.udf.get('RE previous Useable'):
a.udf['RE Useable Comment'] = a.udf.get('RE previous Useable Comment')
a.udf['RE Useable'] = a.udf.get('RE previous Useable')
artifacts_to_upload.update(artifacts)
return artifacts_to_upload
# skip samples which have been processed, mark any new REs as such, not changing older RE comments
if self.processed(sample.name):
for a in pass_artifacts + fail_artifacts:
if a.udf.get('RE previous Useable Comment') and a.udf.get('RE previous Useable'):
a.udf['RE Useable Comment'] = a.udf.get('RE previous Useable Comment')
a.udf['RE Useable'] = a.udf.get('RE previous Useable')
for a in new_artifacts:
a.udf['RE Useable Comment'] = 'AR: Sample already processed'
a.udf['RE Useable'] = 'no'
artifacts_to_upload.update(artifacts)
return artifacts_to_upload
target_yield = float(sample.udf.get('Required Yield (Gb)'))
good_re_yield = sum([float(a.udf.get('RE Yield')) for a in pass_artifacts])
# Increase target coverage by 5% to resolve borderline cases
target_coverage = 1.05 * sample.udf.get('Coverage (X)')
obtained_coverage = float(sum([a.udf.get('RE Coverage') for a in pass_artifacts]))
        # Too much good yield: limit to the best-quality run elements
if good_re_yield > target_yield * 2 and obtained_coverage > target_coverage:
# Too much yield: sort the good artifact by quality
pass_artifacts.sort(key=lambda x: x.udf.get('RE %Q30'), reverse=True)
current_yield = 0
for a in pass_artifacts:
current_yield += float(a.udf.get('RE Yield'))
if current_yield < target_yield * 2:
a.udf['RE Useable'] = 'yes'
a.udf['RE Useable Comment'] = 'AR: Good yield'
else:
a.udf['RE Useable'] = 'no'
a.udf['RE Useable Comment'] = 'AR: Too much good yield'
for a in fail_artifacts:
a.udf['RE Useable'] = 'no'
a.udf['RE Useable Comment'] = 'AR: Failed and not needed'
artifacts_to_upload.update(artifacts)
# Just the right amount of good yield: take it all
elif target_yield < good_re_yield < target_yield * 2 or obtained_coverage > target_coverage:
for a in pass_artifacts:
a.udf['RE Useable'] = 'yes'
a.udf['RE Useable Comment'] = 'AR: Good yield'
for a in fail_artifacts:
a.udf['RE Useable'] = 'no'
a.udf['RE Useable Comment'] = 'AR: Failed and not needed'
artifacts_to_upload.update(artifacts)
# Not enough good yield: manual decision
# Run element not passing review: manual decision
return artifacts_to_upload
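# A worked example (illustrative numbers only) of the thresholds applied in
# PullRunElementInfo.assess_sample: passing run elements are only trimmed down when
# the good yield exceeds twice the required yield AND the obtained coverage exceeds
# 1.05 x the required coverage; otherwise they are either all marked useable or left
# for a manual decision.
def _example_yield_assessment(required_yield_gb=120.0, required_coverage_x=30,
                              good_re_yield=260.0, obtained_coverage=33.0):
    target_coverage = 1.05 * required_coverage_x  # 31.5 with the defaults above
    if good_re_yield > required_yield_gb * 2 and obtained_coverage > target_coverage:
        return 'too much good yield: keep only the best-quality run elements'
    elif required_yield_gb < good_re_yield < required_yield_gb * 2 or obtained_coverage > target_coverage:
        return 'enough good yield: mark all passing run elements useable'
    return 'not enough good yield: manual decision'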
class PullSampleInfo(PullInfo):
endpoint = 'samples'
metrics_mapping = [
('SR Yield (Gb)', 'aggregated.clean_yield_in_gb'),
('SR %Q30', 'aggregated.clean_pc_q30'),
('SR % Mapped', 'aggregated.pc_mapped_reads'),
('SR % Duplicates', 'aggregated.pc_duplicate_reads'),
('SR Mean Coverage', 'coverage.mean'),
('SR Species Found', 'aggregated.matching_species'),
('SR Sex Check Match', 'aggregated.sex_match'),
('SR Genotyping Match', 'aggregated.genotype_match'),
('SR Freemix', 'sample_contamination.freemix'),
('SR Review Status', 'reviewed'),
('SR Review Comments', 'review_comments'),
('SR Review Date', 'review_date'),
('SR previous Useable', 'useable'),
('SR previous Useable Comments', 'useable_comments'),
('SR previous Useable Date', 'useable_date')
]
def assess_sample(self, sample):
artifacts = self.output_artifacts_per_sample(sample_name=sample.name)
un_reviewed_artifacts = [a for a in artifacts if a.udf.get('SR Review Status') not in ['pass', 'fail']]
if un_reviewed_artifacts:
# Skip unreviewed samples
return artifacts
for a in artifacts:
if a.udf.get('SR Review Status') == 'pass':
a.udf['SR Useable'] = 'yes'
a.udf['SR Useable Comments'] = 'AR: Review passed'
elif a.udf.get('SR Review Status') == 'fail':
a.udf['SR Useable'] = 'no'
a.udf['SR Useable Comments'] = 'AR: Review failed'
return artifacts
def field_from_entity(self, entity, api_field):
field = super().field_from_entity(entity, api_field)
if api_field == 'aggregated.matching_species':
return ', '.join(field)
return field
class PushInfo(StepPopulator):
api_id_field = None
def review_entity_uid(self, artifact):
raise NotImplementedError
@cached_property
def current_time(self):
return datetime.datetime.now().strftime(reporting_app_date_format)
def _run(self):
# batch retrieve input and output artifacts along with samples
_ = self.output_artifacts
for sample in self.samples:
self.info('Pushing data for sample %s', sample.name)
rest_entities, artifacts = self.check_rest_data_and_artifacts(sample.name)
rest_api_data = {}
for e in rest_entities:
rest_api_data[e[self.api_id_field]] = e
for artifact in artifacts:
                rest_entity = rest_api_data.get(self.review_entity_uid(artifact))
payload = {}
for art_field, api_field in self.metrics_mapping:
value = artifact.udf.get(art_field)
                    if value is not None and value != rest_entity.get(api_field):
payload[api_field] = value
if payload:
# The date is set to now.
payload['useable_date'] = self.current_time
self.patch_entry(self.endpoint, payload, self.api_id_field, self.review_entity_uid(artifact))
# finish the action on the rest api
self.patch_entry(
'actions',
{'date_finished': self.current_time},
'action_id',
'lims_' + self.process.id
)
class PushRunElementInfo(PushInfo):
endpoint = 'run_elements'
api_id_field = 'run_element_id'
metrics_mapping = [
('RE Useable', 'useable'),
('RE Useable Comment', 'useable_comments'),
]
def review_entity_uid(self, artifact):
return artifact.udf.get('RE Id')
class PushSampleInfo(PushInfo):
endpoint = 'samples'
api_id_field = 'sample_id'
metrics_mapping = [
('SR Useable', 'useable'),
('SR Useable Comments', 'useable_comments'),
]
def review_entity_uid(self, artifact):
return artifact.samples[0].name
def main():
p = argparse.ArgumentParser()
p.add_argument('--review_type', required=True, choices=('run', 'sample'))
p.add_argument('--action_type', required=True, choices=('pull', 'push'))
args, cls_args = p.parse_known_args()
reviewer_map = {
'run': {'pull': PullRunElementInfo, 'push': PushRunElementInfo},
'sample': {'pull': PullSampleInfo, 'push': PushSampleInfo}
}
action = reviewer_map[args.review_type][args.action_type](cls_args)
action.run()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#need easy_install ntplib
#sudo apt-get install python-crypto
import datetime,time,sys,os
import pprint, pickle
import ntplib
import thread
import threading
useNTPserversync=True
update_time_NTPserversync_delay=400 #seconds between NTP time corrections (compensates for ping latency and keeps the synchronisation accurate)
deported_time=False
deport_time_day=0 #0-x
deport_time_hour=9 #0-24
deport_time_minute=30 #0-59
deport_time_second=0 #0-59
deport_time_microsecond=0 #0-999999
next_key_approching_dt=3 #seconds before the next key change at which to switch to the fast polling loop
default_apprach_dt=0.001 #fast polling interval in seconds (between 0.2 and 0.001; 0.1 is also fine)
default_dt=0.5 #default polling interval in seconds (0.5 is enough to track second-level key changes)
loop_dt=''
key_dir_path=''
exiting_there=False
class FuncThread(threading.Thread):
def __init__(self, target, *args):
self._target = target
self._args = args
threading.Thread.__init__(self)
def run(self):
self._target(*self._args)
#get the real time from an online NTP server
def get_real_timedate():
table_ntp_server=['fr.pool.ntp.org']
x = ntplib.NTPClient()
try:
answer=datetime.datetime.utcfromtimestamp(x.request(str(table_ntp_server[0])).tx_time)
except:
print "ERROR:> no connectivity to "+str(table_ntp_server)
sys.exit()
answer=""
return answer
#apply the timedelta between the local clock and the NTP reference returned by get_real_timedate()
def set_real_timedate():
global deport_time_day,deport_time_hour,deport_time_minute,deport_time_second,deport_time_microsecond,deported_time
rt=get_real_timedate()
ost=datetime.datetime.now()
real_delay=ost-rt
#print "dt between legal time " + str(real_delay.total_seconds())
deport_time_day=0 #0-x
deport_time_hour=0 #0-24
deport_time_minute=0 #0-59
deport_time_second=-real_delay.total_seconds() #0-59
deport_time_microsecond=real_delay.microseconds #0-999999
deported_time=True
def update_time_delay(arg1='',arg2=''):
global update_time_NTPserversync_delay
global exiting_there
print "************************* SET TIME DELAY according to real time getin wiht ntp \n"
set_real_timedate()
#print "*************************updating via ntp is ok<"
time.sleep(update_time_NTPserversync_delay)
if exiting_there==False:
update_time_delay()
def millis():
return int(str(int(round(time.time() * 1000)))[-3:])
def convert_datetime_2_timestamp(date):
part_a=str(date).split(' ')[0].replace('-','')
part_b=date.hour*3600+date.minute *60+date.second
return part_a,part_b
def getdatetime():
global deported_time,deport_time_day,deport_time_hour,deport_time_minute,deport_time_second,deport_time_microsecond
if deported_time==True:
rt=datetime.datetime.now()
td=datetime.timedelta(days=deport_time_day, seconds=deport_time_second, microseconds=deport_time_microsecond, milliseconds=0, minutes=deport_time_minute, hours=deport_time_hour, weeks=0)
d=rt+td
else:
d=datetime.datetime.now()
return d
def loadtable(file_table):
pkl_file = open(file_table, 'rb')
table = pickle.load(pkl_file)
return table
def loadcurrentday_table(delta_day=0):
global local,key_dir_path
d=getdatetime()+datetime.timedelta(days=delta_day)
obj_t= convert_datetime_2_timestamp(d)
pkl_filename=obj_t[0]
if len(pkl_filename)==8:
pkl_filename=pkl_filename[2:]
if local==True:
table=loadtable('key/'+pkl_filename+'.pkl') #local
else:
if key_dir_path!='':
table=loadtable(key_dir_path+os.sep+pkl_filename+'.pkl')
else:
table=loadtable('crypto_client/key/'+pkl_filename+'.pkl') #from ../
return table
def write_current_key(key_obj):
output = open('current_key.pkl', 'wb')
pickle.dump(key_obj, output, -1)
output.close()
def do_key_regeneration_job(key_dir_path_arg=""):
global key_dir_path
key_dir_path=key_dir_path_arg
global exiting_there
global useNTPserversync
global update_time_NTPserversync_delay
global deported_time
global deport_time_day
global deport_time_hour
global deport_time_minute
global deport_time_second
global deport_time_microsecond
global next_key_approching_dt
global default_apprach_dt
global default_dt
global loop_dt
current_key_value=''
next_key_is_focused=False
next_key_approching=False
next_key_is_reach=False
next_timestamp=0
next_i=0
next_day_approching=False
pre_next_day_is_reach=False
next_day_is_reach=False
next_day_now=False
table_yesterday=[]
table_today=[]
table=loadcurrentday_table()
table_today=table
next_day_cp=0
correction=0
draw_l=""
if useNTPserversync==True:
print "starting refresh with ntp thread"
thread_refresh_with_ntp=FuncThread(update_time_delay)
thread_refresh_with_ntp.start()
set_real_timedate()
if deported_time==True:
print "deported time"
print "Starting"
exiting=False
firstrun=True
while exiting==False:
#loop_start_dt=datetime.datetime.now()
if next_day_approching==False:
table=table_today
pass
else:
table=table_yesterday
t_info= convert_datetime_2_timestamp(getdatetime())
cu_dt=default_dt
if next_key_is_focused==False or next_key_is_reach==True or next_day_is_reach==True:
if next_day_approching==True:
if pre_next_day_is_reach==True:
next_day_cp=next_day_cp+1
if next_day_cp==2:
#print "focused new key with next_day_approching=True"
next_day_now=True
next_day_approching=False
next_day_is_reach=False
pre_next_day_is_reach=False
table=table_today
cu_dt=default_apprach_dt
next_i=0
dt_changing_key=0
for i in range(next_i,len(table)):
if int(table[i][1])>int(t_info[1]):
next_timestamp=int(table[i][1])
next_i=i
dt_changing_key=int(table[i][1])-int(t_info[1])
current_key=table[i-1]
current_key_value = current_key[2]
#print "key found" #+ str(table[i-1])
#print "next key is "+str(table[i])
#print "change key in " + str(dt_changing_key) +"s"
next_key_is_focused=True
next_key_approching=False
next_key_is_reach=False
#print "current key ID:" + str(current_key[1]) +" VALUE:'"+ current_key_value +"'"
write_current_key(current_key)
d=str(datetime.datetime.now())
print "> encrypted key changed at \n [ostime:"+d+"\n [softwaretime:"+str(getdatetime())+"]"
break
else:
dt_changing_key=next_timestamp-int(t_info[1])
#dt_changing_key=[dt_changing_key_fullobj[0],dt_changing_key_fullobj[1]]
if dt_changing_key<=next_key_approching_dt:
#print "approching changing"
#print "change key to "+ str(table[i][1]) +" in " + str(dt_changing_key) +"s"
#if dt_changing_key!=0:
#draw="change key to "+ str(table[i][1]) +" in " + str(dt_changing_key) +"s"
# if draw!=draw_l:
# draw_l=draw
# print draw
#print dt_changing_key_fullobj[3]
next_key_approching=True
cu_dt=default_apprach_dt
#modify time_sleep
if dt_changing_key<=1:
#print milli
#print "high precision runnuer \n [ostime:"+str(datetime.datetime.now())+"\n [softwaretime:"+str(getdatetime())+"]"
cu_dt=float(default_apprach_dt)/10.0
#print cu_dt
if dt_changing_key<=0:
#print "changing key now! [real:"+str(getdatetime())+" / local:"+str(datetime.datetime.now())+"]"
#print "change key to "+ str(table[i][1])
next_key_is_reach=True
cu_dt=0
pass
if cu_dt==default_dt:
try:
#draw="change key to "+ str(table[i][1]) +" in " + str(dt_changing_key) +"s"
if draw!=draw_l:
draw_l=draw
print draw
pass
except:
pass
else:
pass
if next_i>len(table)-3:
print "> changing day db prepare"
if next_day_approching==False:
table_yesterday=table
try:
print "loading next day"
table_today=loadcurrentday_table(1)
except:
pass
next_day_approching=True
if next_i==len(table)-1:
print " > critical before changing day"
pre_next_day_is_reach=True
#let possibility to be exiting by outside
#try:
# with open(os.getcwd()+os.sep+'crypto_client'+os.sep+'exit_run','r') as f:
# f.read()
# break
# exiting=True
# exiting_there=True
# sys.exit()
#except:
# pass
#print cu_dt
#print loop_dt
#loop_end_dt=datetime.datetime.now()
#loop_dt= loop_end_dt- loop_start_dt
#print loop_dt.microseconds/1000
time.sleep(cu_dt)
if useNTPserversync==True:
print "exiting refresh with ntp thread"
        exiting_there=True # threading.Thread has no exit(); signal the refresh thread to stop after its next sleep
local=False
if __name__ == "__main__":
global local
print "need be run "
local=True
do_key_regeneration_job()
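# A minimal, separate sketch (not wired into the script above) of the clock-offset
# idea behind set_real_timedate()/getdatetime(): measure the offset between the local
# clock and an NTP reference once, keep it as a timedelta, and add it to every
# datetime.now() call instead of touching the system clock. Names are illustrative.
def make_corrected_clock(ntp_now, local_now=None):
    local_now = local_now or datetime.datetime.now()
    offset = ntp_now - local_now
    return lambda: datetime.datetime.now() + offset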
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
class ProjectsTestJSON(base.BaseIdentityAdminTest):
_interface = 'json'
def _delete_project(self, project_id):
resp, _ = self.v3_client.delete_project(project_id)
self.assertEqual(resp['status'], '204')
self.assertRaises(
exceptions.NotFound, self.v3_client.get_project, project_id)
@attr(type='gate')
def test_project_list_delete(self):
# Create several projects and delete them
for _ in xrange(3):
resp, project = self.v3_client.create_project(
rand_name('project-new'))
self.addCleanup(self._delete_project, project['id'])
resp, list_projects = self.v3_client.list_projects()
self.assertEqual(resp['status'], '200')
resp, get_project = self.v3_client.get_project(project['id'])
self.assertIn(get_project, list_projects)
@attr(type='gate')
def test_project_create_with_description(self):
# Create project with a description
project_name = rand_name('project-')
project_desc = rand_name('desc-')
resp, project = self.v3_client.create_project(
project_name, description=project_desc)
self.v3data.projects.append(project)
st1 = resp['status']
project_id = project['id']
desc1 = project['description']
self.assertEqual(st1, '201')
self.assertEqual(desc1, project_desc, 'Description should have '
'been sent in response for create')
resp, body = self.v3_client.get_project(project_id)
desc2 = body['description']
        self.assertEqual(desc2, project_desc, 'Description does not appear '
'to be set')
@attr(type='gate')
def test_project_create_enabled(self):
# Create a project that is enabled
project_name = rand_name('project-')
resp, project = self.v3_client.create_project(
project_name, enabled=True)
self.v3data.projects.append(project)
project_id = project['id']
st1 = resp['status']
en1 = project['enabled']
self.assertEqual(st1, '201')
self.assertTrue(en1, 'Enable should be True in response')
resp, body = self.v3_client.get_project(project_id)
en2 = body['enabled']
self.assertTrue(en2, 'Enable should be True in lookup')
@attr(type='gate')
def test_project_create_not_enabled(self):
# Create a project that is not enabled
project_name = rand_name('project-')
resp, project = self.v3_client.create_project(
project_name, enabled=False)
self.v3data.projects.append(project)
st1 = resp['status']
en1 = project['enabled']
self.assertEqual(st1, '201')
self.assertEqual('false', str(en1).lower(),
'Enable should be False in response')
resp, body = self.v3_client.get_project(project['id'])
en2 = body['enabled']
self.assertEqual('false', str(en2).lower(),
'Enable should be False in lookup')
@attr(type='gate')
def test_project_update_name(self):
# Update name attribute of a project
p_name1 = rand_name('project-')
resp, project = self.v3_client.create_project(p_name1)
self.v3data.projects.append(project)
resp1_name = project['name']
p_name2 = rand_name('project2-')
resp, body = self.v3_client.update_project(project['id'], name=p_name2)
st2 = resp['status']
resp2_name = body['name']
self.assertEqual(st2, '200')
self.assertNotEqual(resp1_name, resp2_name)
resp, body = self.v3_client.get_project(project['id'])
resp3_name = body['name']
self.assertNotEqual(resp1_name, resp3_name)
self.assertEqual(p_name1, resp1_name)
self.assertEqual(resp2_name, resp3_name)
@attr(type='gate')
def test_project_update_desc(self):
# Update description attribute of a project
p_name = rand_name('project-')
p_desc = rand_name('desc-')
resp, project = self.v3_client.create_project(
p_name, description=p_desc)
self.v3data.projects.append(project)
resp1_desc = project['description']
p_desc2 = rand_name('desc2-')
resp, body = self.v3_client.update_project(
project['id'], description=p_desc2)
st2 = resp['status']
resp2_desc = body['description']
self.assertEqual(st2, '200')
self.assertNotEqual(resp1_desc, resp2_desc)
resp, body = self.v3_client.get_project(project['id'])
resp3_desc = body['description']
self.assertNotEqual(resp1_desc, resp3_desc)
self.assertEqual(p_desc, resp1_desc)
self.assertEqual(resp2_desc, resp3_desc)
@attr(type='gate')
def test_project_update_enable(self):
# Update the enabled attribute of a project
p_name = rand_name('project-')
p_en = False
resp, project = self.v3_client.create_project(p_name, enabled=p_en)
self.v3data.projects.append(project)
resp1_en = project['enabled']
p_en2 = True
resp, body = self.v3_client.update_project(
project['id'], enabled=p_en2)
st2 = resp['status']
resp2_en = body['enabled']
self.assertEqual(st2, '200')
self.assertNotEqual(resp1_en, resp2_en)
resp, body = self.v3_client.get_project(project['id'])
resp3_en = body['enabled']
self.assertNotEqual(resp1_en, resp3_en)
self.assertEqual('false', str(resp1_en).lower())
self.assertEqual(resp2_en, resp3_en)
@attr(type='gate')
def test_associate_user_to_project(self):
#Associate a user to a project
#Create a Project
p_name = rand_name('project-')
resp, project = self.v3_client.create_project(p_name)
self.v3data.projects.append(project)
#Create a User
u_name = rand_name('user-')
u_desc = u_name + 'description'
u_email = u_name + '@testmail.tm'
u_password = rand_name('pass-')
resp, user = self.v3_client.create_user(
u_name, description=u_desc, password=u_password,
email=u_email, project_id=project['id'])
self.assertEqual(resp['status'], '201')
# Delete the User at the end of this method
self.addCleanup(self.v3_client.delete_user, user['id'])
# Get User To validate the user details
resp, new_user_get = self.v3_client.get_user(user['id'])
#Assert response body of GET
self.assertEqual(u_name, new_user_get['name'])
self.assertEqual(u_desc, new_user_get['description'])
self.assertEqual(project['id'],
new_user_get['project_id'])
self.assertEqual(u_email, new_user_get['email'])
@attr(type=['negative', 'gate'])
def test_list_projects_by_unauthorized_user(self):
# Non-admin user should not be able to list projects
self.assertRaises(exceptions.Unauthorized,
self.v3_non_admin_client.list_projects)
@attr(type=['negative', 'gate'])
def test_project_create_duplicate(self):
# Project names should be unique
project_name = rand_name('project-dup-')
resp, project = self.v3_client.create_project(project_name)
self.v3data.projects.append(project)
self.assertRaises(
exceptions.Duplicate, self.v3_client.create_project, project_name)
@attr(type=['negative', 'gate'])
def test_create_project_by_unauthorized_user(self):
# Non-admin user should not be authorized to create a project
project_name = rand_name('project-')
self.assertRaises(
exceptions.Unauthorized, self.v3_non_admin_client.create_project,
project_name)
@attr(type=['negative', 'gate'])
def test_create_project_with_empty_name(self):
# Project name should not be empty
self.assertRaises(exceptions.BadRequest, self.v3_client.create_project,
name='')
@attr(type=['negative', 'gate'])
def test_create_projects_name_length_over_64(self):
# Project name length should not be greater than 64 characters
project_name = 'a' * 65
self.assertRaises(exceptions.BadRequest, self.v3_client.create_project,
project_name)
@attr(type=['negative', 'gate'])
def test_project_delete_by_unauthorized_user(self):
# Non-admin user should not be able to delete a project
project_name = rand_name('project-')
resp, project = self.v3_client.create_project(project_name)
self.v3data.projects.append(project)
self.assertRaises(
exceptions.Unauthorized, self.v3_non_admin_client.delete_project,
project['id'])
@attr(type=['negative', 'gate'])
def test_delete_non_existent_project(self):
# Attempt to delete a non existent project should fail
self.assertRaises(exceptions.NotFound, self.v3_client.delete_project,
'junk_Project_123456abc')
class ProjectsTestXML(ProjectsTestJSON):
_interface = 'xml'
|
|
import os, subprocess, time, signal, sys
import random
from textgame import HomeWorld
from gym import spaces, error, utils
#import spacy
import re
# ------------ ----------
# |living | |garden |
#Limbo-+ 00 +----------+ 01 |
# | | | |
# -----+------ ----+-----
# | |
# -----+------ ----+-----
# |bedroom | |kitchen |
# | 03 +----------+ 02 |
# | | | |
# ------------ ----------
class HomeWorld1(HomeWorld):
def __init__(self):
#
# environment definition
#
self.descriptions = {
"living" : ["This room has a couch, chairs and TV.",
"You have entered the living room. You can watch TV here.",
"This room has two sofas, chairs and a chandelier."],
"garden" : ["This space has a swing, flowers and trees.",
"You have arrived at the garden. You can exercise here.",
"This area has plants, grass and rabbits."],
"kitchen" : ["This room has a fridge, oven, and a sink.",
"You have arrived in the kitchen. You can find food and drinks here.",
"This living area has pizza, coke, and icecream."],
"bedroom" : ["This area has a bed, desk and a dresser.",
"You have arrived in the bedroom. You can rest here.",
"You see a wooden cot and a mattress on top of it."],
}
self.rooms = self.descriptions.keys()
self.env_objects = {
"tv" : "A huge television that is great for watching games.",
"bike" : "A nice shiny bike that is fun to ride.",
"apple" : "A red juicy fruit.",
"bed" : "A nice, comfortable bed with pillows and sheets."
}
self.definitions = {
("eat apple") : [{
"conds" :{"room":"kitchen", "quest":"hungry"},
"effs" :{"quest":""}
},{
"conds" :{"room":"kitchen",},
"effs" :{}
}],
("exercise bike") : [{
"conds" :{"room":"garden", "quest":"fat"},
"effs" :{"quest":""}
},{
"conds" :{"room":"garden",},
"effs" :{}
}],
("sleep bed") : [{
"conds" :{"room":"bedroom", "quest":"sleepy"},
"effs" :{"quest":""}
},{
"conds" :{"room":"bedroom",},
"effs" :{}
}],
("watch tv") : [{
"conds" : {"room":"living", "quest":"bored"},
"effs" : {"quest":""}
},{
"conds" : {"room":"living"},
"effs" : {}
}],
#
# Move in direction
#
("go north") : [
{"conds":{"room":"bedroom"}, "effs":{"room":"living"}},
{"conds":{"room":"kitchen"}, "effs":{"room":"garden"}},
],
("go south") : [
{"conds":{"room":"living"}, "effs":{"room":"bedroom"}},
{"conds":{"room":"garden"}, "effs":{"room":"kitchen"}},
],
("go east") : [
{"conds":{"room":"living"}, "effs":{"room":"garden"}},
{"conds":{"room":"bedroom"}, "effs":{"room":"kitchen"}},
],
("go west") : [
{"conds":{"room":"garden"}, "effs":{"room":"living"}},
{"conds":{"room":"kitchen"}, "effs":{"room":"bedroom"}},
],
}
self.text = {
"quest" : {
"hungry" : "You are hungry",
"sleepy" : "You are sleepy",
"bored" : "You are bored",
"fat" : "You are getting fat",
},
"mislead" : {
"hungry" : "You are not hungry",
"sleepy" : "You are not sleepy",
"bored" : "You are not bored",
"fat" : "You are not getting fat",
},
}
HomeWorld.__init__(self)
self.actions = list({a.split(" ")[0] for a in self.definitions})
self.objects = list({a.split(" ")[1] for a in self.definitions})
self.num_actions = len(self.actions)
self.num_objects = len(self.objects)
self.quests = self.text["quest"].keys()
        self.extra_vocab = ['nothing', 'happened', 'not', 'but', 'now']
self.state = {
"room" : "",
"description" : "",
"quest" : "",
"mislead" : "",
}
self.init_vocab()
        # set the observation space to the vocab size and some kind of sequential
        # data
self.vocab_space = self.get_vocab_size()
self.action_space = spaces.Tuple((spaces.Discrete(self.num_actions), spaces.Discrete(self.num_objects)))
self.observation_space = None
self.seq_length = 30
def init_vocab(self):
words = u" ".join( [d for ds in self.descriptions.values() for d in ds] +
self.env_objects.values() +
[t for k,v in self.text.iteritems() for t in v.values()] +
self.extra_vocab
)
words = re.sub(r'[^\w\s]','',words)
        self.vocab = set(re.split(r'\s+', words))
def get_quest(self):
if not self.state["quest"]:
return "There is nothing to do."
if self.state["mislead"]:
return "{} now but {} now.".format(
self.text["mislead"][self.state["mislead"]],
self.text["quest"][self.state["quest"]])
return "{} now.".format(self.text["quest"][self.state["quest"]])
def get_room_desc(self):
return self.state["description"]
def get_output(self):
# get room description
room = self.get_room_desc()
# get quest description
quest = self.get_quest()
output = [room, quest]
# shuffle the output for increasing states!
#self.rng.shuffle(output)
return " ".join(output)
def get_location(self):
        return self.state["room"]
def get_action(self,action):
return self.actions[action[0]] + " " + self.objects[action[1]]
def is_executable(self, conditions):
return all(self.state[f] == v for f,v in conditions.iteritems())
def is_movement(self,action):
a,o = action.split(" ")
return o not in self.env_objects
def is_terminal(self):
return (not self.state["quest"])
def is_successful(self):
return self.is_terminal()
def do(self, a):
"""
Action execution function: return next state and reward for executing action
in current state
"""
# check whether action does change the state - executability
if a in self.definitions:
for action in self.definitions[a]:
if not self.is_executable(action["conds"]): continue
for f,v in action["effs"].iteritems():
self.state[f] = v
if self.is_movement(a):
self.state["description"] = self.rng.choice(self.descriptions[self.state["room"]])
return self.get_output(), -0.01
else:
obj = a.split(" ")[1]
out = self.env_objects[obj]
if self.is_terminal():
if self.is_successful():
r = 1
else:
r = -1
else:
r = -0.01
#r = 1 if self.is_terminal() else -0.01
return out, r
# if not, return "Nothing happend." and same state description
return "Nothing happend. " + self.get_output(), -0.1
def reset(self):
location = self.rng.choice(self.rooms)
self.state["room"] = location
self.state["description"] = self.rng.choice(self.descriptions[location])
quests = self.permutation(self.quests)
self.state["quest"] = quests[0]
self.state["mislead"] = quests[1]
return self.get_output()
def main():
import gym, gym_textgame
env = gym.make("HomeWorld-v0")
done = False
states = []
print env.action_space
s = env.reset()
i = 0
print "({})".format(i), s
while not done:
states.append(s)
i += 1
a = env.action_space.sample()
s, r, done, info = env.step(a)
print "({}) {} {}".format(i, env.env.get_action(a), s)
print "done!", r
print env.env.state
def test():
env = HomeWorld1()
done = False
print env.reset()
while not done:
action = raw_input(">> ")
if action == "help":
print env.definitions.keys()
continue
elif action == "help state":
print env.state
continue
else:
state, reward = env.do(action)
done = env.is_terminal()
print state
print "you are done!"
if env.is_successful():
print "congrats!"
else:
print "sorry dude, you are dead..."
if __name__ == "__main__":
test()
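# Illustrative only: how the Tuple action space above (Discrete(num_actions) x
# Discrete(num_objects)) is decoded into a text command, mirroring get_action();
# the verb/noun orderings here are hypothetical.
def _example_index_pair_to_command(actions=("go", "eat"), objects=("north", "apple")):
    pair = (1, 1)
    return actions[pair[0]] + " " + objects[pair[1]]  # -> "eat apple"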
|
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Application Load Balancers
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import itertools
import logging
from collections import defaultdict
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import (
Filter, FilterRegistry, FilterValidationError, DefaultVpcBase,
MetricsFilter, ValueFilter)
import c7n.filters.vpc as net_filters
from c7n import tags
from c7n.manager import resources
from c7n.query import QueryResourceManager, DescribeSource, ConfigSource
from c7n.utils import (
local_session, chunks, type_schema, get_retry, set_annotation)
from c7n.resources.shield import IsShieldProtected, SetShieldProtection
log = logging.getLogger('custodian.app-elb')
filters = FilterRegistry('app-elb.filters')
actions = ActionRegistry('app-elb.actions')
filters.register('tag-count', tags.TagCountFilter)
filters.register('marked-for-op', tags.TagActionFilter)
@resources.register('app-elb')
class AppELB(QueryResourceManager):
"""Resource manager for v2 ELBs (AKA ALBs).
"""
class resource_type(object):
service = 'elbv2'
type = 'app-elb'
enum_spec = ('describe_load_balancers', 'LoadBalancers', None)
name = 'LoadBalancerName'
id = 'LoadBalancerArn'
filter_name = None
filter_type = None
dimension = None
date = 'CreatedTime'
config_type = 'AWS::ElasticLoadBalancingV2::LoadBalancer'
filter_registry = filters
action_registry = actions
retry = staticmethod(get_retry(('Throttling',)))
@classmethod
def get_permissions(cls):
# override as the service is not the iam prefix
return ("elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeTags")
def get_arn(self, r):
return r[self.resource_type.id]
def get_source(self, source_type):
if source_type == 'describe':
return DescribeAppElb(self)
elif source_type == 'config':
return ConfigAppElb(self)
raise ValueError("Unsupported source: %s for %s" % (
source_type, self.resource_type.config_type))
class DescribeAppElb(DescribeSource):
def augment(self, albs):
_describe_appelb_tags(
albs,
self.manager.session_factory,
self.manager.executor_factory,
self.manager.retry)
return albs
class ConfigAppElb(ConfigSource):
def load_resource(self, item):
resource = super(ConfigAppElb, self).load_resource(item)
resource['Tags'] = [{u'Key': t['key'], u'Value': t['value']}
for t in json.loads(item['supplementaryConfiguration']['Tags'])]
return resource
def _describe_appelb_tags(albs, session_factory, executor_factory, retry):
def _process_tags(alb_set):
client = local_session(session_factory).client('elbv2')
alb_map = {alb['LoadBalancerArn']: alb for alb in alb_set}
results = retry(client.describe_tags, ResourceArns=list(alb_map.keys()))
for tag_desc in results['TagDescriptions']:
if ('ResourceArn' in tag_desc and
tag_desc['ResourceArn'] in alb_map):
alb_map[tag_desc['ResourceArn']]['Tags'] = tag_desc['Tags']
with executor_factory(max_workers=2) as w:
list(w.map(_process_tags, chunks(albs, 20)))
def _add_appelb_tags(albs, session_factory, ts):
client = local_session(session_factory).client('elbv2')
client.add_tags(
ResourceArns=[alb['LoadBalancerArn'] for alb in albs],
Tags=ts)
def _remove_appelb_tags(albs, session_factory, tag_keys):
client = local_session(session_factory).client('elbv2')
client.remove_tags(
ResourceArns=[alb['LoadBalancerArn'] for alb in albs],
TagKeys=tag_keys)
filters.register('shield-enabled', IsShieldProtected)
actions.register('set-shield', SetShieldProtection)
@filters.register('metrics')
class AppElbMetrics(MetricsFilter):
"""Filter app load balancer by metric values.
See available metrics here: https://goo.gl/TLQ9Fr
Custodian defaults to specifying dimensions for the app elb only.
    Target Group dimension is not supported at the moment.
"""
def get_dimensions(self, resource):
return [{
'Name': self.model.dimension,
'Value': 'app/%s/%s' % (
resource[self.model.name],
resource[self.model.id].rsplit('/')[-1])}]
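# Illustrative only (hypothetical name/ARN): the CloudWatch dimension value built in
# get_dimensions above has the form "app/<load-balancer-name>/<arn-suffix>", which is
# the LoadBalancer dimension that ALB metrics are published under.
def _example_alb_dimension_value():
    name = 'my-alb'
    arn = ('arn:aws:elasticloadbalancing:us-east-1:123456789012:'
           'loadbalancer/app/my-alb/50dc6c495c0c9188')
    return 'app/%s/%s' % (name, arn.rsplit('/')[-1])  # -> 'app/my-alb/50dc6c495c0c9188'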
@filters.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
RelatedIdsExpression = "SecurityGroups[]"
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
RelatedIdsExpression = "AvailabilityZones[].SubnetId"
filters.register('network-location', net_filters.NetworkLocation)
@AppELB.filter_registry.register('waf-enabled')
class WafEnabled(Filter):
schema = type_schema(
'waf-enabled', **{
'web-acl': {'type': 'string'},
'state': {'type': 'boolean'}})
permissions = ('waf-regional:ListResourcesForWebACL', 'waf-regional:ListWebACLs')
# TODO verify name uniqueness within region/account
# TODO consider associated resource fetch in augment
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client(
'waf-regional')
target_acl = self.data.get('web-acl')
state = self.data.get('state', False)
name_id_map = {}
resource_map = {}
wafs = self.manager.get_resource_manager('waf-regional').resources()
for w in wafs:
if 'c7n:AssociatedResources' not in w:
arns = client.list_resources_for_web_acl(
WebACLId=w['WebACLId']).get('ResourceArns', [])
w['c7n:AssociatedResources'] = arns
name_id_map[w['Name']] = w['WebACLId']
for r in w['c7n:AssociatedResources']:
resource_map[r] = w['WebACLId']
target_acl_id = name_id_map.get(target_acl, target_acl)
# generally frown on runtime validation errors, but also frown on
# api calls during validation.
        if target_acl and target_acl_id not in name_id_map.values():
raise ValueError("Invalid target acl:%s, acl not found" % target_acl)
arn_key = self.manager.resource_type.id
state_map = {}
for r in resources:
arn = r[arn_key]
if arn in resource_map:
r['c7n_webacl'] = resource_map[arn]
if not target_acl:
state_map[arn] = True
continue
r_acl = resource_map[arn]
if r_acl == target_acl_id:
state_map[arn] = True
continue
state_map[arn] = False
else:
state_map[arn] = False
return [r for r in resources if state_map[r[arn_key]] == state]
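# A minimal sketch (not part of the c7n API) of the matching rule implemented in
# WafEnabled.process above: an ALB is returned when its web ACL association status,
# evaluated against the optional target ACL, equals the policy's `state` value.
def _waf_state_matches(alb_arn, resource_map, target_acl_id, state):
    if alb_arn not in resource_map:
        associated = False
    elif target_acl_id is None:
        # any association counts when no specific web ACL was requested
        associated = True
    else:
        associated = resource_map[alb_arn] == target_acl_id
    return associated == state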
@AppELB.action_registry.register('set-waf')
class SetWaf(BaseAction):
"""Enable/Disable waf protection on applicable resource.
"""
permissions = ('waf-regional:AssociateWebACL', 'waf-regional:ListWebACLs')
schema = type_schema(
'set-waf', required=['web-acl'], **{
'web-acl': {'type': 'string'},
# 'force': {'type': 'boolean'},
'state': {'type': 'boolean'}})
def validate(self):
found = False
for f in self.manager.filters:
if isinstance(f, WafEnabled):
found = True
break
if not found:
# try to ensure idempotent usage
raise FilterValidationError(
"set-waf should be used in conjunction with waf-enabled filter")
return self
def process(self, resources):
wafs = self.manager.get_resource_manager('waf-regional').resources()
name_id_map = {w['Name']: w['WebACLId'] for w in wafs}
target_acl = self.data.get('web-acl')
target_acl_id = name_id_map.get(target_acl, target_acl)
state = self.data.get('state', True)
if state and target_acl_id not in name_id_map.values():
raise ValueError("invalid web acl: %s" % (target_acl_id))
client = local_session(
self.manager.session_factory).client('waf-regional')
arn_key = self.manager.resource_type.id
# TODO implement force to reassociate.
# TODO investigate limits on waf association.
for r in resources:
if state:
client.associate_web_acl(
WebACLId=target_acl_id, ResourceArn=r[arn_key])
else:
client.disassociate_web_acl(
WebACLId=target_acl_id, ResourceArn=r[arn_key])
@actions.register('set-s3-logging')
class SetS3Logging(BaseAction):
"""Action to enable/disable S3 logging for an application loadbalancer.
:example:
    .. code-block:: yaml
policies:
- name: elbv2-test
resource: app-elb
filters:
- type: value
key: Attributes."access_logs.s3.enabled"
value: False
actions:
            - type: set-s3-logging
              state: enabled
bucket: elbv2logtest
prefix: dahlogs
"""
schema = type_schema(
'set-s3-logging',
state={'enum': ['enabled', 'disabled']},
bucket={'type': 'string'},
prefix={'type': 'string'},
required=('state',))
permissions = ("elasticloadbalancing:ModifyLoadBalancerAttributes",)
def validate(self):
if self.data.get('state') == 'enabled':
if 'bucket' not in self.data or 'prefix' not in self.data:
raise FilterValidationError((
"alb logging enablement requires `bucket` "
"and `prefix` specification"))
return self
def process(self, resources):
client = local_session(self.manager.session_factory).client('elbv2')
for elb in resources:
elb_arn = elb['LoadBalancerArn']
attributes = [{
'Key': 'access_logs.s3.enabled',
'Value': (
self.data.get('state') == 'enabled' and 'true' or 'false')}]
if self.data.get('state') == 'enabled':
attributes.append({
'Key': 'access_logs.s3.bucket',
'Value': self.data['bucket']})
prefix_template = self.data['prefix']
info = {t['Key']: t['Value'] for t in elb.get('Tags', ())}
info['DNSName'] = elb.get('DNSName', '')
info['AccountId'] = elb['LoadBalancerArn'].split(':')[4]
info['LoadBalancerName'] = elb['LoadBalancerName']
attributes.append({
'Key': 'access_logs.s3.prefix',
'Value': prefix_template.format(**info)})
self.manager.retry(
client.modify_load_balancer_attributes,
LoadBalancerArn=elb_arn, Attributes=attributes)
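# For completeness, a hedged example of turning access logging off with the
# same action; per validate() above, bucket and prefix are only required when
# state is enabled.
#
# policies:
#   - name: elbv2-disable-s3-logging
#     resource: app-elb
#     actions:
#       - type: set-s3-logging
#         state: disabled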
@actions.register('mark-for-op')
class AppELBMarkForOpAction(tags.TagDelayedAction):
"""Action to create a delayed action on an ELB to start at a later date
:example:
.. code-block: yaml
policies:
- name: appelb-failed-mark-for-op
resource: app-elb
filters:
- "tag:custodian_elb_cleanup": absent
- State: failed
actions:
- type: mark-for-op
tag: custodian_elb_cleanup
msg: "AppElb failed: {op}@{action_date}"
op: delete
days: 1
"""
batch_size = 1
permissions = ("elasticloadbalancing:AddTags",)
def process_resource_set(self, resource_set, ts):
_add_appelb_tags(
resource_set,
self.manager.session_factory,
ts)
@actions.register('tag')
class AppELBTagAction(tags.Tag):
"""Action to create tag/tags on an ELB
:example:
.. code-block: yaml
policies:
- name: appelb-create-required-tag
resource: app-elb
filters:
- "tag:RequiredTag": absent
actions:
- type: tag
key: RequiredTag
value: RequiredValue
"""
batch_size = 1
permissions = ("elasticloadbalancing:AddTags",)
def process_resource_set(self, resource_set, ts):
_add_appelb_tags(
resource_set,
self.manager.session_factory,
ts)
@actions.register('remove-tag')
class AppELBRemoveTagAction(tags.RemoveTag):
"""Action to remove tag/tags from an ELB
:example:
.. code-block: yaml
policies:
- name: appelb-delete-expired-tag
resource: app-elb
filters:
- "tag:ExpiredTag": present
actions:
- type: remove-tag
tags: ["ExpiredTag"]
"""
batch_size = 1
permissions = ("elasticloadbalancing:RemoveTags",)
def process_resource_set(self, resource_set, tag_keys):
_remove_appelb_tags(
resource_set,
self.manager.session_factory,
tag_keys)
@actions.register('delete')
class AppELBDeleteAction(BaseAction):
"""Action to delete an ELB
To avoid unwanted deletions of ELB, it is recommended to apply a filter
to the rule
:example:
.. code-block: yaml
policies:
- name: appelb-delete-failed-elb
resource: app-elb
filters:
- State: failed
actions:
- delete
"""
schema = type_schema('delete', force={'type': 'boolean'})
permissions = (
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:ModifyLoadBalancerAttributes",)
def process(self, load_balancers):
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_alb, load_balancers))
def process_alb(self, alb):
client = local_session(self.manager.session_factory).client('elbv2')
try:
if self.data.get('force'):
client.modify_load_balancer_attributes(
LoadBalancerArn=alb['LoadBalancerArn'],
Attributes=[{
'Key': 'deletion_protection.enabled',
'Value': 'false',
}])
self.manager.retry(
client.delete_load_balancer, LoadBalancerArn=alb['LoadBalancerArn'])
except Exception as e:
if e.response['Error']['Code'] in ['OperationNotPermitted',
'LoadBalancerNotFound']:
self.log.warning(
"Exception trying to delete ALB: %s error: %s",
alb['LoadBalancerArn'], e)
return
raise
class AppELBListenerFilterBase(object):
""" Mixin base class for filters that query LB listeners.
"""
permissions = ("elasticloadbalancing:DescribeListeners",)
def initialize(self, albs):
def _process_listeners(alb):
client = local_session(
self.manager.session_factory).client('elbv2')
results = client.describe_listeners(
LoadBalancerArn=alb['LoadBalancerArn'])
self.listener_map[alb['LoadBalancerArn']] = results['Listeners']
self.listener_map = defaultdict(list)
with self.manager.executor_factory(max_workers=2) as w:
list(w.map(_process_listeners, albs))
class AppELBAttributeFilterBase(object):
""" Mixin base class for filters that query LB attributes.
"""
def initialize(self, albs):
def _process_attributes(alb):
if 'Attributes' not in alb:
alb['Attributes'] = {}
client = local_session(
self.manager.session_factory).client('elbv2')
results = client.describe_load_balancer_attributes(
LoadBalancerArn=alb['LoadBalancerArn'])
# flatten out the list of dicts and cast
for pair in results['Attributes']:
k = pair['Key']
v = pair['Value']
if v.isdigit():
v = int(v)
elif v == 'true':
v = True
elif v == 'false':
v = False
alb['Attributes'][k] = v
with self.manager.executor_factory(max_workers=2) as w:
list(w.map(_process_attributes, albs))
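# Rough sketch of the flattened shape produced by the mixin above (values are
# illustrative only, not real defaults): after initialize(), each alb carries
# an 'Attributes' dict such as
#   {'access_logs.s3.enabled': True,
#    'access_logs.s3.bucket': 'my-log-bucket',
#    'access_logs.s3.prefix': 'alb',
#    'deletion_protection.enabled': False}
# which is what the is-logging / is-not-logging filters below match against.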
@filters.register('is-logging')
class IsLoggingFilter(Filter, AppELBAttributeFilterBase):
""" Matches AppELBs that are logging to S3.
bucket and prefix are optional
:example:
.. code-block: yaml
policies:
- name: alb-is-logging-test
resource: app-elb
filters:
- type: is-logging
- name: alb-is-logging-bucket-and-prefix-test
resource: app-elb
filters:
- type: is-logging
bucket: prodlogs
prefix: alblogs
"""
permissions = ("elasticloadbalancing:DescribeLoadBalancerAttributes",)
schema = type_schema('is-logging',
bucket={'type': 'string'},
prefix={'type': 'string'}
)
def process(self, resources, event=None):
self.initialize(resources)
bucket_name = self.data.get('bucket', None)
bucket_prefix = self.data.get('prefix', None)
return [alb for alb in resources
if alb['Attributes']['access_logs.s3.enabled'] and
(not bucket_name or bucket_name == alb['Attributes'].get(
'access_logs.s3.bucket', None)) and
(not bucket_prefix or bucket_prefix == alb['Attributes'].get(
'access_logs.s3.prefix', None))
]
@filters.register('is-not-logging')
class IsNotLoggingFilter(Filter, AppELBAttributeFilterBase):
""" Matches AppELBs that are NOT logging to S3.
or do not match the optional bucket and/or prefix.
:example:
.. code-block: yaml
policies:
- name: alb-is-not-logging-test
resource: app-elb
filters:
- type: is-not-logging
- name: alb-is-not-logging-bucket-and-prefix-test
resource: app-elb
filters:
- type: is-not-logging
bucket: prodlogs
prefix: alblogs
"""
permissions = ("elasticloadbalancing:DescribeLoadBalancerAttributes",)
schema = type_schema('is-not-logging',
bucket={'type': 'string'},
prefix={'type': 'string'}
)
def process(self, resources, event=None):
self.initialize(resources)
bucket_name = self.data.get('bucket', None)
bucket_prefix = self.data.get('prefix', None)
return [alb for alb in resources
if not alb['Attributes']['access_logs.s3.enabled'] or
(bucket_name and bucket_name != alb['Attributes'].get(
'access_logs.s3.bucket', None)) or
(bucket_prefix and bucket_prefix != alb['Attributes'].get(
'access_logs.s3.prefix', None))
]
class AppELBTargetGroupFilterBase(object):
""" Mixin base class for filters that query LB target groups.
"""
def initialize(self, albs):
self.target_group_map = defaultdict(list)
target_groups = self.manager.get_resource_manager(
'app-elb-target-group').resources()
for target_group in target_groups:
for load_balancer_arn in target_group['LoadBalancerArns']:
self.target_group_map[load_balancer_arn].append(target_group)
@filters.register('listener')
class AppELBListenerFilter(ValueFilter, AppELBListenerFilterBase):
"""Filter ALB based on matching listener attributes
Adding the `matched` flag will filter on previously matched listeners
:example:
.. code-block: yaml
policies:
- name: app-elb-invalid-ciphers
resource: app-elb
filters:
- type: listener
key: Protocol
value: HTTPS
- type: listener
key: SslPolicy
value: ['ELBSecurityPolicy-TLS-1-1-2017-01','ELBSecurityPolicy-TLS-1-2-2017-01']
op: ni
matched: true
actions:
- type: modify-listener
sslpolicy: "ELBSecurityPolicy-TLS-1-2-2017-01"
"""
schema = type_schema(
'listener', rinherit=ValueFilter.schema, matched={'type': 'boolean'})
permissions = ("elasticloadbalancing:DescribeLoadBalancerAttributes",)
def validate(self):
if not self.data.get('matched'):
return
listeners = [f for f in self.manager.filters
if isinstance(f, self.__class__)]
found = False
for f in listeners[:listeners.index(self)]:
if not f.data.get('matched', False):
found = True
break
if not found:
raise FilterValidationError(
"matched listener filter, requires preceding listener filter")
return self
def process(self, albs, event=None):
self.initialize(albs)
return super(AppELBListenerFilter, self).process(albs, event)
def __call__(self, alb):
listeners = self.listener_map[alb['LoadBalancerArn']]
if self.data.get('matched', False):
listeners = alb.pop('c7n:MatchedListeners', [])
found_listeners = False
for listener in listeners:
if self.match(listener):
set_annotation(alb, 'c7n:MatchedListeners', listener)
found_listeners = True
return found_listeners
@actions.register('modify-listener')
class AppELBModifyListenerPolicy(BaseAction):
"""Action to modify the policy for an App ELB
:example:
.. code-block: yaml
policies:
- name: appelb-modify-listener
resource: app-elb
filters:
- type: listener
key: Protocol
value: HTTP
actions:
- type: modify-listener
protocol: HTTPS
sslpolicy: "ELBSecurityPolicy-TLS-1-2-2017-01"
certificate: "arn:aws:acm:region:123456789012:certificate/12345678-\
1234-1234-1234-123456789012"
"""
schema = type_schema(
'modify-listener',
port={'type': 'integer'},
protocol={'enum': ['HTTP', 'HTTPS']},
sslpolicy={'type': 'string'},
certificate={'type': 'string'}
)
permissions = ("elasticloadbalancing:ModifyListener",)
def validate(self):
for f in self.manager.data.get('filters', ()):
if 'listener' in f.get('type', ()):
return self
raise FilterValidationError(
"modify-listener action requires the listener filter")
def process(self, load_balancers):
args = {}
if 'port' in self.data:
args['Port'] = self.data.get('port')
if 'protocol' in self.data:
args['Protocol'] = self.data.get('protocol')
if 'sslpolicy' in self.data:
args['SslPolicy'] = self.data.get('sslpolicy')
if 'certificate' in self.data:
args['Certificates'] = [{'CertificateArn': self.data.get('certificate')}]
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_alb, load_balancers, itertools.repeat(args)))
def process_alb(self, alb, args):
client = local_session(self.manager.session_factory).client('elbv2')
for matched_listener in alb.get('c7n:MatchedListeners', ()):
client.modify_listener(
ListenerArn=matched_listener['ListenerArn'],
**args)
@filters.register('healthcheck-protocol-mismatch')
class AppELBHealthCheckProtocolMismatchFilter(Filter,
AppELBTargetGroupFilterBase):
"""Filter AppELBs with mismatched health check protocols
A mismatched health check protocol is when a target group's protocol
does not match its own health check protocol.
:example:
.. code-block: yaml
policies:
- name: appelb-healthcheck-mismatch
resource: app-elb
filters:
- healthcheck-protocol-mismatch
"""
schema = type_schema('healthcheck-protocol-mismatch')
permissions = ("elasticloadbalancing:DescribeTargetGroups",)
def process(self, albs, event=None):
def _healthcheck_protocol_mismatch(alb):
for target_group in self.target_group_map[alb['LoadBalancerArn']]:
if (target_group['Protocol'] !=
target_group['HealthCheckProtocol']):
return True
return False
self.initialize(albs)
return [alb for alb in albs if _healthcheck_protocol_mismatch(alb)]
@filters.register('target-group')
class AppELBTargetGroupFilter(ValueFilter, AppELBTargetGroupFilterBase):
"""Filter ALB based on matching target group value"""
schema = type_schema('target-group', rinherit=ValueFilter.schema)
permissions = ("elasticloadbalancing:DescribeTargetGroups",)
def process(self, albs, event=None):
self.initialize(albs)
return super(AppELBTargetGroupFilter, self).process(albs, event)
def __call__(self, alb):
target_groups = self.target_group_map[alb['LoadBalancerArn']]
return self.match(target_groups)
@filters.register('default-vpc')
class AppELBDefaultVpcFilter(DefaultVpcBase):
"""Filter all ELB that exist within the default vpc
:example:
.. code-block: yaml
policies:
- name: appelb-in-default-vpc
resource: app-elb
filters:
- default-vpc
"""
schema = type_schema('default-vpc')
def __call__(self, alb):
return alb.get('VpcId') and self.match(alb.get('VpcId')) or False
@resources.register('app-elb-target-group')
class AppELBTargetGroup(QueryResourceManager):
"""Resource manager for v2 ELB target groups.
"""
class resource_type(object):
service = 'elbv2'
type = 'app-elb-target-group'
enum_spec = ('describe_target_groups', 'TargetGroups', None)
name = 'TargetGroupName'
id = 'TargetGroupArn'
filter_name = None
filter_type = None
dimension = None
date = None
filter_registry = FilterRegistry('app-elb-target-group.filters')
action_registry = ActionRegistry('app-elb-target-group.actions')
retry = staticmethod(get_retry(('Throttling',)))
filter_registry.register('tag-count', tags.TagCountFilter)
filter_registry.register('marked-for-op', tags.TagActionFilter)
@classmethod
def get_permissions(cls):
# override as the service is not the iam prefix
return ("elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTags")
def augment(self, target_groups):
def _describe_target_group_health(target_group):
client = local_session(self.session_factory).client('elbv2')
result = client.describe_target_health(
TargetGroupArn=target_group['TargetGroupArn'])
target_group['TargetHealthDescriptions'] = result[
'TargetHealthDescriptions']
with self.executor_factory(max_workers=2) as w:
list(w.map(_describe_target_group_health, target_groups))
_describe_target_group_tags(
target_groups, self.session_factory,
self.executor_factory, self.retry)
return target_groups
def _describe_target_group_tags(target_groups, session_factory,
executor_factory, retry):
def _process_tags(target_group_set):
client = local_session(session_factory).client('elbv2')
target_group_map = {
target_group['TargetGroupArn']:
target_group for target_group in target_group_set
}
results = retry(
client.describe_tags,
ResourceArns=list(target_group_map.keys()))
for tag_desc in results['TagDescriptions']:
if ('ResourceArn' in tag_desc and
tag_desc['ResourceArn'] in target_group_map):
target_group_map[
tag_desc['ResourceArn']
]['Tags'] = tag_desc['Tags']
with executor_factory(max_workers=2) as w:
list(w.map(_process_tags, chunks(target_groups, 20)))
def _add_target_group_tags(target_groups, session_factory, ts):
client = local_session(session_factory).client('elbv2')
client.add_tags(
ResourceArns=[
target_group['TargetGroupArn'] for target_group in target_groups
],
Tags=ts)
def _remove_target_group_tags(target_groups, session_factory, tag_keys):
client = local_session(session_factory).client('elbv2')
client.remove_tags(
ResourceArns=[
target_group['TargetGroupArn'] for target_group in target_groups
],
TagKeys=tag_keys)
@AppELBTargetGroup.action_registry.register('mark-for-op')
class AppELBTargetGroupMarkForOpAction(tags.TagDelayedAction):
"""Action to specify a delayed action on an ELB target group"""
batch_size = 1
permissions = ("elasticloadbalancing:AddTags",)
def process_resource_set(self, resource_set, ts):
_add_target_group_tags(
resource_set,
self.manager.session_factory,
ts)
@AppELBTargetGroup.action_registry.register('tag')
class AppELBTargetGroupTagAction(tags.Tag):
"""Action to create tag/tags on an ELB target group
:example:
.. code-block: yaml
policies:
- name: appelb-targetgroup-add-required-tag
resource: app-elb-target-group
filters:
- "tag:RequiredTag": absent
actions:
- type: tag
key: RequiredTag
value: RequiredValue
"""
batch_size = 1
permissions = ("elasticloadbalancing:AddTags",)
def process_resource_set(self, resource_set, ts):
_add_target_group_tags(
resource_set,
self.manager.session_factory,
ts)
@AppELBTargetGroup.action_registry.register('remove-tag')
class AppELBTargetGroupRemoveTagAction(tags.RemoveTag):
"""Action to remove tag/tags from ELB target group
:example:
.. code-block: yaml
policies:
- name: appelb-targetgroup-remove-expired-tag
resource: app-elb-target-group
filters:
- "tag:ExpiredTag": present
actions:
- type: remove-tag
tags: ["ExpiredTag"]
"""
batch_size = 1
permissions = ("elasticloadbalancing:RemoveTags",)
def process_resource_set(self, resource_set, tag_keys):
_remove_target_group_tags(
resource_set,
self.manager.session_factory,
tag_keys)
@AppELBTargetGroup.filter_registry.register('default-vpc')
class AppELBTargetGroupDefaultVpcFilter(DefaultVpcBase):
"""Filter all application elb target groups within the default vpc
:example:
.. code-block: yaml
policies:
- name: appelb-targetgroups-default-vpc
resource: app-elb-target-group
filters:
- default-vpc
"""
schema = type_schema('default-vpc')
def __call__(self, target_group):
return (target_group.get('VpcId') and
self.match(target_group.get('VpcId')) or False)
@AppELBTargetGroup.action_registry.register('delete')
class AppELBTargetGroupDeleteAction(BaseAction):
"""Action to delete ELB target group
It is recommended to apply a filter to the delete policy to avoid unwanted
deletion of any app elb target groups.
:example:
.. code-block: yaml
policies:
- name: appelb-targetgroups-delete-unused
resource: app-elb-target-group
filters:
- "tag:SomeTag": absent
actions:
- delete
"""
schema = type_schema('delete')
permissions = ('elasticloadbalancing:DeleteTargetGroup',)
def process(self, target_groups):
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_targetgroup, target_groups))
def process_targetgroup(self, target_group):
client = local_session(self.manager.session_factory).client('elbv2')
self.manager.retry(
client.delete_target_group,
TargetGroupArn=target_group['TargetGroupArn'])
|
|
import pickle
from unittest import TestCase
import numpy as np
import os
from sac.model.audacity_label import AudacityLabel
from sac.util import Util
class UtilTests(TestCase):
def setUp(self):
self.stats_csv = os.path.join(os.path.dirname(__file__), 'fixtures/example_yaafe_stats.csv')
self.no_stats_csv = os.path.join(os.path.dirname(__file__), 'fixtures/example_yaafe_no_stats.csv')
self.double_stats_csv = os.path.join(os.path.dirname(__file__), 'fixtures/example_yaafe_double_stats.csv')
self.no_stats_derivate = os.path.join(os.path.dirname(__file__), 'fixtures/example_yaafe_no_stats_derivate.csv')
self.stats_derivate = os.path.join(os.path.dirname(__file__), 'fixtures/example_yaafe_stats_derivate.csv')
# with open(os.path.abspath(os.path.join(os.path.dirname(__file__), 'fixtures/sm.pickle')), 'rb') as f:
# self.sm = pickle.load(f)
# with open(os.path.abspath(os.path.join(os.path.dirname(__file__), 'fixtures/timestamps.pickle')), 'rb') as f:
# self.timestamps = pickle.load(f)
# # yaafe header parser
def test_parse_yaafe_header_stats(self):
header = Util.parse_yaafe_header(self.stats_csv)
self.assertEqual(22050, header['samplerate'])
self.assertEqual(15360, header['effective_step_size'])
def test_parse_yaafe_header_no_stats(self):
header = Util.parse_yaafe_header(self.no_stats_csv)
self.assertEqual(22050, header['samplerate'])
self.assertEqual(512, header['effective_step_size'])
def test_parse_yaafe_header_double_stats(self):
header = Util.parse_yaafe_header(self.double_stats_csv)
self.assertEqual(22050, header['samplerate'])
self.assertEqual(460800, header['effective_step_size'])
def test_parse_yaafe_header_no_stats_derivate(self):
header = Util.parse_yaafe_header(self.no_stats_derivate)
self.assertEqual(22050, header['samplerate'])
self.assertEqual(512, header['effective_step_size'])
def test_parse_yaafe_header_stats_derivate(self):
header = Util.parse_yaafe_header(self.stats_derivate)
self.assertEqual(22050, header['samplerate'])
self.assertEqual(15360, header['effective_step_size'])
# # yaafe csv parser
def test_load_yaafe_csv_no_stats(self):
timestamps, features = Util.load_yaafe_csv(self.no_stats_csv)
self.assertEqual((17, 2), features.shape)
self.assertEqual(0.37151927437641724, timestamps[-1])
def test_load_yaafe_csv_stats(self):
timestamps, features = Util.load_yaafe_csv(self.stats_csv)
self.assertEqual((17, 2), features.shape)
self.assertEqual(11.145578231292516, timestamps[-1])
def test_load_yaafe_csv_double_stats(self):
timestamps, features = Util.load_yaafe_csv(self.double_stats_csv)
self.assertEqual((17, 2), features.shape)
self.assertEqual(334.3673469387755, timestamps[-1])
# # merge classifications
def test_generate_labels_from_classifications(self):
classifications = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1]
timestamps = [0.0, 0.6965986394557823, 1.3931972789115645, 2.089795918367347, 2.786394557823129,
3.4829931972789114, 4.179591836734694, 4.876190476190477, 5.572789115646258, 6.2693877551020405,
6.965986394557823, 7.662585034013605, 8.359183673469389, 9.05578231292517, 9.752380952380953,
10.448979591836734, 11.145578231292516, 11.842176870748299, 12.538775510204081,
13.235374149659863, 13.931972789115646, 14.628571428571428, 15.32517006802721, 16.021768707482995]
labels = Util.generate_labels_from_classifications(classifications, timestamps)
expected_labels = [AudacityLabel(0.0, 1.3931972789115645, 1),
AudacityLabel(1.3931972789115645, 9.752380952380953, 0),
AudacityLabel(9.752380952380953, 10.448979591836736, 1),
AudacityLabel(10.448979591836734, 11.145578231292516, 0),
AudacityLabel(11.145578231292516, 12.538775510204081, 1),
AudacityLabel(12.538775510204081, 13.235374149659863, 0),
AudacityLabel(13.235374149659863, 16.718367346938777, 1)]
self.assertListEqual(expected_labels, labels)
def test_combine_adjacent_labels_of_the_same_class(self):
input_labels = [
AudacityLabel(0, 10, "m"),
AudacityLabel(10, 20, "m"),
AudacityLabel(20, 21, "s"),
AudacityLabel(21, 22, "s"),
AudacityLabel(22, 23, "s"),
AudacityLabel(23, 30, "m")
]
expected_labels = [
AudacityLabel(0, 20, "m"),
AudacityLabel(20, 23, "s"),
AudacityLabel(23, 30, "m"),
]
actual_labels = Util.combine_adjacent_labels_of_the_same_class(input_labels)
self.assertListEqual(expected_labels, actual_labels)
def test_calculate_classes_percentages(self):
classifications = [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1]
percentages = Util.calculate_classes_percentages(classifications)
self.assertAlmostEqual(0.5833333333333334, percentages[0])
self.assertAlmostEqual(0.4166666666666667, percentages[1])
def test_get_annotated_data(self):
timestamps = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
data = np.random.rand(7, 10)
labels = [
AudacityLabel(1.2, 2.5, "m"),
AudacityLabel(4.5, 6.0, "s")
]
annotated_data = Util.get_annotated_data(timestamps, data, labels)
self.assertTrue("m" in annotated_data)
self.assertTrue("s" in annotated_data)
self.assertTrue(data[2, :] in annotated_data["m"])
self.assertTrue(data[5, :] in annotated_data["s"])
self.assertTrue(data[6, :] in annotated_data["s"])
def test_get_annotated_data_x_y(self):
timestamps = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
data = np.random.rand(8, 10)
labels = [
AudacityLabel(1.2, 2.5, "m"),
AudacityLabel(4.5, 6.2, "s")
]
x, y, classes, timestamps = Util.get_annotated_data_x_y(timestamps, data, labels)
self.assertEqual(3, x.shape[0])
self.assertListEqual(["m", "s", "s"], y)
self.assertListEqual(["m", "s"], classes)
self.assertListEqual([0.0, 1.0, 2.0], timestamps)
labels = [
AudacityLabel(1.0, 5.5, 'A'),
AudacityLabel(5.5, 10.0, 'B'),
AudacityLabel(15.0, 20.5, 'C')
]
X, y, classes, new_timestamps = Util.get_annotated_data_x_y([float(i) for i in range(0, 25)], np.ones((25, 10)),
labels)
self.assertListEqual(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'C', 'C', 'C', 'C', 'C', 'C'], y)
self.assertListEqual([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0], new_timestamps)
def test_split_data_based_on_annotation(self):
X = np.array([
[1, 2, 3, 4],
[2, 3, 4, 5],
[4, 5, 6, 7]
])
Y = [0, 0, 1]
classes = ["music", "speech"]
data = Util.split_data_based_on_annotation(X, Y, classes)
self.assertEqual(data["music"].shape[0], 2)
self.assertEqual(data["speech"].shape[0], 1)
# def test_kmeans_image_quantisation(self):
# quantised_image = Util.kmeans_image_quantisation(self.sm, 5)
# self.assertEquals(5, len(np.unique(quantised_image)))
def test_combine_peaks(self):
a_peaks = [3, 8]
a_peak_values = [2, 2, 2, 3, 2, 2, 2, 4, 5, 4, 2, 2, 2]
b_peaks = [6]
b_peak_values = [2, 2, 2, 2, 2, 2, 7, 2, 2, 2, 2, 2, 2]
p, v = Util.combine_peaks(a_peaks, a_peak_values, b_peaks, b_peak_values)
self.assertListEqual([3, 6, 8], p)
def test_non_maximum_suppression(self):
peaks = [2, 3, 4, 7, 8, 9]
values = [2, 2, 2, 3, 2, 2, 2, 4, 5, 4, 2, 2, 2]
p, v = Util.non_maximum_suppression(peaks, values)
self.assertListEqual([3, 8], p)
self.assertListEqual(values, v)
def test_get_annotated_labels_from_predictions_and_sm_segments(self):
timestamps = [0, 1, 2, 3, 4, 5, 6]
segments = [
AudacityLabel(0, 2.5, '-'),
AudacityLabel(2.5, 5.5, '-')
]
frame_level_predictions = np.array(['v', 'v', 'v', 's', 's', 'v', 'v'])
labels = Util.get_annotated_labels_from_predictions_and_sm_segments(frame_level_predictions, segments, timestamps)
self.assertListEqual(['v', 's'], [l.label for l in labels])
def test_get_unshifted_labels(self):
predicted_lbls = [
AudacityLabel(1.0, 3.0, "A"),
AudacityLabel(8.0, 12.0, "B")
]
shifted_unshifted_labels = [
{
'old_label': AudacityLabel(1.0, 5.0, 'A'),
'new_label': AudacityLabel(0.0, 4.0, 'A'),
'shift': 1.0
},
{
'old_label': AudacityLabel(5.0, 10.0, 'B'),
'new_label': AudacityLabel(4.0, 9.0, 'B'),
'shift': 1.0
},
{
'old_label': AudacityLabel(15.0, 20.0, 'B'),
'new_label': AudacityLabel(9.0, 14.0, 'B'),
'shift': 6.0
}
]
lbls = Util.get_unshifted_labels(predicted_lbls, shifted_unshifted_labels)
self.assertListEqual([AudacityLabel(2.0, 4.0, 'A'), AudacityLabel(9.0, 18.0, 'B')], lbls)
def test_get_unshifted_timestamps(self):
lbls = [
{
'old_label': AudacityLabel(1.0, 5.0, 'A'),
'new_label': AudacityLabel(0.0, 4.0, 'A'),
'shift': 1.0
},
{
'old_label': AudacityLabel(5.0, 10.0, 'B'),
'new_label': AudacityLabel(4.0, 9.0, 'B'),
'shift': 1.0
},
{
'old_label': AudacityLabel(15.0, 20.0, 'B'),
'new_label': AudacityLabel(9.0, 14.0, 'B'),
'shift': 6.0
}
]
shifted_timestamps = [3.0, 4.0, 11.0]
expected_unshifted_timestamps = [
shifted_timestamps[0] + lbls[0]['shift'],
shifted_timestamps[1] + lbls[1]['shift'],
shifted_timestamps[2] + lbls[2]['shift']
]
unshifted_timestamps = Util.get_unshifted_timestamps(shifted_timestamps, lbls)
self.assertListEqual(expected_unshifted_timestamps, unshifted_timestamps)
def test_get_annotation_time_shift(self):
labelsA = [
AudacityLabel(1.0, 5.0, 'A'),
AudacityLabel(5.0, 10.0, 'B'),
AudacityLabel(15.0, 20.0, 'B')
]
new_labels = Util.get_annotation_time_shift(labelsA)
expected = [
{
'old_label': AudacityLabel(1.0, 5.0, 'A'),
'new_label': AudacityLabel(0.0, 4.0, 'A'),
'shift': 1.0
},
{
'old_label': AudacityLabel(5.0, 10.0, 'B'),
'new_label': AudacityLabel(4.0, 9.0, 'B'),
'shift': 1.0
},
{
'old_label': AudacityLabel(15.0, 20.0, 'B'),
'new_label': AudacityLabel(9.0, 14.0, 'B'),
'shift': 6.0
}
]
self.assertListEqual(expected, new_labels)
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" server configuration for the assetMG tool"""
import json
from math import floor
from flask import Flask, request, jsonify, render_template
from google.ads.googleads.client import GoogleAdsClient
import app.backend.setup as setup
from app.backend.mutate import mutate_ad
from app.backend import structure
from app.backend.upload_asset import upload
from app.backend.error_handling import error_mapping
from app.backend.get_yt import get_all_yt_videos
from app.backend.helpers import populate_adgroup_details
from googleapiclient.discovery import build
from pathlib import Path
import copy
import logging
import yaml
from google_auth_oauthlib.flow import InstalledAppFlow, Flow
import webbrowser
import threading
import os
import shutil
from werkzeug.utils import secure_filename
import string
from google.cloud import storage
from app.backend.setup import CONFIG_PATH, CONFIG_PATH_GS, PREFIX, CLOUD_VERSION, BASE_URL, BUCKET_NAME, YT_CONFIG
from app.backend.setup import YT_CONFIG_FILE_PATH, YT_CONFIG_FILE_PATH_GS, CONFIG_FILE_PATH,CONFIG_FILE_PATH_GS
# from flask_cors import CORS
# app = Flask(__name__)
# CORS(app)
app = Flask(__name__, static_url_path='',
static_folder='app/asset_browser/frontend/dist/frontend',
template_folder='app/asset_browser/frontend/dist/frontend')
UPLOAD_FOLDER = Path(PREFIX + 'uploads')
UPLOAD_FOLDER.mkdir(parents=True, exist_ok=True)
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'zip'}
# A list of allowed image ratios and their respective minimum sizes
ALLOWED_RATIOS = [[(1,1),(200,200)], [(4,5),(320,400)], [(1.91,1),(600,314)]]
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
#Only used in local version
LOGS_PATH = Path('app/logs/server.log')
YT_CLIENT_SCOPES = ['https://www.googleapis.com/auth/youtube.upload']
Path(PREFIX + 'cache').mkdir(parents=True, exist_ok=True)
if CLOUD_VERSION:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s:%(levelname)s:%(message)s')
else:
logging.basicConfig(filename=LOGS_PATH,
level=logging.INFO,
format='%(asctime)s:%(levelname)s:%(message)s')
flow = None
ADWORDS_CLIENT = ''
GOOGLEADS_CLIENT = ''
def get_global_googleads_client():
global GOOGLEADS_CLIENT
if GOOGLEADS_CLIENT:
return GOOGLEADS_CLIENT
else:
setup.set_api_configs()
GOOGLEADS_CLIENT = GoogleAdsClient.load_from_storage(
CONFIG_PATH / 'google-ads.yaml')
return GOOGLEADS_CLIENT
# check if config is valid. if yes, init clients and create struct
try:
# Copy Google Storage file to tmp file
setup.download_file_from_gcs(CONFIG_FILE_PATH_GS, CONFIG_FILE_PATH)
with open(CONFIG_FILE_PATH, 'r') as f:
config_file = yaml.load(f, Loader=yaml.FullLoader)
except FileNotFoundError:
config_file = {'config_valid': 0}
@app.route('/')
def upload_frontend():
return render_template('index.html')
@app.route('/config/', methods=['GET'])
def get_configs():
"""return all config parameters"""
try:
setup.download_file_from_gcs(CONFIG_FILE_PATH_GS, CONFIG_FILE_PATH)
with open(CONFIG_FILE_PATH, 'r') as fi:
config = yaml.load(fi, Loader=yaml.FullLoader)
except FileNotFoundError:
config = {
'client_customer_id': '',
'client_id': '',
'client_secret': '',
'developer_token': '',
'refresh_token': '',
'config_valid': 0,
}
return _build_response(json.dumps(config))
@app.route('/set-configs/', methods=['POST'])
def set_secret():
"""gets client id, client secret, dev token, account id.
Saves to config.yaml and returns refresh url"""
data = request.get_json(force=True)
# determines if a reset to prev valid config or trying to setup new config
is_reset = True
if not data.get('config_valid'):
data['refresh_token'] = None
data['config_valid'] = 0
is_reset = False
with open(CONFIG_FILE_PATH, 'w') as fi:
yaml.dump(data, fi)
setup.upload_file_to_gcs(CONFIG_FILE_PATH, CONFIG_FILE_PATH_GS)
# If its just a reset - no need to generate a url
if is_reset:
return _build_response(msg=json.dumps(
'successfully restored previous configs'), status=200)
init_flow(
client_id=data['client_id'],
client_secret=data['client_secret']
)
return _build_response(msg=json.dumps(""), status=200)
def init_flow(from_client_config=False, client_id=None, client_secret=None):
global flow
try:
if from_client_config:
# Copy Google Storage file to tmp file if cloud version
setup.download_file_from_gcs(CONFIG_FILE_PATH_GS, CONFIG_FILE_PATH)
# Get credentials from config file
with open(CONFIG_FILE_PATH, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
client_id = config['client_id']
client_secret = config['client_secret']
client_config = {
'web': {
'client_id': client_id,
'client_secret': client_secret,
'auth_uri': 'https://accounts.google.com/o/oauth2/auth',
'token_uri': 'https://accounts.google.com/o/oauth2/token',
}
}
flow = Flow.from_client_config(
client_config=client_config,
scopes=[
'openid',
'https://www.googleapis.com/auth/adwords',
'https://www.googleapis.com/auth/userinfo.profile',
'https://www.googleapis.com/auth/userinfo.email',
'https://www.googleapis.com/auth/youtube.readonly'
]
)
flow.redirect_uri = BASE_URL
auth_url, _ = flow.authorization_url()
status=200
except Exception as e:
logging.error(str(e))
status=500
auth_url = ''
@app.route('/get-refresh-token/', methods=['POST'])
def get_refresh_token():
"""Gets the refresh token from a given frontend refresh
access token."""
if not flow:
init_flow(from_client_config=True)
logging.info(flow)
data = request.get_json(force=True)
code = data['code']
failed, refresh_token = setup.get_refresh_token(code, flow)
if failed:
return _build_response(
msg=json.dumps('Could not get refresh token.'),
status=500
)
return _build_response(
msg=json.dumps(refresh_token),
status=200
)
@app.route('/set-refresh/', methods=['POST'])
def set_refresh_token():
"""Can only be called if set-configs was called before.
Gets the refresh token and saves it to config.yaml
If successful calls init_client()"""
data = request.get_json(force=True)
code = data['code']
set_status, refresh_token = setup.set_refresh(code,flow)
if set_status:
# meaning that set_refresh failed
return _build_response(msg=json.dumps(
'could not update refresh token'), status=500)
init_status = init_clients()
if init_status:
return _build_response(msg=json.dumps('config invalid'), status=500)
else:
return _build_response(msg=json.dumps(refresh_token),status=200)
@app.route('/yt-config/', methods=['GET'])
def get_yt_configs():
"""return all config parameters"""
try:
setup.download_file_from_gcs(YT_CONFIG_FILE_PATH_GS, YT_CONFIG_FILE_PATH)
with open(YT_CONFIG_FILE_PATH, 'r') as fi:
config = json.load(fi)
except Exception as e:
config = {
'channel_id': '',
'api_key': '',
}
return _build_response(json.dumps(config),status=200)
@app.route('/set-yt/', methods=['POST'])
def set_yt():
"""Set yt-conifg.json with channel id and API key.
Will be used to get all the channel's videos. Gets a JSON with two keys:
channel_id
api_key
"""
data = request.get_json(force=True)
try:
channel_id = data['channel_id']
with open(YT_CONFIG_FILE_PATH, 'w') as f:
json.dump({'channel_id':channel_id},f)
setup.upload_file_to_gcs(YT_CONFIG_FILE_PATH, YT_CONFIG_FILE_PATH_GS)
return _build_response(status=200)
except Exception as e:
logging.exception(e)
return _build_response(msg=json.dumps(str(e)), status=400)
@app.route('/get-yt-videos/', methods=['GET'])
def get_yt_videos():
try:
videos = get_all_yt_videos()
except Exception as e:
logging.exception(e)
return _build_response(msg=json.dumps(str(e)), status=400)
return _build_response(msg=json.dumps(videos), status=200)
@app.route('/accounts/', methods=['GET'])
def get_all_accounts():
"""gets all accounts under the configured MCC. name and id"""
try:
accounts = structure.get_accounts(get_global_googleads_client())
return _build_response(msg=json.dumps(accounts), status=200)
except Exception as e:
return _build_response(msg=str(e), status=403)
@app.route('/account-ag-struct', methods=['GET'])
def get_account_ag_struct():
"""Get account's adgroups structure."""
cid = request.args.get('cid')
try:
msg = json.dumps(structure.get_account_adgroup_structure(get_global_googleads_client(),cid))
status = 200
except Exception as e:
logging.exception('could not get adgroup structure for ' + cid)
msg = json.dumps('Could not get adgroup structure for' + cid + str(e))
status = 500
return _build_response(msg=msg, status=status)
@app.route('/accounts-assets/', methods=['GET'])
def accounts_assets():
"""if cid gets all its assets. else gets all accounts and their assets."""
cid = request.args.get('cid')
if cid:
return get_specific_accounts_assets(cid)
else:
return _build_response(
json.dumps(structure.get_all_accounts_assets(get_global_googleads_client()), indent=2))
def get_specific_accounts_assets(cid):
"""Returns all assets under the given cid."""
# check input is valid
if len(cid) < 10:
return _build_response(
'Invalid Customer Id', status=400, mimetype='text/plain')
else:
try:
res = structure.get_accounts_assets(get_global_googleads_client(), cid)
return _build_response(msg=json.dumps(res),status=200)
except Exception as e:
logging.exception('Failed getting assets for: ' + cid + ' ' + str(e))
return _build_response(status=500)
@app.route('/assets-to-ag/', methods=['GET'])
def get_asset_to_ag():
customer_id = request.args.get('customer_id')
asset_id = request.args.get('asset_id')
asset_type = request.args.get('asset_type')
status = 200
try:
data = structure.get_assets_adgroups(get_global_googleads_client(),
customer_id, asset_id, asset_type)
except Exception as e:
logging.exception(e)
data = ''
status = 404
return _build_response(msg=json.dumps(data), status=status)
@app.route('/mutate-ad/', methods=['POST'])
def mutate():
"""Assign or remove an asset from adgroups.
gets a dict with two entries:
1. refresh token to create clients
2. list of asset, account, adgourp and action.
preforms all of the actions one by one.
returns a list with the new asset objects with the changed adgroups list.
if its a text asset, returns a list with
both 'headlines' and 'descriptions' entries.
also changes the asset_to_ag.json file.
"""
data = request.get_json(force=True)
logging.info('Received mutate request: %s', str(data['data']))
refresh_token = data['refresh_token']
data_load = data['data']
asset_id = data_load[0]['asset']['id']
asset_type = data_load[0]['asset']['type']
ga_client = init_user_googleads_client(refresh_token)
failed_assign = []
successful_assign = []
for item in data_load:
account = item['account']
adgroup = item['adgroup']
action = item['action']
asset = item['asset']
text_type = asset.get('text_type_to_assign')
try:
mutation = mutate_ad(ga_client, account, adgroup, asset, action, text_type)
except Exception as e:
logging.exception(e)
failed_assign.append(
{
'adgroup': populate_adgroup_details(get_global_googleads_client(), account, str(adgroup)),
'error_message': error_mapping(str(e)),
'err': str(e)
}
)
mutation = 'failed'
logging.error('could not execute mutation on adgroup: %s',str(adgroup))
if mutation is None:
successful_assign.append(adgroup)
if failed_assign and successful_assign:
status = 206
elif successful_assign:
status = 200
else:
status = 500
asset_handler = structure.get_assets_adgroups(get_global_googleads_client(), account, str(asset_id), asset_type)
return _build_response(msg=json.dumps(
[{'asset':asset_handler, 'failures':failed_assign}])
, status=status)
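# Hedged example of the JSON body this endpoint expects, based on the keys
# read above (ids are hypothetical, and the 'action' value is an assumption
# about what mutate_ad() accepts):
# {
#   "refresh_token": "<user refresh token>",
#   "data": [
#     {"account": "1234567890",
#      "adgroup": "111222333",
#      "action": "ADD",
#      "asset": {"id": "987654", "type": "IMAGE"}}
#   ]
# }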
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/upload-files/', methods=['POST'])
def upload_files():
status=200
msg=''
file = request.files['file']
if file and file.filename:
if allowed_file(file.filename):
filename = secure_filename(file.filename)
try:
file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(file_path)
except Exception as e:
logging.error(str(e))
status=500
else:
status=500
msg='Image not in valid format'
return _build_response(msg=msg, status=status)
@app.route('/validate-dimensions/', methods=['POST'])
def validate_dimensions():
data = request.get_json(force=True)
height = data['height']
width = data['width']
valid = False
# Rounding down for 2 digits after the decimal point, as G-Ads accepts it.
ratio = floor(width / height * 100) / 100
for allowed in ALLOWED_RATIOS:
if ratio == (allowed[0][0] / allowed[0][1]):
# Check minimum dimensions
if width >= allowed[1][0] and height >= allowed[1][1]:
valid = True
break
return _build_response(msg=json.dumps({"valid": valid}))
@app.route('/clean-dir/')
def clean_dir():
status=200
folder = UPLOAD_FOLDER
for filename in os.listdir(folder):
if filename == '.gitkeep':
continue
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
logging.error('Failed to delete %s. Reason: %s' % (file_path, e))
if len(os.listdir(folder)) > 1:
status=500
return _build_response(status=status)
@app.route('/upload-asset-bulk/', methods=['POST'])
def upload_bulk():
"""Bulk upload a list of YT videos to Google Ads. Gets a list of
dicts, each represents a video to upload.
Each dict should have the following fields:
account - to which account to upload (should be the same for all entries)
name - The name of the video, will be assigned as asset name
url - a link to the video in YT
will try to upload all entries one by one. If any will fail the return status
will be 206 and will provide a list of all failed videos and the reason for
the failure.
If all will succeed will return a 200 status code.
Currently only for YT videos. For images and HTML5 assets we
will need to adress the part of moving the files into the
'uploads' folder before uploading to Google Ads.
"""
data = request.get_json(force=True)
asset_list = data['data']
refresh_token = data['refresh_token']
ga_client = init_user_googleads_client(refresh_token)
failed = []
success = []
for asset in asset_list:
try:
res = upload(
ga_client,
get_global_googleads_client(),
asset['account'],
asset_type='YOUTUBE_VIDEO',
asset_name=asset['name'],
url=asset['url']
)
success.append(res['asset'])
logging.info('uploaded video named: %s with url: %s to account: %s',
asset['name'], asset['url'], asset['account'])
except Exception as e:
failed.append({'url':asset['url'],'name':asset['name'],'error':str(e)})
logging.error('failed to upload video named: %s with url: %s to account: %s with error: %s',
asset['name'], asset['url'], asset['account'], str(e))
# If none succeeded, return 400
if (not success):
return _build_response(msg=json.dumps(failed), status = 400)
status = 200
# If some uploads failed, return 206
if failed:
status = 206
# If all uploads succeeded, return 200
return _build_response(msg=json.dumps(success),status=status)
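# Hedged example of the expected request body for this endpoint (account id,
# video name and url are hypothetical):
# {
#   "refresh_token": "<user refresh token>",
#   "data": [
#     {"account": "1234567890",
#      "name": "spring_campaign_teaser",
#      "url": "https://www.youtube.com/watch?v=XXXXXXXXXXX"}
#   ]
# }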
@app.route('/upload-asset/', methods=['POST'])
def upload_asset():
"""upload new asset to account and assign to specified adgroups.
asset_type needs to be IMAGE,YOUTUBE_VIDEO,MEDIA_BUNDLE, descriptions,
headlines
"""
data = request.get_json(force=True)
if data.get('account') is None or data.get('asset_type') is None or data.get('refresh_token') is None:
return _build_response(msg='invalid arguments', status=400)
refresh_token = data.get('refresh_token')
logging.info(f"Using provided refresh token: {refresh_token}")
ga_client = init_user_googleads_client(refresh_token)
# uniform file names
asset_name = data.get('asset_name')
if asset_name and data.get('asset_type') == 'IMAGE':
asset_name = asset_name.replace(' ','_')
for char in string.punctuation:
if char not in ['_','-','.']:
asset_name = asset_name.replace(char,'')
try:
result = upload(
ga_client,
get_global_googleads_client(),
data.get('account'),
data.get('asset_type'),
asset_name,
asset_text=data.get('asset_text'),
path= UPLOAD_FOLDER / asset_name,
url=data.get('url'),
adgroups=data.get('adgroups'))
except Exception as e:
logging.exception(e)
# Asset not uploaded
return _build_response(msg=json.dumps(
{'msg': 'Could not upload asset',
'error_message': error_mapping(str(e)),
'err': str(e)}),
status=400)
# No adgroup assignment was requested, asset uploaded successfully
if result['status'] == -1:
return _build_response(msg=json.dumps(
{'msg':'Asset Uploaded','asset':result['asset']}), status=200)
# successfully uploaded and assigned to all ad groups
if result['status'] == 0:
return _build_response(msg=json.dumps(result),status=200)
# successfully assigned only to some ad groups
if result['status'] == 1:
return _build_response(msg=json.dumps(result),status=206)
# text asset couldn't be assigned to any ad group - therefore also not uploaded
if result['status'] == 3:
return _build_response(msg=json.dumps(
{'msg':'Text asset could not be assigned to any adgroup',
'failures':result['unsuccessfull']}), status=500)
# asset uploaded but wasn't assigned to any ad group
elif result['status'] == 2:
return _build_response(msg=json.dumps(
{'msg':'could not assign asset','asset':result['asset']}), status=500)
def _build_response(msg='', status=200, mimetype='application/json'):
"""Helper method to build the response."""
response = app.response_class(msg, status=status, mimetype=mimetype)
response.headers['Access-Control-Allow-Origin'] = '*'
return response
def init_clients():
"""Sets up googleads.yaml and google-ads.yaml and inits both clients.
tries to create struct. if succesful, marks config_valid=1 in config.yaml
to mark config is valid. Marks 0 otherwise."""
status = 0
try:
get_global_googleads_client()
with open(CONFIG_FILE_PATH, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config['config_valid'] = 1
with open(CONFIG_FILE_PATH, 'w') as f:
yaml.dump(config, f)
setup.upload_file_to_gcs(CONFIG_FILE_PATH, CONFIG_FILE_PATH_GS)
except Exception as e:
logging.error(str(e))
status=1
return status
def _get_config_file_contents():
"""Gets the contents of the config file"""
try:
setup.download_file_from_gcs(CONFIG_FILE_PATH_GS, CONFIG_FILE_PATH)
with open(CONFIG_FILE_PATH, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return config
except Exception as e:
logging.error(str(e))
def _make_api_config_dict(refresh_token: str) -> dict:
"""Creates a standard config structure for the GoogleAds
API's client instantiation by using the generic configuration file
and adding the user's refresh token."""
c = _get_config_file_contents()
api_config = {
'client_id': c['client_id'],
'client_secret': c['client_secret'],
'client_customer_id': c['client_customer_id'],
'developer_token': c['developer_token'],
'login_customer_id': c['client_customer_id'],
'refresh_token': refresh_token,
'use_proto_plus': True
}
return api_config
def init_user_googleads_client(refresh_token: str) -> GoogleAdsClient:
"""Initiates a new user-based GoogleAds API client."""
api_config = _make_api_config_dict(refresh_token)
ga_client = GoogleAdsClient.load_from_dict(api_config)
return ga_client
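# Usage sketch: request handlers that mutate or upload call, per request,
#   ga_client = init_user_googleads_client(data['refresh_token'])
# so writes run with the requesting user's credentials, while read-only
# structure lookups keep using the shared client from
# get_global_googleads_client().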
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods=['GET'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
def open_browser():
webbrowser.open_new(BASE_URL)
def get_server_config():
try:
with open('server.yaml', 'r') as f:
config_file = yaml.load(f, Loader=yaml.FullLoader)
host = config_file['hostname']
port = config_file['port']
cloud = config_file['cloud']
return host, port, cloud
except Exception:
raise RuntimeError('Cannot find server.yaml, or server.yaml is not correctly formatted.')
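# A minimal server.yaml sketch matching the keys read above (values are
# examples, not the project's required defaults):
#
# hostname: localhost
# port: 5000
# cloud: false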
if __name__ == '__main__':
host, port, cloud = get_server_config()
if (not cloud):
BASE_URL = f'http://{host}:{port}'
threading.Timer(1, open_browser).start()
app.run(host=host, port=port)
|
|
# Origin: https://github.com/jcsp/scrape/blob/master/scrape.py
# Author: John Spray (github.com/jcsp)
import difflib
from errno import ENOENT
from gzip import GzipFile
import sys
import os
import yaml
from collections import defaultdict
import re
import logging
import subprocess
import six
log = logging.getLogger('scrape')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
class Reason(object):
def get_description(self):
return self.description
def get_detail(self):
return None
def grep(path, expr):
"""
Call out to native grep rather than feeding massive log files through python line by line
"""
p = subprocess.Popen(["grep", expr, path], stdout=subprocess.PIPE)
p.wait()
out, err = p.communicate()
if p.returncode == 0:
return six.ensure_str(out).split("\n")
else:
return []
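# Usage sketch (path is hypothetical):
#   grep("/a/run/12345/teuthology.log", "FAILED assert")
# returns the matching lines as a list of strings, or [] when grep exits
# non-zero (i.e. no matches).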
class GenericReason(Reason):
"""
A reason inferred from a Job: matches Jobs with an apparently-similar failure
"""
def __init__(self, job, description=None):
self.failure_reason = job.get_failure_reason()
self.description = description
self.backtrace = job.get_backtrace()
if self.backtrace:
log.debug("Found a backtrace!\n{0}".format(self.backtrace))
def get_detail(self):
return self.backtrace
def get_description(self):
if self.description:
return self.description
else:
if self.backtrace:
return "Crash: {0}".format(self.failure_reason)
else:
return "Failure: {0}".format(self.failure_reason)
def match(self, job):
# I never match dead jobs
if job.get_failure_reason() is None:
return False
# If one has a backtrace but the other doesn't, we're a different thing even if the official
# failure_reason is the same
if (self.backtrace is None) != (job.get_backtrace() is None):
return False
# If we have the same backtrace, we're a match even if the teuthology failure_reason
# doesn't match (a crash is a crash, it can have different symptoms)
if self.backtrace:
ratio = difflib.SequenceMatcher(None, self.backtrace, job.get_backtrace()).ratio()
return ratio > 0.5
else:
if "Test failure:" in self.failure_reason:
return self.failure_reason == job.get_failure_reason()
elif re.search("workunit test (.*)\) on ", self.failure_reason):
workunit_name = re.search("workunit test (.*)\) on ", self.failure_reason).group(1)
other_match = re.search("workunit test (.*)\) on ", job.get_failure_reason())
return other_match is not None and workunit_name == other_match.group(1)
else:
reason_ratio = difflib.SequenceMatcher(None, self.failure_reason, job.get_failure_reason()).ratio()
return reason_ratio > 0.5
class RegexReason(Reason):
"""
A known reason matching a particular regex to failure reason
"""
def __init__(self, regexes, description):
self.description = description
if isinstance(regexes, list):
self.regexes = regexes
else:
self.regexes = [regexes]
def match(self, job):
# I never match dead jobs
if job.get_failure_reason() is None:
return False
for regex in self.regexes:
if re.match(regex, job.get_failure_reason()):
return True
return False
class AssertionReason(Reason):
def __init__(self, job):
self.assertion = job.get_assertion()
self.backtrace = job.get_backtrace()
def get_description(self):
return "Assertion: {0}".format(self.assertion)
def get_detail(self):
return self.backtrace
@classmethod
def could_be(cls, job):
return job.get_assertion() is not None
def match(self, job):
return self.assertion == job.get_assertion()
class LockdepReason(AssertionReason):
"""
Different to a normal assertion, because matches should not only
have the same assertion but the same backtrace (don't want to glob
all lockdep failures together if they are really being tripped in
different places)
"""
@classmethod
def could_be(cls, job):
if not super(LockdepReason, cls).could_be(job):
return False
return "common/lockdep" in job.get_assertion()
def get_description(self):
return "Lockdep: {0}".format(self.assertion)
def match(self, job):
if not super(LockdepReason, self).match(job):
return False
if self.backtrace:
if job.get_backtrace():
ratio = difflib.SequenceMatcher(None, self.backtrace, job.get_backtrace()).ratio()
return ratio > 0.5
else:
return False
else:
# No backtrace to compare against, allow matches based purely on the assertion
return True
class DeadReason(Reason):
"""
A reason for picking up jobs with no summary.yaml
"""
def __init__(self, job):
self.description = "Dead"
self.last_tlog_line = job.get_last_tlog_line()
self.backtrace = job.get_backtrace()
def get_description(self):
return "Dead: {0}".format(self.last_tlog_line)
def get_detail(self):
return self.backtrace
@classmethod
def could_be(cls, job):
return job.summary_data is None
def match(self, job):
if job.summary_data:
return False
if self.backtrace:
if job.get_backtrace():
# We both have backtrace: use that to decide if we're the same
ratio = difflib.SequenceMatcher(None, self.backtrace, job.get_backtrace()).ratio()
return ratio > 0.5
else:
# I have BT but he doesn't, so we're different
return False
if self.last_tlog_line or job.get_last_tlog_line():
ratio = difflib.SequenceMatcher(None, self.last_tlog_line,
job.get_last_tlog_line()).ratio()
return ratio > 0.5
return True
class TimeoutReason(Reason):
def __init__(self, job):
self.timeout, self.command = self.get_timeout(job)
def get_description(self):
return "Timeout {0} running {1}".format(
self.timeout, self.command
)
@classmethod
def could_be(cls, job):
return cls.get_timeout(job) is not None
@classmethod
def get_timeout(cls, job):
if job.get_failure_reason() is None:
return None
match = re.search("status 124:.* timeout ([^ ]+) ([^']+)'", job.get_failure_reason())
if not match:
return
timeout, bin_path = match.groups()
# Given a path like /home/ubuntu/cephtest/workunit.client.0/cephtool/test.sh
# ... strip it down to cephtool/test.sh
parts = bin_path.split(os.path.sep)
parts.reverse()
rparts = []
for p in parts:
if 'workunit.' in p or 'cephtest' in p:
break
else:
rparts.append(p)
rparts.reverse()
command = os.path.sep.join(rparts)
return timeout, command
def match(self, job):
return self.get_timeout(job) == (self.timeout, self.command)
MAX_TEUTHOLOGY_LOG = 1024 * 1024 * 100
MAX_SVC_LOG = 100 * 1024 * 1024
MAX_BT_LINES = 100
class Job(object):
def __init__(self, path, job_id):
self.path = path
self.job_id = job_id
try:
self.config = yaml.safe_load(open(os.path.join(self.path, "config.yaml"), 'r'))
self.description = self.config['description']
assert self.description
except IOError:
self.config = None
self.description = None
summary_path = os.path.join(self.path, "summary.yaml")
try:
self.summary_data = yaml.safe_load(open(summary_path, 'r'))
except IOError:
self.summary_data = None
self.backtrace = None
self.assertion = None
self.populated = False
def get_success(self):
if self.summary_data:
return self.summary_data['success']
else:
return False
def get_failure_reason(self):
if self.summary_data:
return self.summary_data['failure_reason']
else:
return None
def get_last_tlog_line(self):
t_path = os.path.join(self.path, "teuthology.log")
if not os.path.exists(t_path):
return None
else:
out, err = subprocess.Popen(["tail", "-n", "1", t_path], stdout=subprocess.PIPE).communicate()
return out.strip()
def _search_backtrace(self, file_obj):
bt_lines = []
assertion = None
for line in file_obj:
# Log prefix from teuthology.log
if ".stderr:" in line:
line = line.split(".stderr:")[1]
if "FAILED assert" in line:
assertion = line.strip()
if line.startswith(" ceph version"):
# The start of a backtrace!
bt_lines = [line]
elif line.startswith(" NOTE: a copy of the executable"):
# The backtrace terminated, if we have a buffer return it
if len(bt_lines):
return ("".join(bt_lines)).strip(), assertion
else:
log.warning("Saw end of BT but not start")
elif bt_lines:
# We're in a backtrace, push the line onto the list
if len(bt_lines) > MAX_BT_LINES:
# Something wrong with our parsing, drop it
log.warning("Ignoring malparsed backtrace: {0}".format(
", ".join(bt_lines[0:3])
))
bt_lines = []
bt_lines.append(line)
return None, assertion
def get_assertion(self):
if not self.populated:
self._populate_backtrace()
return self.assertion
def get_backtrace(self):
if not self.populated:
self._populate_backtrace()
return self.backtrace
def _populate_backtrace(self):
tlog_path = os.path.join(self.path, "teuthology.log")
try:
s = os.stat(tlog_path)
except OSError:
log.warning("Missing teuthology log {0}".format(tlog_path))
return None
size = s.st_size
if size > MAX_TEUTHOLOGY_LOG:
log.debug("Ignoring teuthology log for job {0}, it is {1} bytes".format(self.job_id, size))
return None
self.backtrace, self.assertion = self._search_backtrace(open(tlog_path))
if self.backtrace:
return
for line in grep(tlog_path, "command crashed with signal"):
log.debug("Found a crash indication: {0}".format(line))
# tasks.ceph.osd.1.plana82.stderr
match = re.search("tasks.ceph.([^\.]+).([^\.]+).([^\.]+).stderr", line)
if not match:
log.warning("Not-understood crash indication {0}".format(line))
continue
svc, svc_id, hostname = match.groups()
gzipped_log_path = os.path.join(
self.path, "remote", hostname, "log", "ceph-{0}.{1}.log.gz".format(svc, svc_id))
try:
s = os.stat(gzipped_log_path)
except OSError as e:
if e.errno == ENOENT:
log.warning("Missing log {0}".format(gzipped_log_path))
continue
else:
raise
size = s.st_size
if size > MAX_SVC_LOG:
log.warning("Not checking for backtrace from {0}:{1}.{2} log, too large ({3})".format(
hostname, svc, svc_id, size
))
continue
bt, ass = self._search_backtrace(GzipFile(gzipped_log_path))
if ass and not self.assertion:
self.assertion = ass
if bt:
self.backtrace = bt
return
return None
class ValgrindReason(Reason):
def __init__(self, job):
assert self.could_be(job)
self.service_types = self._get_service_types(job)
def _get_service_types(self, job):
"""
Get dict mapping service type 'osd' etc to sorted list of violation types 'Leak_PossiblyLost' etc
"""
result = defaultdict(list)
# Lines like:
# 2014-08-22T20:07:18.668 ERROR:tasks.ceph:saw valgrind issue <kind>Leak_DefinitelyLost</kind> in /var/log/ceph/valgrind/osd.3.log.gz
for line in grep(os.path.join(job.path, "teuthology.log"), "</kind> in "):
match = re.search("<kind>(.+)</kind> in .+/(.+)", line)
if not match:
log.warning("Misunderstood line: {0}".format(line))
continue
err_typ, log_basename = match.groups()
svc_typ = six.ensure_str(log_basename).split(".")[0]
if err_typ not in result[svc_typ]:
result[svc_typ].append(err_typ)
result[svc_typ] = sorted(result[svc_typ])
return dict(result)
def get_description(self):
desc_bits = []
for service, types in list(self.service_types.items()):
desc_bits.append("{0} ({1})".format(service, ", ".join(types)))
return "Valgrind: " + ", ".join(desc_bits)
@classmethod
def could_be(cls, job):
return job.get_failure_reason() is not None and "saw valgrind issues" in job.get_failure_reason()
def match(self, job):
return self._get_service_types(job) == self.service_types
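# Illustrative: for the log line quoted in _get_service_types() above, the
# parsed result is service_types == {"osd": ["Leak_DefinitelyLost"]}, which
# get_description() renders as "Valgrind: osd (Leak_DefinitelyLost)".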
known_reasons = [
# If the failure reason indicates no packages found...
RegexReason(["Failed to fetch package version from http://",
"Command failed on .* with status 100: 'sudo apt-get update"]
, "Missing packages"),
]
def give_me_a_reason(job):
"""
If no existing reasons match the job, generate the most specific reason we can
"""
    # Note: because known reasons (including GenericReasons) are matched before
    # the Timeout/Valgrind-specific classes, a run that is really a timeout or
    # valgrind failure will still be matched to a backtrace or assertion if one
    # is present, hiding the timeout/valgrind aspect.
for r in known_reasons:
if r.match(job):
return r
# NB ordering matters, LockdepReason must come before AssertionReason
for klass in [DeadReason, LockdepReason, AssertionReason, TimeoutReason, ValgrindReason]:
if klass.could_be(job):
return klass(job)
return GenericReason(job)
class Scraper(object):
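    """
    Walk a teuthology run directory, group failed jobs by Reason, and log a
    per-reason summary (also appended to scrape.log inside the target dir).
    """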
def __init__(self, target_dir):
self.target_dir = target_dir
log.addHandler(logging.FileHandler(os.path.join(target_dir,
"scrape.log")))
def analyze(self):
entries = os.listdir(self.target_dir)
jobs = []
for entry in entries:
job_dir = os.path.join(self.target_dir, entry)
if os.path.isdir(job_dir):
jobs.append(Job(job_dir, entry))
log.info("Found {0} jobs".format(len(jobs)))
passes = []
reasons = defaultdict(list)
for job in jobs:
if job.get_success():
passes.append(job)
continue
matched = False
for reason, reason_jobs in reasons.items():
if reason.match(job):
reason_jobs.append(job)
matched = True
break
if not matched:
reasons[give_me_a_reason(job)].append(job)
log.info("Found {0} distinct failure reasons".format(len(reasons)))
for reason, jobs in list(reasons.items()):
job_spec = "{0} jobs: {1}".format(len(jobs), [j.job_id for j in jobs]) if len(jobs) < 30 else "{0} jobs".format(len(jobs))
log.info(reason.get_description())
detail = reason.get_detail()
if detail:
log.info(detail)
log.info(job_spec)
            suites = [set(j.description.split()) for j in jobs if j.description is not None]
if len(suites) > 1:
log.info("suites intersection: {0}".format(sorted(set.intersection(*suites))))
log.info("suites union: {0}".format(sorted(set.union(*suites))))
elif len(suites) == 1:
log.info("suites: {0}".format(sorted(suites[0])))
log.info("")
if __name__ == '__main__':
Scraper(sys.argv[1]).analyze()
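# Typical invocation (script name and path are illustrative assumptions):
#   python scrape.py /a/teuthology-archive/some-run-directory
# where the argument is a run directory containing one subdirectory per job.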
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for filesystem module."""
from __future__ import absolute_import
from __future__ import division
import bz2
import gzip
import logging
import ntpath
import os
import posixpath
import sys
import tempfile
import unittest
from builtins import range
from io import BytesIO
from future.utils import iteritems
from parameterized import param
from parameterized import parameterized
from apache_beam.io.filesystem import CompressedFile
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.filesystem import FileMetadata
from apache_beam.io.filesystem import FileSystem
class TestingFileSystem(FileSystem):
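  """In-memory FileSystem stub used by these tests.

  Only scheme(), has_dirs(), _insert_random_file() and _list() are functional;
  every other FileSystem operation raises NotImplementedError.
  """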
def __init__(self, pipeline_options, has_dirs=False):
super(TestingFileSystem, self).__init__(pipeline_options)
self._has_dirs = has_dirs
self._files = {}
@classmethod
def scheme(cls):
# Required for FileSystems.get_filesystem().
return 'test'
def join(self, basepath, *paths):
raise NotImplementedError
def split(self, path):
raise NotImplementedError
def mkdirs(self, path):
raise NotImplementedError
def has_dirs(self):
return self._has_dirs
def _insert_random_file(self, path, size):
self._files[path] = size
def _list(self, dir_or_prefix):
for path, size in iteritems(self._files):
if path.startswith(dir_or_prefix):
yield FileMetadata(path, size)
def create(self, path, mime_type='application/octet-stream',
compression_type=CompressionTypes.AUTO):
raise NotImplementedError
def open(self, path, mime_type='application/octet-stream',
compression_type=CompressionTypes.AUTO):
raise NotImplementedError
def copy(self, source_file_names, destination_file_names):
raise NotImplementedError
def rename(self, source_file_names, destination_file_names):
raise NotImplementedError
def exists(self, path):
raise NotImplementedError
def size(self, path):
raise NotImplementedError
def last_updated(self, path):
raise NotImplementedError
def checksum(self, path):
raise NotImplementedError
def delete(self, paths):
raise NotImplementedError
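# Illustrative use of the stub (mirrors what the tests below do; names are
# made up for the example):
#   fs = TestingFileSystem(pipeline_options=None)
#   fs._insert_random_file('gs://bucket/dir/a.txt', 3)
#   match_result = fs.match(['gs://bucket/dir/*'])[0]
#   paths = [m.path for m in match_result.metadata_list]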
class TestFileSystem(unittest.TestCase):
def setUp(self):
self.fs = TestingFileSystem(pipeline_options=None)
def _flatten_match(self, match_results):
return [file_metadata
for match_result in match_results
for file_metadata in match_result.metadata_list]
@parameterized.expand([
('gs://gcsio-test/**', all),
# Does not match root-level files
('gs://gcsio-test/**/*', lambda n, i: n not in ['cat.png']),
# Only matches root-level files
('gs://gcsio-test/*', [
('cat.png', 19)
]),
('gs://gcsio-test/cow/**', [
('cow/cat/fish', 2),
('cow/cat/blubber', 3),
('cow/dog/blubber', 4),
]),
('gs://gcsio-test/cow/ca**', [
('cow/cat/fish', 2),
('cow/cat/blubber', 3),
]),
('gs://gcsio-test/apple/[df]ish/ca*', [
('apple/fish/cat', 10),
('apple/fish/cart', 11),
('apple/fish/carl', 12),
('apple/dish/cat', 14),
('apple/dish/carl', 15),
]),
('gs://gcsio-test/apple/?ish/?a?', [
('apple/fish/cat', 10),
('apple/dish/bat', 13),
('apple/dish/cat', 14),
]),
('gs://gcsio-test/apple/fish/car?', [
('apple/fish/cart', 11),
('apple/fish/carl', 12),
]),
('gs://gcsio-test/apple/fish/b*', [
('apple/fish/blubber', 6),
('apple/fish/blowfish', 7),
('apple/fish/bambi', 8),
('apple/fish/balloon', 9),
]),
('gs://gcsio-test/apple/f*/b*', [
('apple/fish/blubber', 6),
('apple/fish/blowfish', 7),
('apple/fish/bambi', 8),
('apple/fish/balloon', 9),
]),
('gs://gcsio-test/apple/dish/[cb]at', [
('apple/dish/bat', 13),
('apple/dish/cat', 14),
]),
('gs://gcsio-test/banana/cyrano.m?', [
('banana/cyrano.md', 17),
('banana/cyrano.mb', 18),
]),
])
def test_match_glob(self, file_pattern, expected_object_names):
objects = [
('cow/cat/fish', 2),
('cow/cat/blubber', 3),
('cow/dog/blubber', 4),
('apple/dog/blubber', 5),
('apple/fish/blubber', 6),
('apple/fish/blowfish', 7),
('apple/fish/bambi', 8),
('apple/fish/balloon', 9),
('apple/fish/cat', 10),
('apple/fish/cart', 11),
('apple/fish/carl', 12),
('apple/dish/bat', 13),
('apple/dish/cat', 14),
('apple/dish/carl', 15),
('banana/cat', 16),
('banana/cyrano.md', 17),
('banana/cyrano.mb', 18),
('cat.png', 19)
]
bucket_name = 'gcsio-test'
if callable(expected_object_names):
# A hack around the fact that the parameters do not have access to
# the "objects" list.
if expected_object_names is all:
# It's a placeholder for "all" objects
expected_object_names = objects
else:
# It's a filter function of type (str, int) -> bool
# that returns true for expected objects
filter_func = expected_object_names
expected_object_names = [
(short_path, size) for short_path, size in objects
if filter_func(short_path, size)
]
for object_name, size in objects:
file_name = 'gs://%s/%s' % (bucket_name, object_name)
self.fs._insert_random_file(file_name, size)
expected_file_names = [('gs://%s/%s' % (bucket_name, object_name), size)
for object_name, size in expected_object_names]
actual_file_names = [
(file_metadata.path, file_metadata.size_in_bytes)
for file_metadata in self._flatten_match(self.fs.match([file_pattern]))
]
self.assertEqual(set(actual_file_names), set(expected_file_names))
# Check if limits are followed correctly
limit = 3
expected_num_items = min(len(expected_object_names), limit)
self.assertEqual(
len(self._flatten_match(self.fs.match([file_pattern], [limit]))),
expected_num_items)
@parameterized.expand([
param(os_path=posixpath, sep_re='\\/'),
param(os_path=ntpath, sep_re='\\\\'),
])
def test_translate_pattern(self, os_path, sep_re):
star = r'[^/\\]*'
double_star = r'.*'
join = os_path.join
sep = os_path.sep
pattern__expected = [
(join('a', '*'), sep_re.join(['a', star])),
(join('b', '*') + sep, sep_re.join(['b', star]) + sep_re),
(r'*[abc\]', star + r'[abc\\]'),
(join('d', '**', '*'), sep_re.join(['d', double_star, star])),
]
for pattern, expected in pattern__expected:
expected += r'\Z(?ms)'
result = self.fs.translate_pattern(pattern)
self.assertEqual(result, expected)
class TestFileSystemWithDirs(TestFileSystem):
def setUp(self):
self.fs = TestingFileSystem(pipeline_options=None, has_dirs=True)
class TestCompressedFile(unittest.TestCase):
"""Base class for TestCases that deals with TempDir clean-up.
Inherited test cases will call self._new_tempdir() to start a temporary dir
which will be deleted at the end of the tests (when tearDown() is called).
"""
content = """- the BEAM -
How things really are we would like to know.
Does
Time
flow, is it elastic, or is it
atomized in instants hammered around the
clock's face? ...
- May Swenson"""
# Keep the read block size small so that we exercise the seek functionality
# in compressed file and not just in the internal buffer
read_block_size = 4
def setUp(self):
self._tempfiles = []
def tearDown(self):
for path in self._tempfiles:
if os.path.exists(path):
os.remove(path)
def _create_temp_file(self):
path = tempfile.NamedTemporaryFile(delete=False).name
self._tempfiles.append(path)
return path
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test still needs to be fixed on Python 3. '
                   'TODO: BEAM-5627')
def _create_compressed_file(self, compression_type, content):
file_name = self._create_temp_file()
if compression_type == CompressionTypes.BZIP2:
compress_factory = bz2.BZ2File
elif compression_type == CompressionTypes.GZIP:
compress_factory = gzip.open
else:
assert False, "Invalid compression type: %s" % compression_type
with compress_factory(file_name, 'wb') as f:
f.write(content)
return file_name
def test_seekable_enabled_on_read(self):
with open(self._create_temp_file(), 'rb') as f:
readable = CompressedFile(f)
self.assertTrue(readable.seekable)
def test_seekable_disabled_on_write(self):
with open(self._create_temp_file(), 'wb') as f:
writeable = CompressedFile(f)
self.assertFalse(writeable.seekable)
def test_seekable_disabled_on_append(self):
with open(self._create_temp_file(), 'ab') as f:
writeable = CompressedFile(f)
self.assertFalse(writeable.seekable)
def test_seek_set(self):
for compression_type in [CompressionTypes.BZIP2, CompressionTypes.GZIP]:
file_name = self._create_compressed_file(compression_type, self.content)
with open(file_name, 'rb') as f:
compressed_fd = CompressedFile(f, compression_type,
read_size=self.read_block_size)
reference_fd = BytesIO(self.content)
        # Note: BytesIO's tell() reports out-of-bound positions (if we seek
        # beyond the file), therefore we need to cap it to max_position.
        # _CompressedFile.tell() always stays within the bounds of the
        # uncompressed content.
# Negative seek position argument is not supported for BytesIO with
# whence set to SEEK_SET.
for seek_position in (0, 1,
len(self.content)-1, len(self.content),
len(self.content) + 1):
compressed_fd.seek(seek_position, os.SEEK_SET)
reference_fd.seek(seek_position, os.SEEK_SET)
uncompressed_line = compressed_fd.readline()
reference_line = reference_fd.readline()
self.assertEqual(uncompressed_line, reference_line)
uncompressed_position = compressed_fd.tell()
reference_position = reference_fd.tell()
max_position = len(self.content)
reference_position = min(reference_position, max_position)
self.assertEqual(uncompressed_position, reference_position)
def test_seek_cur(self):
for compression_type in [CompressionTypes.BZIP2, CompressionTypes.GZIP]:
file_name = self._create_compressed_file(compression_type, self.content)
with open(file_name, 'rb') as f:
compressed_fd = CompressedFile(f, compression_type,
read_size=self.read_block_size)
reference_fd = BytesIO(self.content)
        # Test out-of-bound and in-bound seeking in both directions.
        # Note: BytesIO's seek() reports out-of-bound positions (if we seek
        # beyond the file), therefore we need to cap it to max_position (to
        # make it consistent with the old StringIO behavior).
for seek_position in (-1, 0, 1,
len(self.content) // 2,
len(self.content) // 2,
-1 * len(self.content) // 2):
compressed_fd.seek(seek_position, os.SEEK_CUR)
reference_fd.seek(seek_position, os.SEEK_CUR)
uncompressed_line = compressed_fd.readline()
expected_line = reference_fd.readline()
self.assertEqual(uncompressed_line, expected_line)
reference_position = reference_fd.tell()
uncompressed_position = compressed_fd.tell()
max_position = len(self.content)
reference_position = min(reference_position, max_position)
reference_fd.seek(reference_position, os.SEEK_SET)
self.assertEqual(uncompressed_position, reference_position)
def test_read_from_end_returns_no_data(self):
for compression_type in [CompressionTypes.BZIP2, CompressionTypes.GZIP]:
file_name = self._create_compressed_file(compression_type, self.content)
with open(file_name, 'rb') as f:
compressed_fd = CompressedFile(f, compression_type,
read_size=self.read_block_size)
seek_position = 0
compressed_fd.seek(seek_position, os.SEEK_END)
expected_data = ''
uncompressed_data = compressed_fd.read(10)
self.assertEqual(uncompressed_data, expected_data)
def test_seek_outside(self):
for compression_type in [CompressionTypes.BZIP2, CompressionTypes.GZIP]:
file_name = self._create_compressed_file(compression_type, self.content)
with open(file_name, 'rb') as f:
compressed_fd = CompressedFile(f, compression_type,
read_size=self.read_block_size)
for whence in (os.SEEK_CUR, os.SEEK_SET, os.SEEK_END):
seek_position = -1 * len(self.content) - 10
compressed_fd.seek(seek_position, whence)
expected_position = 0
uncompressed_position = compressed_fd.tell()
self.assertEqual(uncompressed_position, expected_position)
seek_position = len(self.content) + 20
compressed_fd.seek(seek_position, whence)
expected_position = len(self.content)
uncompressed_position = compressed_fd.tell()
self.assertEqual(uncompressed_position, expected_position)
def test_read_and_seek_back_to_beginning(self):
for compression_type in [CompressionTypes.BZIP2, CompressionTypes.GZIP]:
file_name = self._create_compressed_file(compression_type, self.content)
with open(file_name, 'rb') as f:
compressed_fd = CompressedFile(f, compression_type,
read_size=self.read_block_size)
first_pass = compressed_fd.readline()
compressed_fd.seek(0, os.SEEK_SET)
second_pass = compressed_fd.readline()
self.assertEqual(first_pass, second_pass)
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
                   'This test still needs to be fixed on Python 3. '
                   'TODO: BEAM-5627')
def test_tell(self):
lines = ['line%d\n' % i for i in range(10)]
tmpfile = self._create_temp_file()
with open(tmpfile, 'w') as f:
writeable = CompressedFile(f)
current_offset = 0
for line in lines:
writeable.write(line)
current_offset += len(line)
self.assertEqual(current_offset, writeable.tell())
with open(tmpfile) as f:
readable = CompressedFile(f)
current_offset = 0
while True:
line = readable.readline()
current_offset += len(line)
self.assertEqual(current_offset, readable.tell())
if not line:
break
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|