import csv,os,sys,coral,array
from RecoLuminosity.LumiDB import argparse,sessionManager,CommonUtil,idDealer,dbUtil,dataDML,revisionDML
NCOLS=4
def updateLSTrg(dbsession,runnum,perlsrawdata):
'''
input: perlsrawdata [(cmslsnum,normbitcount,normbitprescale),(cmslsnum,normbitcount,normbitprescale)...]
update lstrg set bitzerocount=:normbitcount,bitzeroprescale=:normbitprescale where runnum=:runnum and cmslsnum=:cmslsnum
'''
bulkinput=[]
dbsession.transaction().start(False)
db=dbUtil.dbUtil(dbsession.nominalSchema())
updateAction='BITZEROCOUNT=:bitzerocount,BITZEROPRESCALE=:bitzeroprescale'
updateCondition='RUNNUM=:runnum AND CMSLSNUM=:cmslsnum'
bindvarDef=[('bitzerocount','unsigned int'),('bitzeroprescale','unsigned int'),('runnum','unsigned int'),('cmslsnum','unsigned int')]
for (cmslsnum,normbitcount,normbitprescale) in perlsrawdata:
bulkinput.append([('bitzerocount',normbitcount),('bitzeroprescale',normbitprescale),('runnum',runnum),('cmslsnum',cmslsnum)])
db.updateRows('LSTRG',updateAction,updateCondition,bindvarDef,bulkinput)
#dbsession.transaction().rollback()
dbsession.transaction().commit()
def parseInputFile(ifilename):
perlsdata=[]
try:
csvfile=open(ifilename,'rb')
reader=csv.reader(csvfile,delimiter=',',skipinitialspace=True)
for row in reader:
if not row: continue
if len(row)!=NCOLS: continue
runnumStr=row[0].strip()
cmslsnumStr=row[1].strip()
normbitCountStr=row[2].strip()
normbitPrescStr=row[3].strip()
perlsdata.append( (int(cmslsnumStr),int(normbitCountStr),int(normbitPrescStr)) )
except Exception,e:
raise RuntimeError(str(e))
return perlsdata
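# Expected input CSV layout (illustrative values, not real data): one line per
# lumisection with NCOLS=4 comma-separated fields
#
#   runnum, cmslsnum, normbitcount, normbitprescale
#   165121, 1, 529417, 17
#
# runnum is parsed but ignored here; the run number comes from the -r option.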
def main(*args):
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),description = "Lumi fake",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
allowedActions = ['v2']
parser.add_argument('action',choices=allowedActions,
help='command actions')
    parser.add_argument('-c',dest='connect',action='store',
                        required=True,
                        help='connect string to lumiDB')
parser.add_argument('-P',dest='authpath',action='store',
required=True,
help='path to authentication file')
parser.add_argument('-r',dest='runnumber',action='store',
type=int,
required=True,
help='run number')
    parser.add_argument('-i',dest='ifile',action='store',
                        required=True,
                        help='patch data file')
    parser.add_argument('--debug',dest='debug',action='store_true',
                        required=False,
                        help='debug')
options=parser.parse_args()
os.environ['CORAL_AUTH_PATH'] = options.authpath
perlsrawdata=parseInputFile(options.ifile)
print perlsrawdata
msg=coral.MessageStream('')
msg.setMsgVerbosity(coral.message_Level_Error)
svc=sessionManager.sessionManager(options.connect,authpath=options.authpath,debugON=options.debug)
dbsession=svc.openSession(isReadOnly=False,cpp2sqltype=[('unsigned int','NUMBER(10)'),('unsigned long long','NUMBER(20)')])
if options.action=='v2':
updateLSTrg(dbsession,options.runnumber,perlsrawdata)
#elif options.action=='v1' :
# summaryidlsmap=insertLumiSummarydata(dbsession,options.runnumber,perlsrawdata,deliveredonly=options.deliveredonly)
# if perbunchrawdata:
# insertLumiDetaildata(dbsession,perlsrawdata,perbunchrawdata,summaryidlsmap)
del dbsession
del svc
if __name__=='__main__':
sys.exit(main(*sys.argv))
#!/usr/bin/python
import inspect
import random
import sys
import unittest
sys.path.insert( 0, sys.path[0] + '/..' )
from metasmt.core import *
def skipIfNoSolver(obj):
if obj.solver is not None:
return lambda func: func
return unittest.skip("solver is unavailable")
class LogicTest( object ):
solver = None
def test_variable( self ):
p = new_variable()
solver = self.solver()
solver.solve()
def check_with_solver( self, solver, metasmt_function, specification ):
vars = tuple( new_variables( len( inspect.getargspec( specification ).args ) ) )
solver.assertion( metasmt_function( *vars ) )
self.assertTrue( solver.solve() )
self.assertTrue( specification( *tuple( map( solver.__getitem__, vars ) ) ) )
def check( self, metasmt_function, specification ):
self.check_with_solver( self.solver(), metasmt_function, specification )
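    # Usage pattern (sketch): check() builds fresh variables matching the
    # arity of the Python specification, asserts the metaSMT formula, solves,
    # and replays the solver's model through the plain-Python lambda, e.g.
    #   self.check( logic_and, lambda a, b: a and b )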
def test_not( self ):
self.check( logic_not, lambda a: not a )
def test_equal( self ):
self.check( logic_equal, lambda a, b: a == b )
def test_nequal( self ):
self.check( logic_nequal, lambda a, b: a != b )
def test_implies( self ):
self.check( logic_implies, lambda a, b: a <= b )
def test_and( self ):
self.check( logic_and, lambda a, b: a and b )
solver = self.solver()
solver.assertion( logic_and( True, True, True ) )
self.assertTrue( solver.solve() )
solver.assertion( logic_and( True, False, True, True, False, True ) )
self.assertTrue( not solver.solve() )
def test_nand( self ):
self.check( logic_nand, lambda a, b: not( a and b ) )
def test_or( self ):
self.check( logic_or, lambda a, b: a or b )
solver = self.solver()
solver.assertion( logic_or( True, True, True ) )
self.assertTrue( solver.solve() )
solver.assertion( logic_or( True, False, True, True, False, True ) )
self.assertTrue( solver.solve() )
solver.assertion( logic_or( False, False, True, True, False, True ) )
self.assertTrue( solver.solve() )
solver.assertion( logic_or( False, False, False, False, False, False ) )
self.assertTrue( not solver.solve() )
def test_nor( self ):
self.check( logic_nor, lambda a, b: not( a or b ) )
def test_xor( self ):
self.check( logic_xor, lambda a, b: a != b )
def test_xnor( self ):
self.check( logic_xnor, lambda a, b: a == b )
def test_ite( self ):
self.check( logic_ite, lambda a, b, c: ( a and b ) or ( not a and c ) )
def invert( v, bitwidth = 32 ):
return v ^ ( ( 1 << bitwidth ) - 1 )
def twoscomp( v, bitwidth = 32 ):
if v & (1 << ( bitwidth - 1 ) ) == 0:
return v
else:
return -( invert( v - ( 1 << ( bitwidth - 1 ) ), bitwidth - 1 ) + 1 )
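# Worked examples (illustrative): invert( 0, 8 ) == 0xFF, twoscomp( 5 ) == 5,
# and twoscomp( 0xFFFFFFFF ) == -1, i.e. an all-ones word read as signed.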
class BitvectorTest( object ):
solver = None
def check_with_solver( self, solver, metasmt_function, specification, bv = 32, rbv = 32, returns_bool = False, tbv = 32 ):
vars = new_bitvectors( bv, len( inspect.getargspec( metasmt_function ).args ) )
v = random.randint( 0, 2**rbv - 1 )
if returns_bool:
solver.assertion( metasmt_function( *vars ) )
else:
solver.assertion( reduce( logic_and, [ logic_nequal( var, bv_uint[bv]( 0 ) ) for var in vars ], True ) )
solver.assertion( logic_equal( metasmt_function( *vars ), bv_uint[tbv]( v ) ) )
self.assertTrue( solver.solve() )
if specification:
if returns_bool:
self.assertTrue( specification( *tuple( map( solver.__getitem__, vars ) ) ) )
else:
self.assertTrue( specification( *tuple( map( solver.__getitem__, vars ) ) ) == v )
def check( self, metasmt_function, specification, bv = 32, rbv = 32, returns_bool = False, tbv = 32 ):
self.check_with_solver( self.solver(), metasmt_function, specification, bv, rbv, returns_bool, tbv )
def testConstants( self ):
solver = self.solver()
a = new_bitvector( 32 )
b = new_bitvector( 32 )
c = new_bitvector( 32 )
d = new_bitvector( 32 )
solver.assertion( logic_equal( a, bv_uint[32]( 10 ) ) )
solver.assertion( logic_equal( b, bv_sint[32]( -10 ) ) )
solver.assertion( logic_equal( c, bv_bin( "00000000000000000000000000001010" ) ) )
solver.assertion( logic_equal( d, bv_hex( "0000000A" ) ) )
self.assertTrue( solver.solve() )
self.assertTrue( solver[a] == -twoscomp( solver[b] ) )
self.assertTrue( solver[c] == solver[d] )
def testBVNot( self ):
self.check( bv_not, lambda a: a ^ 2**32-1 )
def testBVNeg( self ):
self.check( bv_neg, lambda a: -twoscomp( a, 32 ), rbv = 31 )
def testBVAnd( self ):
self.check( bv_and, lambda a, b: a & b )
def testBVNand( self ):
self.check( bv_nand, lambda a, b: invert( a & b ) )
def testBVOr( self ):
self.check( bv_or, lambda a, b: a | b )
def testBVNor( self ):
self.check( bv_nor, lambda a, b: invert( a | b ) )
def testBVXor( self ):
self.check( bv_xor, lambda a, b: a ^ b )
def testBVXnor( self ):
self.check( bv_xnor, lambda a, b: invert( a ^ b ) )
def testBVAdd( self ):
self.check( bv_add, lambda a, b: ( a + b ) % 2**32 )
def testBVMul( self ):
self.check( bv_mul, lambda a, b: ( a * b ) % 2**32 )
def testBVSub( self ):
self.check( bv_sub, lambda a, b: ( a - b ) % 2**32 )
def testBVSdiv( self ):
self.check( bv_sdiv, None )
def testBVSrem( self ):
self.check( bv_srem, None )
def testBVUdiv( self ):
self.check( bv_udiv, lambda a, b: ( a / b ) )
def testBVUrem( self ):
self.check( bv_urem, lambda a, b: ( a % b ) )
def testBVShl( self ):
self.check( bv_shl, None )
def testBVShr( self ):
self.check( bv_shr, None )
    def testBVAshr( self ):
        # assumes metasmt.core exposes bv_ashr alongside bv_shl/bv_shr
        self.check( bv_ashr, None )
def testBVComp( self ):
self.check( bv_comp, lambda a, b: a == b, returns_bool = True )
def testBVSlt( self ):
self.check( bv_slt, lambda a, b: twoscomp( a ) < twoscomp( b ), returns_bool = True )
def testBVSgt( self ):
self.check( bv_sgt, lambda a, b: twoscomp( a ) > twoscomp( b ), returns_bool = True )
def testBVSle( self ):
self.check( bv_sle, lambda a, b: twoscomp( a ) <= twoscomp( b ), returns_bool = True )
def testBVSge( self ):
self.check( bv_sge, lambda a, b: twoscomp( a ) >= twoscomp( b ), returns_bool = True )
def testBVUlt( self ):
self.check( bv_ult, lambda a, b: a < b, returns_bool = True )
def testBVUgt( self ):
self.check( bv_ugt, lambda a, b: a > b, returns_bool = True )
def testBVUle( self ):
self.check( bv_ule, lambda a, b: a <= b, returns_bool = True )
def testBVUge( self ):
self.check( bv_uge, lambda a, b: a >= b, returns_bool = True )
def testConcat( self ):
self.check( concat, lambda a, b: ( a << 16 ) + b, bv = 16 )
def testExtract( self ):
solver = self.solver()
a = new_bitvector( 32 )
solver.assertion( logic_equal( a, bv_uint[32]( random.randint( 0, 2**32 - 1 ) ) ) )
solver.assertion( logic_equal( a, reduce( concat, reversed([ extract( i, i, a ) for i in ( range( 32 ) ) ] ) ) ))
self.assertTrue( solver.solve() )
def testZeroExtend( self ):
solver = self.solver()
a = new_bitvector( 16 )
solver.assertion( logic_implies( logic_equal( concat( a, a ), zero_extend( 16, a ) ),
logic_equal( a, bv_uint[16]( 0 ) ) ) )
self.assertTrue( solver.solve() )
def testSignExtend( self ):
solver = self.solver()
a = new_bitvector( 16 )
solver.assertion( logic_implies( logic_equal( concat( a, a ), sign_extend( 16, a ) ),
logic_equal( a, bv_uint[16]( 0 ) ) ) )
self.assertTrue( solver.solve() )
for name, solver in available_solvers().items():
logic_name = 'LogicTest_'+name
bitvec_name = 'BitvectorTest_'+name
    # bind the current solver via a default argument so each generated class
    # keeps its own factory (avoids the late-binding closure pitfall)
    solver_fun = lambda self, solver=solver: solver()
    globals()[logic_name] = type(logic_name, (LogicTest, unittest.TestCase), dict(solver=solver_fun))
    globals()[bitvec_name] = type(bitvec_name, (BitvectorTest, unittest.TestCase), dict(solver=solver_fun))
if __name__ == "__main__":
random.seed()
unittest.main()
import os
import stat
import signal
import subprocess
import constants
import time
import PyPR2
RECORDED_DATA_DIR = '/removable/recordings'
class ProcConduit:
def __init__( self ):
self.mannequin = None
self.recording = None
self.joyControl = None
if not os.path.exists( RECORDED_DATA_DIR ) or not os.path.isdir( RECORDED_DATA_DIR ):
print 'Create data recording directory', RECORDED_DATA_DIR
try:
os.makedirs( RECORDED_DATA_DIR )
os.chmod( RECORDED_DATA_DIR, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP )
os.chown( RECORDED_DATA_DIR, -1, 100 )
    except OSError:
print 'Unable to create data recording directory', RECORDED_DATA_DIR
self.setCallbacks()
def spawnProc( self, cmd ):
# The os.setsid() is passed in the argument preexec_fn so
# it's run after the fork() and before exec() to run the shell.
pro = subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid )
return pro
def killProc( self, proc ):
if not proc or not isinstance( proc, subprocess.Popen ):
print 'Input is not a process object'
return
    os.killpg( proc.pid, signal.SIGINT ) # send the signal to the whole process group
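  # Usage sketch: proc = self.spawnProc( 'rosbag record ...' ) starts the
  # command in its own session (hence its own process group), so
  # self.killProc( proc ) can SIGINT the shell and every child it spawned.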
def setToMannequinMode( self, isYes ):
if not isinstance( isYes, bool ):
print 'Expect a boolean input'
return
elif isYes:
if self.mannequin:
print 'Already in mannequin mode'
else:
self.mannequin = self.spawnProc( 'roslaunch pr2_mannequin_mode pr2_mannequin_mode.launch > /dev/null 2>&1' )
PyPR2.say( "Start mannequin mode." )
else:
if self.mannequin:
self.killProc( self.mannequin )
self.mannequin = None
subprocess.call( 'rosservice call /pr2_controller_manager/switch_controller \
"{start_controllers: [\'r_arm_controller\',\'l_arm_controller\'], \
stop_controllers: [], strictness: 2}"', shell=True )
PyPR2.say( "Stop mannequin mode." )
def setProjectorOff( self, isYes ):
if not isinstance( isYes, bool ):
print 'Expect a boolean input'
return
elif isYes:
subprocess.call( 'rosrun dynamic_reconfigure dynparam set camera_synchronizer_node projector_mode 1', shell=True )
else:
subprocess.call( 'rosrun dynamic_reconfigure dynparam set camera_synchronizer_node projector_mode 2', shell=True )
def setBaseScanIntensityOn( self ):
#PyPR2.say( "Turning on base scan intensity, please wait" )
subprocess.call( 'rosrun dynamic_reconfigure dynparam set /base_hokuyo_node skip 2', shell=True )
time.sleep( 0.5 )
subprocess.call( 'rosrun dynamic_reconfigure dynparam set /base_hokuyo_node intensity True', shell=True )
time.sleep( 0.5 )
subprocess.call( 'rosrun dynamic_reconfigure dynparam set /base_hokuyo_node allow_unsafe_settings True', shell=True )
time.sleep( 0.5 )
subprocess.call( 'rosrun dynamic_reconfigure dynparam set /base_hokuyo_node max_ang 2.2689', shell=True )
#PyPR2.say( "base scan intensity should be on" )
  def startDataRecording( self, mode, filename = "" ):
    cmd = 'rosbag record -b 1024 '
    suffix = ''
    if mode & constants.REC_CAM:
      #cmd = cmd + '-e "/(.*)_stereo/(left|right)/image_rect_color" '
      cmd = cmd + '-e "/wide_stereo/left/image_rect_color" ' # record only one camera's data
      suffix = suffix + '_cam'
    if mode & constants.REC_KINECT:
      cmd = cmd + '"/camera/rgb/image_rect_color" "/camera/depth_registered/image_rect" '
      suffix = suffix + '_kinect'
    if mode & constants.REC_SCAN:
      cmd = cmd + '-e "/(.*)_scan$" '
      suffix = suffix + '_laser'
    if mode & constants.REC_IMU:
      cmd = cmd + '"/torso_lift_imu/data" '
      suffix = suffix + '_imu'
    if mode & constants.REC_JOINTS:
      cmd = cmd + '"/joint_states" '
      suffix = suffix + '_joint'
    if mode & constants.REC_TF:
      cmd = cmd + '"/tf" '
      suffix = suffix + '_tf'
    if filename == "":
      cmd = cmd + '--duration=1m --split -O %s/%s%s_data.bag' % \
        (RECORDED_DATA_DIR, time.strftime( "%Y%m%d_%H%M", time.localtime()), suffix)
    else:
      cmd = cmd + '--duration=1m --split -O %s/%s.bag' % \
        (RECORDED_DATA_DIR, filename)
if self.recording:
self.killProc( self.recording )
self.recording = self.spawnProc( cmd )
PyPR2.say( "Start data recording!" )
def stopDataRecording( self ):
if self.recording:
self.killProc( self.recording )
self.recording = None
PyPR2.say( "Stopped data recording!" )
def startJoystickControl( self ):
if self.joyControl:
print 'already in joystick control mode'
return
self.joyControl = self.spawnProc( 'roslaunch pr2_teleop teleop_joystick.launch > /dev/null 2>&1' )
PyPR2.say( "Start joystick control." )
def stopJoystickControl( self ):
if self.joyControl:
self.killProc( self.joyControl )
self.joyControl = None
PyPR2.say( "Stopped joystick control." )
def setCallbacks( self ):
PyPR2.setProjectorOff = self.setProjectorOff
PyPR2.setToMannequinMode = self.setToMannequinMode
PyPR2.startDataRecording = self.startDataRecording
PyPR2.stopDataRecording = self.stopDataRecording
PyPR2.startJoystickControl = self.startJoystickControl
PyPR2.stopJoystickControl = self.stopJoystickControl
PyPR2.turnOnBaseScanIntensity = self.setBaseScanIntensityOn
def fini( self ):
self.stopJoystickControl()
self.stopDataRecording()
self.setToMannequinMode( False )
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as python_logging
import re
from oslo_log import log as logging
import urllib
LOG = logging.getLogger(__name__)
VALUE = 'VALUE'
SUBITEM = 'SUBITEM'
SUBTREE = 'SUBTREE'
FORWARD = 'FORWARD'
COLLECTION = 'COLLECTION'
DELEGATE = 'DELEGATE'
def _split_lg_path(path_prefix, path):
if len(path) == 0:
return (None, None, path_prefix)
else:
return (path[0], path[1:],
# pylint: disable=no-member
"%s/%s" % (path_prefix, urllib.quote(path[0])))
def _get_lg_local_info_recurse(obj, cls, path_prefix):
if cls == LookingGlassMixin:
return {}
result = cls.get_lg_local_info(obj, path_prefix)
assert isinstance(result, dict)
for base in cls.__bases__:
if issubclass(base, LookingGlassMixin):
result.update(
_get_lg_local_info_recurse(obj, base, path_prefix))
return result
def _get_lg_map_recurse(obj, cls):
if cls == LookingGlassMixin:
return {}
result = cls.get_lg_map(obj)
for base in cls.__bases__:
if issubclass(base, LookingGlassMixin):
result.update(_get_lg_map_recurse(obj, base))
else:
LOG.debug("not recursing into %s", base)
return result
def _lookup_path(my_dict, path):
'''lookup path in dict'''
assert isinstance(path, (list, tuple))
if len(path) == 0:
return my_dict
# len(path)>0
if not isinstance(my_dict, dict):
raise KeyError(path[0])
else:
return _lookup_path(my_dict[path[0]], path[1:])
def get_lg_prefixed_path(path_prefix, path_items):
fmt = "%s" + ('/%s' * len(path_items))
# pylint: disable=no-member
quoted_path_items = [urllib.quote(path_item) for path_item in path_items]
quoted_path_items.insert(0, path_prefix)
return fmt % tuple(quoted_path_items)
class LookingGlassMixin(object):
def _get_lg_map(self):
# not to be overridden: calls get_lg_map, on each of the super classes
# and merge the result in a dict
return _get_lg_map_recurse(self, self.__class__)
def get_lg_map(self):
"""Main looking glass hook for LG objects
This can be overridden by looking glass objects to indicate looking
glass information items for this objects.
:returns: a dict mapping a <subpath> to a (<lg_map_type>,<hook>) tuple
if lg_map_type is VALUE, then the looking glass information for
<subpath> will be <hook>
if lg_map_type is SUBITEM, then <hook> is supposed to be a function
and the looking glass information for <subpath> will be the result
of calling hook()
if lg_map_type is SUBTREE, then <hook> is supposed to be a function and
the looking glass information for <subpath> will be the result of
calling hook(path_prefix), but this information will only be
produced if the <subpath> is queried (not produced if the full
object is queried)
if lg_map_type is FORWARD, then <hook> is supposed to be a looking
glass object and the looking glass information for <subpath>
will be the looking glass information for <subpath> of object
<hook>
if lg_map_type is DELEGATE, then <hook> is supposed to be a looking
glass object and the looking glass information for <subpath> will
be the full looking glass information for object <hook>
if lg_map_type is COLLECTION, then <hook> is supposed to be a tuple of
functions (list_callback,target_callback). list_callback() is
expected
to return a list of string, each string identifying a looking
glass object target_callback(string) is expected to return the
looking glass object corresponding to <string>
if *self* is directly queried, the information returned is just a
list of dict containing "href" values pointing to each object in
the collection
if a <subpath> is queried, the information returned is the
looking glass information for the object corresponding to
<subpath>
"""
return {}
def _get_lg_local_info(self, path_prefix):
# not to be overridden: calls get_lg_local_info, on each of
# the super classes and merge the result in a dict
return _get_lg_local_info_recurse(self, self.__class__, path_prefix)
def get_lg_local_info(self, path_prefix):
"""Deeper hook for LG objects
Can be overridden by looking glass objects instead of get_lg_map
:param path_prefix: the URL prefix that was used to reach *self*
through the looking glass
:returns: a dict that will be serialized as JSON and passed to the
looking glass client, either as is, or if a sub path was queried,
the dict value corresponding to the first item of the path
"""
return {}
def get_looking_glass_info(self, path_prefix="", path=None):
"""Internal method to build the looking glass information
Builds the LG information for *self* based on the looking glass map.
*not* to be overridden by looking glass objects
"""
if path is None:
path = []
(first_segment,
path_reminder,
new_path_prefix) = _split_lg_path(path_prefix, path)
lg_map = self._get_lg_map()
if first_segment in lg_map:
(mapping_type, mapping_target) = lg_map[first_segment]
LOG.debug("Delegation for path_item '%s': %s:%s ",
first_segment, mapping_type, mapping_target)
if mapping_type == VALUE:
return mapping_target
if mapping_type == FORWARD:
LOG.debug(
" Forwarded '%s' to target %s...", path, mapping_target)
if not isinstance(mapping_target, LookingGlassMixin):
LOG.error("Delegation target for '%s' at '%s' does not "
"implement LookingGlassMixin!",
first_segment, new_path_prefix)
raise NoSuchLookingGlassObject(new_path_prefix,
first_segment)
return mapping_target.get_looking_glass_info(path_prefix, path)
elif mapping_type == DELEGATE:
LOG.debug(
" Delegated '%s' to delegation target %s ...",
path, mapping_target)
if not isinstance(mapping_target, LookingGlassMixin):
LOG.error("Delegation target for '%s' at '%s' does not "
"implement LookingGlassMixin!",
first_segment, new_path_prefix)
raise NoSuchLookingGlassObject(new_path_prefix,
first_segment)
return mapping_target.get_looking_glass_info(new_path_prefix,
path_reminder)
elif mapping_type == SUBITEM:
LOG.debug(" Sub-item callback: %s", first_segment)
try:
return _lookup_path(mapping_target(), path_reminder)
except KeyError as e:
raise NoSuchLookingGlassObject(new_path_prefix, str(e))
elif mapping_type == SUBTREE:
LOG.debug(" Subtree callback: %s(...)", first_segment)
try:
return _lookup_path(mapping_target(new_path_prefix),
path_reminder)
except KeyError as e:
raise NoSuchLookingGlassObject(new_path_prefix, str(e))
elif mapping_type == COLLECTION:
LOG.debug(" Collection callback...")
(list_callback, target_callback) = mapping_target
(second_segment, path_reminder, newer_path_prefix) = \
_split_lg_path(new_path_prefix, path_reminder)
if second_segment is None:
LOG.debug(" Getting list elements: %s", list_callback)
result = []
for x in list_callback():
x["href"] = get_lg_prefixed_path(path_prefix,
[first_segment,
x["id"]])
result.append(x)
return result
else:
LOG.debug(" Callback -> resolve subItem '%s' with %s "
"and follow up get_looking_glass_info(...'%s')",
second_segment, target_callback, path_reminder)
try:
# TODO(tmorin): catch errors
target = target_callback(second_segment)
if target is None:
LOG.error("No delegation target for '%s' at '%s' ",
second_segment, new_path_prefix)
raise NoSuchLookingGlassObject(new_path_prefix,
second_segment)
if not isinstance(target, LookingGlassMixin):
LOG.error("Delegation target for '%s' at '%s' does"
" not implement LookingGlassMixin (%s)!",
second_segment, new_path_prefix,
type(target))
raise NoSuchLookingGlassObject(new_path_prefix,
second_segment)
return target.get_looking_glass_info(newer_path_prefix,
path_reminder)
except KeyError:
raise NoSuchLookingGlassObject(new_path_prefix,
second_segment)
        # first_segment is None or is not in our map
# let's build LookingGlassLocalInfo
info = self._get_lg_local_info(path_prefix)
for (path_item, (mapping_type, mapping_target)) in lg_map.items():
if path_item in info:
LOG.warning("overriding '%s', present both in "
"LookingGlassLocalInfo and LookingGlassMixin map",
path_item)
if mapping_type in (FORWARD, DELEGATE, SUBTREE, COLLECTION):
info[path_item] = {"href": get_lg_prefixed_path(path_prefix,
[path_item])
}
elif mapping_type == SUBITEM:
LOG.debug(" Subitem => callback %s(...)", mapping_target)
# TODO(tmorin): catch errors
info[path_item] = mapping_target()
elif mapping_type == VALUE:
info[path_item] = mapping_target
else:
LOG.warning("LGMap not processed for %s", path_item)
if first_segment is None:
return info
else:
try:
return _lookup_path(info, path)
except KeyError as e:
raise NoSuchLookingGlassObject(new_path_prefix, str(e))
LOG.warning("Looking glass did not found a looking-glass object for"
" this path...")
return None
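# A minimal illustrative subclass (hypothetical names, shown as a sketch):
#
#   class Worker(LookingGlassMixin):
#       def get_lg_map(self):
#           return {"id": (VALUE, 42),
#                   "stats": (SUBITEM, lambda: {"uptime": 3600})}
#
# GET <prefix>/id returns 42, GET <prefix>/stats returns {"uptime": 3600},
# and querying the object itself inlines both entries into one dict.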
class NoSuchLookingGlassObject(Exception):
def __init__(self, path_prefix, path):
super(NoSuchLookingGlassObject, self).__init__()
assert isinstance(path_prefix, str)
self.path_prefix = path_prefix
assert isinstance(path, str)
self.path = path
def __repr__(self):
return "No such looking glass object: %s at %s" % (self.path,
self.path_prefix)
# Looking glass reference URLs
root = ""
references = {}
def set_references_root(url_prefix):
global root
root = url_prefix
def set_reference_path(reference, path):
references[reference] = path
def get_absolute_path(reference, path_prefix, path=None):
if path is None:
path = []
index = path_prefix.find(root)
absolute_base_url = path_prefix[:index + len(root)]
return get_lg_prefixed_path(absolute_base_url,
references[reference] + path)
class LookingGlassLogHandler(python_logging.Handler):
"""Looking Glass LogHandler storing logs to make them available in LG
This log handler simply stores the last <max_size> messages of importance
above <level>. These messages can be retrieved with .get_records().
"""
def __init__(self, level=logging.WARNING, max_size=100):
super(LookingGlassLogHandler, self).__init__(level)
self.records = []
self.max_size = max_size
self.setFormatter(
python_logging.Formatter('%(asctime)s - %(levelname)s - '
'%(message)s'))
def emit(self, record):
# expand the log message now and free references to the arguments
record.msg = record.getMessage().replace('"', "'")
record.args = []
self.records.insert(0, record)
del self.records[self.max_size:]
def __len__(self):
return len(self.records)
def get_records(self):
return self.records
def reset_local_lg_logs(self):
del self.records[:]
class LookingGlassLocalLogger(LookingGlassMixin):
"""LookingGlass Mixin making self.log a LookingGlass log catcher
For objects subclassing this class, self.log will be a logger derived from
<name> based on the existing logging configuration, but with an additional
logger using LookingGlassLogHandler.
This additional logger is used to make the last <n> records (above WARNING)
available through the looking glass
"""
def __init__(self, append_to_name=""):
try:
self.lg_log_handler
except AttributeError:
self.lg_log_handler = LookingGlassLogHandler()
name = self.__module__
if append_to_name:
name += "." + append_to_name
elif hasattr(self, 'instance_id'):
# pylint: disable=no-member
name += ".%d" % self.instance_id
elif hasattr(self, 'name'):
# pylint: disable=no-member
name += ".%s" % re.sub("[. ]", "-", self.name).lower()
self.log = logging.getLogger(name)
self.log.logger.addHandler(self.lg_log_handler)
def get_lg_map(self):
return {"logs": (SUBTREE, self.get_logs)}
def get_logs(self, path_prefix):
return [{'level': record.levelname,
'time': self.lg_log_handler.formatter.formatTime(record),
'message': record.msg}
for record in self.lg_log_handler.get_records()]
def _reset_local_lg_logs(self):
self.lg_log_handler.reset_local_lg_logs()
#! /usr/bin/env python
from setuptools import setup
setup(name = 'smhcluster',
version = '0.1.0',
description = 'Cluster for Near-Duplicate Detection with Simhash',
url = 'http://github.com/seomoz/simhash-cluster',
author = 'Dan Lecocq',
author_email = 'dan@seomoz.org',
packages = ['smhcluster', 'smhcluster.adapters'],
package_dir = {
'smhcluster': 'smhcluster',
'smhcluster.adapters': 'smhcluster/adapters'
},
scripts = [
'bin/simhash-master',
'bin/simhash-slave'
],
      install_requires = [
'simhash', # For obvious reasons
'boto', # For persistence to S3
'bottle', # For the HTTP adapter
'gevent', # For non-blocking goodness
'requests', # For making real http requests
'zerorpc' # For RPC with gevent, zeromq
],
classifiers = [
'Programming Language :: Python',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP'
],
)
from params import *
from numpy import sin, cos
x,y = 0.3, 0 # coords of the toe tip
def show(anim_file, state_log,tau):
f = open(anim_file,'w')
f.write("<html>\n<body>\n<svg width='1000' height='650'>\n")
draw_line(f,[((x,y),(x+L3*cos(b),y+L3*sin(b))) for (x,y,_,b,_) in state_log ],5,'blue',tau)
draw_line(f,[((x+L3*cos(b),y+L3*sin(b)),(x+L3*cos(b)+L2*cos(b+g),y+L3*sin(b)+L2*sin(b+g)))
for (x,y,_,b,g) in state_log ],7,'rgb(50,50,200)',tau)
draw_line(f,[((x+L3*cos(b)+L2*cos(b+g),y+L3*sin(b)+L2*sin(b+g)),
(x+L3*cos(b)+L2*cos(b+g)+2*L1*cos(a+b+g),y+L3*sin(b)+L2*sin(b+g)+2*L1*sin(a+b+g)))
for (x,y,a,b,g) in state_log ],9,'rgb(70,70,150)',tau)
f.write("</svg>\n</body>\n</html>")
f.close()
def draw_line(f, points_list, width, color, tau):
f.write("\t<line x1='%i' y1='%i' x2='%i' y2='%i' \
style='stroke:%s;stroke-width:%i' >\n" % (0,0,0,0,color,width))
T = 0
for xy1,xy2 in points_list:
x1,y1 = px(xy1)
x2,y2 = px(xy2)
f.write("\t\t<set attributeName='x1' attributeType='XML'\n \
to='%i' begin='%.2fs' />\n" %(x1,T) ),
f.write("\t\t<set attributeName='y1' attributeType='XML'\n \
to='%i' begin='%.2fs' />\n" %(y1,T) ),
f.write("\t\t<set attributeName='x2' attributeType='XML'\n \
to='%i' begin='%.2fs' />\n" %(x2,T) ),
f.write("\t\t<set attributeName='y2' attributeType='XML'\n \
to='%i' begin='%.2fs' />\n" %(y2,T) ),
T += tau
f.write("\t</line>\n")
def px(xy):
return ( 100+round(xy[0]*1500), 600-round(xy[1]*1500) )
# Winsford ASC Google AppEngine App
# swimmer_parser.py
# Code to help scraping data from HTML tables, particularly
# the style of tables used at swimming.org
#
# Copyright (C) 2014 Oliver Wright
# oli.wright.github@gmail.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program (file LICENSE); if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from lxml import html
from lxml import etree
import StringIO
import helpers
import logging
class Cell():
def __init__(self, text, link):
self.text = text
self.link = link
def __str__(self):
return self.text
def __int__(self):
return int( self.text )
  def __bool__(self):
    if self.text is None:
      return False
    else:
      return True
  __nonzero__ = __bool__ # Python 2 uses __nonzero__ for truth testing
def ReadTableCell( element ):
# The contents of the cell can be either plain text
# or a hyperlink.
# This version will parse both versions but will just
# return the plain text
txt = helpers.Trim( element.text )
if txt is None:
# Look for a hyperlink
element = element.find( path="a" )
if element is None:
return Cell( text = None, link = None )
else:
return Cell( text = helpers.Trim( element.text ), link = element.get( key="href" ) )
return Cell( text = txt, link = None )
#
# Iterator class for iterating over the rows of a html table.
#
# You pass in the headers that you're interested in, in the order that you would
# like to get them.
#
# Each row that you iterate over will return a list containing the same number
# of elements as in the headers_of_interest. The contents of the list will
# correspond to the data in that row for that header.
#
# Any missing data will be None in the array.
#
class TableRows:
def __init__(self, table, headers_of_interest):
self.body = table.find( "tbody" )
self.heading_types = []
header_row = self.body.find( "tr" )
self.row = header_row
self.num_headers_of_interest = len( headers_of_interest )
for element in header_row.iterchildren( tag="th" ):
heading = helpers.Trim( element.text )
heading_type = -1
idx = 0
for interesting_heading in headers_of_interest:
if heading == interesting_heading:
#logging.info( "Found interesting header: " + heading )
heading_type = idx
break;
#logging.info( "No match: " + heading + ", " + interesting_heading )
idx = idx + 1
#if heading_type == -1:
#logging.info( "Uninteresting header: " + heading )
self.heading_types.append( heading_type )
def __iter__(self):
return self
def next(self):
self.row = self.row.getnext()
while self.row != None and self.row.tag != "tr":
self.row = self.row.getnext()
if self.row is None:
raise StopIteration
else:
# Iterate over the data elements in the row, pulling out the data
# for the columns that we're interested in
row_data = [None] * self.num_headers_of_interest;
column = 0
for element in self.row.iterchildren( tag="td" ):
interested_data_type = self.heading_types[ column ];
if interested_data_type != -1:
row_data[ interested_data_type ] = ReadTableCell( element )
column = column + 1
return row_data
# You pass in a lxml.html.HtmlElement for the root of the table, and the column headers that you're
# interested in, and it outputs a 2d array containing the parsed data.
def ParseTable( table, headers_of_interest ):
parsed_rows = []
for row in TableRows( table, headers_of_interest ):
parsed_rows.append( row )
return parsed_rows
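# Illustrative usage (hypothetical table): for a table whose header row is
# <th>Name</th><th>Time</th>, ParseTable( table, ["Time", "Name"] ) returns
# one list per data row, holding the "Time" Cell first and the "Name" Cell
# second -- the order of headers_of_interest -- with None for any column
# that is absent from a row.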
#=======================================================================
# bootstrap.py
#=======================================================================
from machine import State
#from pydgin.storage import Memory
EMULATE_GEM5 = False
EMULATE_SIMIT = True
# Currently these constants are set to match gem5
memory_size = 2**27
page_size = 8192
if EMULATE_SIMIT:
memory_size = 0xc0000000 + 1
MAX_ENVIRON = 1024 * 16
# MIPS stack starts at top of kuseg (0x7FFF.FFFF) and grows down
#stack_base = 0x7FFFFFFF
stack_base = memory_size-1 # TODO: set this correctly!
#-----------------------------------------------------------------------
# syscall_init
#-----------------------------------------------------------------------
#
# MIPS Memory Map (32-bit):
#
# 0xC000.0000 - Mapped (kseg2) - 1GB
# 0xA000.0000 - Unmapped uncached (kseg1) - 512MB
# 0x8000.0000 - Unmapped cached (kseg0) - 512MB
# 0x0000.0000 - 32-bit user space (kuseg) - 2GB
#
def syscall_init( mem, entrypoint, breakpoint, argv, envp, debug ):
#---------------------------------------------------------------------
# memory map initialization
#---------------------------------------------------------------------
# TODO: for multicore allocate 8MB for each process
#proc_stack_base[pid] = stack_base - pid * 8 * 1024 * 1024
# top of heap (breakpoint) # TODO: handled in load program
# memory maps: 1GB above top of heap
# mmap_start = mmap_end = break_point + 0x40000000
#---------------------------------------------------------------------
# stack argument initialization
#---------------------------------------------------------------------
# http://articles.manugarg.com/aboutelfauxiliaryvectors.html
#
# contents size
#
# 0x7FFF.FFFF [ end marker ] 4 (NULL)
# [ environment str data ] >=0
# [ arguments str data ] >=0
# [ padding ] 0-16
# [ auxv[n] data ] 8 (AT_NULL Vector)
# 8*x
# [ auxv[0] data ] 8
# [ envp[n] pointer ] 4 (NULL)
# 4*x
# [ envp[0] pointer ] 4
# [ argv[n] pointer ] 4 (NULL)
# 4*x
# [ argv[0] pointer ] 4 (program name)
# stack ptr-> [ argc ] 4 (size of argv)
#
# (stack grows down!!!)
#
# 0x7F7F.FFFF < stack limit for pid 0 >
#
# auxv variables initialized by gem5, are these needed?
#
# - PAGESZ: system page size
# - PHDR: virtual addr of program header tables
# (for statically linked binaries)
# - PHENT: size of program header entries in elf file
# - PHNUM: number of program headers in elf file
# - AT_ENRTY: program entry point
# - UID: user ID
# - EUID: effective user ID
# - GID: group ID
# - EGID: effective group ID
# TODO: handle auxv, envp variables
auxv = []
if EMULATE_GEM5:
argv = argv[1:]
argc = len( argv )
def sum_( x ):
val = 0
for i in x:
val += i
return val
# calculate sizes of sections
# TODO: parameterize auxv/envp/argv calc for variable int size?
stack_nbytes = [ 4, # end mark nbytes (sentry)
sum_([len(x)+1 for x in envp]), # envp_str nbytes
sum_([len(x)+1 for x in argv]), # argv_str nbytes
0, # padding nbytes
8*(len(auxv) + 1), # auxv nbytes
4*(len(envp) + 1), # envp nbytes
4*(len(argv) + 1), # argv nbytes
4 ] # argc nbytes
if EMULATE_SIMIT:
    stack_nbytes[4] = 0 # don't do auxv for simit
def round_up( val ):
alignment = 16
return (val + alignment - 1) & ~(alignment - 1)
# calculate padding to align boundary
# NOTE: MIPs approach (but ignored by gem5)
#stack_nbytes[3] = 16 - (sum_(stack_nbytes[:3]) % 16)
# NOTE: gem5 ARM approach
stack_nbytes[3] = round_up( sum_(stack_nbytes) ) - sum_(stack_nbytes)
if EMULATE_SIMIT:
stack_nbytes[3] = 0
def round_down( val ):
alignment = 16
return val & ~(alignment - 1)
# calculate stack pointer based on size of storage needed for args
# TODO: round to nearest page size?
stack_ptr = round_down( stack_base - sum_( stack_nbytes ) )
if EMULATE_SIMIT:
stack_ptr = stack_base - MAX_ENVIRON
offset = stack_ptr + sum_( stack_nbytes )
# FIXME: this offset seems really wrong, but this is how gem5 does it!
if EMULATE_GEM5:
offset = stack_base
print "XXX", offset
stack_off = []
for nbytes in stack_nbytes:
offset -= nbytes
stack_off.append( offset )
  # FIXME: this fails for GEM5's hacky offset...
if not EMULATE_GEM5:
assert offset == stack_ptr
if debug.enabled( 'bootstrap' ):
print 'stack base', hex( stack_base )
print 'stack min ', hex( stack_ptr )
print 'stack size', stack_base - stack_ptr
print
print 'sentry ', stack_nbytes[0]
print 'env d ', stack_nbytes[1]
print 'arg d ', stack_nbytes[2]
print 'padding', stack_nbytes[3]
print 'auxv ', stack_nbytes[4]
print 'envp ', stack_nbytes[5]
print 'argv ', stack_nbytes[6]
print 'argc ', stack_nbytes[7]
# utility functions
def str_to_mem( mem, val, addr ):
for i, char in enumerate(val+'\0'):
mem.write( addr + i, 1, ord( char ) )
return addr + len(val) + 1
def int_to_mem( mem, val, addr ):
# TODO properly handle endianess
for i in range( 4 ):
mem.write( addr+i, 1, (val >> 8*i) & 0xFF )
return addr + 4
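  # e.g. int_to_mem( mem, 0x12345678, a ) writes bytes 0x78 0x56 0x34 0x12 at
  # a..a+3 (little-endian) and returns a+4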
# write end marker to memory
int_to_mem( mem, 0, stack_off[0] )
# write environment strings to memory
envp_ptrs = []
offset = stack_off[1]
for x in envp:
envp_ptrs.append( offset )
offset = str_to_mem( mem, x, offset )
assert offset == stack_off[0]
# write argument strings to memory
argv_ptrs = []
offset = stack_off[2]
for x in argv:
argv_ptrs.append( offset )
offset = str_to_mem( mem, x, offset )
assert offset == stack_off[1]
# write auxv vectors to memory
offset = stack_off[4]
if not EMULATE_SIMIT:
for type_, value in auxv + [(0,0)]:
offset = int_to_mem( mem, type_, offset )
offset = int_to_mem( mem, value, offset )
assert offset == stack_off[3]
# write envp pointers to memory
offset = stack_off[5]
for env in envp_ptrs + [0]:
offset = int_to_mem( mem, env, offset )
assert offset == stack_off[4]
# write argv pointers to memory
offset = stack_off[6]
for arg in argv_ptrs + [0]:
offset = int_to_mem( mem, arg, offset )
assert offset == stack_off[5]
# write argc to memory
offset = stack_off[7]
offset = int_to_mem( mem, argc, offset )
assert offset == stack_off[6]
# write zeros to bottom of stack
# TODO: why does gem5 do this?
offset = stack_off[7] - 1
while offset >= stack_ptr:
mem.write( offset, 1, ord( '\0' ) )
offset -= 1
# initialize processor state
state = State( mem, debug, reset_addr=0x1000 )
if debug.enabled( 'bootstrap' ):
print '---'
#print 'argc = %d (%x)' % ( argc, stack_off[-1] )
#for i, ptr in enumerate(argv_ptrs):
# print 'argv[%2d] = %x (%x)' % ( i, argv_ptrs[i], stack_off[-2]+4*i ),
# print len( argv[i] ), argv[i]
#print 'argd = %s (%x)' % ( argv[0], stack_off[-6] )
print '---'
print 'envd-base', hex(stack_off[-7])
print 'argd-base', hex(stack_off[-6])
print 'auxv-base', hex(stack_off[-4])
print 'envp-base', hex(stack_off[-3])
print 'argv-base', hex(stack_off[-2])
print 'argc-base', hex(stack_off[-1])
print 'STACK_PTR', hex( stack_ptr )
# TODO: where should this go?
#state.pc = entrypoint
state.breakpoint = breakpoint
# initialize processor registers
state.rf[ 0 ] = 0 # ptr to func to run when program exits, disable
state.rf[ 1 ] = stack_off[6] # argument 1 reg = argv ptr addr
state.rf[ 2 ] = stack_off[5] # argument 2 reg = envp ptr addr
if EMULATE_SIMIT:
state.rf[ 1 ] = argc # argument 1 reg = argc
state.rf[ 2 ] = stack_off[6] # argument 2 reg = argv ptr addr
state.rf[ 13 ] = stack_ptr # stack pointer reg
state.rf[ 15 ] = entrypoint # program counter
if debug.enabled( 'bootstrap' ):
state.rf.print_regs( per_row=4 )
print '='* 20, 'end bootstrap', '='*20
return state
from .. import api
from . import bp
@bp.route("/info")
def get_info():
return api.success_response("info", {})
@bp.route("/api/namespaces")
def get_namespaces():
namespaces = api.list_namespaces()
content = [ns.metadata.name for ns in namespaces.items]
return api.success_response("namespaces", content)
@bp.route("/api/storageclasses")
def get_storageclasses():
scs = api.list_storageclasses()
content = [sc.metadata.name for sc in scs.items]
return api.success_response("storageClasses", content)
@bp.route("/api/storageclasses/default")
def get_default_storageclass():
scs = api.list_storageclasses()
for sc in scs.items:
annotations = sc.metadata.annotations
if annotations is None:
continue
# List of possible annotations
keys = [
"storageclass.kubernetes.io/is-default-class",
"storageclass.beta.kubernetes.io/is-default-class", # GKE
]
for key in keys:
default_sc_annotation = annotations.get(key, "false")
if default_sc_annotation == "true":
return api.success_response(
"defaultStorageClass", sc.metadata.name
)
# No StorageClass is default
return api.success_response("defaultStorageClass", "")
# -*- coding: utf-8 -*-
# (C) 2013-2015 Muthiah Annamalai
#
# This file is part of 'open-tamil' package tests
#
# setup the paths
from opentamiltests import *
from ngram.Corpus import Corpus
from ngram.LetterModels import *
from ngram.WordModels import *
import tamil.utf8 as utf8
class WordsNGram(unittest.TestCase):
def test_basic(self):
#WordModels
word = u"அருஞ்சொற்பொருள்"
        self.assertEqual( get_ngram_groups( word, 1), utf8.get_letters(word) )
self.assertEqual( get_ngram_groups( word, 2), [u"அரு",u"ருஞ்", u"ஞ்சொ", u"சொற்", u"ற்பொ", u"பொரு",u"ருள்"] )
self.assertEqual( get_ngram_groups( word, 3), [u"அருஞ்",u"ருஞ்சொ",u"ஞ்சொற்", u"சொற்பொ",u"ற்பொரு",u"பொருள்"] )
self.assertEqual( get_ngram_groups( word, 8), [ word ])
self.assertEqual( get_ngram_groups( word, 9), [ word ])
class Letters(unittest.TestCase):
def test_basic_unigram_counts(self):
z = Corpus("data/ex.unicode")
for letter in z.next_tamil_letter():
#if ( LINUX ): print(letter)
pass
# LetterModels
q = Unigram( "data/ex.unicode" )
q.frequency_model( )
if not PYTHON3:
#if ( LINUX ): print(unicode(q))
pass
else:
#if ( LINUX ): print( q )
pass
self.assertEqual( q.letter[u"ஷை"] + q.letter[u"சி"] , q.letter[u"ந"] )
del z, q
def test_bigram_counts(self):
q=Bigram("data/ex.unicode")
q.language_model(verbose=(False and LINUX)) #suppress output
self.assertEqual( q.letter2[u"த்"][u"து"] , 7 )
self.assertEqual( q.letter2[u"சி"][u"சி"] , 0 )
if __name__ == '__main__':
unittest.main()
from urllib import unquote
from urlparse import urlparse
try:
from urlparse import parse_qsl
except ImportError: # pragma: no cover
from cgi import parse_qsl # noqa
from . import kwdict
def _parse_url(url):
scheme = urlparse(url).scheme
schemeless = url[len(scheme) + 3:]
# parse with HTTP URL semantics
parts = urlparse('http://' + schemeless)
# The first pymongo.Connection() argument (host) can be
# a mongodb connection URI. If this is the case, don't
# use port but let pymongo get the port(s) from the URI instead.
# This enables the use of replica sets and sharding.
# See pymongo.Connection() for more info.
port = scheme != 'mongodb' and parts.port or None
hostname = schemeless if scheme == 'mongodb' else parts.hostname
path = parts.path or ''
path = path[1:] if path and path[0] == '/' else path
return (scheme, unquote(hostname or '') or None, port,
unquote(parts.username or '') or None,
unquote(parts.password or '') or None,
unquote(path or '') or None,
kwdict(dict(parse_qsl(parts.query))))
def parse_url(url):
scheme, host, port, user, password, path, query = _parse_url(url)
return dict(transport=scheme, hostname=host,
port=port, userid=user,
password=password, virtual_host=path, **query)
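# Illustrative (hypothetical URL):
#   parse_url('amqp://guest:secret@localhost:5672/myvhost')
# returns {'transport': 'amqp', 'hostname': 'localhost', 'port': 5672,
#          'userid': 'guest', 'password': 'secret', 'virtual_host': 'myvhost'}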
#-*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from datetime import datetime
from selenium.webdriver.support.ui import WebDriverWait
import logging
import os
from system import system
from parameter import parameter
syst=system()
par=parameter()
lis=0
if not os.path.isdir('c:\\TEST_log\\niutouweblog\\'):
os.makedirs('c:\\TEST_log\\niutouweblog\\')
logFileName = 'c:\\TEST_log\\niutouweblog\\%s.log' %time.strftime("%m-%d-%H-%M-%S",time.localtime(time.time()))
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', filename=logFileName, level=logging.DEBUG)
#################################################################################################
#Define a StreamHandler that prints INFO-or-higher log records to stderr and attach it to the root logger#
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
#################################################################################################
logging.info('####################')
logging.info('# Begin Test #')
logging.info('####################')
def strB2Q(ustring):
"""半角转全角"""
rstring = ""
for uchar in ustring:
inside_code=ord(uchar)
        if inside_code == 32: # half-width space converts directly (to U+3000)
inside_code = 12288
        elif inside_code >= 32 and inside_code <= 126: # other half-width ASCII maps by a fixed offset
inside_code += 65248
rstring += unichr(inside_code)
return rstring
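# Example: strB2Q(u'Ab 1') -> u'Ａｂ　１' -- ASCII maps into the full-width
# forms block (offset +65248) and the half-width space to U+3000.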
def FileName():
global lis
lis=lis+1
filesave='c:\\screenshot\\niutouwebpic\\'
picname= '%s.png'%(datetime.utcnow().strftime('%m-%d-') + time.strftime('%H',time.localtime(time.time()))+datetime.utcnow().strftime('.%M.%S.')+datetime.utcnow().strftime('%f')[:3])
filename=filesave+str(lis)+'-'+picname
return filename
driver = webdriver.Firefox()
#driver.maximize_window()
driver.get("http://www1.niutou.com/wly/register")
time.sleep(1)
'''
driver.find_element_by_name("username").send_keys("13522166709")
driver.find_element_by_name("password").send_keys("111111")
driver.find_element_by_xpath("//input[@value='登录']").click()
time.sleep(1)
'''
#driver.find_element_by_name('username').send_keys('123')
def testtel1():
logging.info(u'手机号测试')
for var in tel1:
driver.find_element_by_id("register_muname").send_keys(var)
driver.find_element_by_id("register_mupass").send_keys("111111")
driver.find_element_by_id("register_murepass").send_keys("111111")
driver.find_element_by_id("button_register_code").click()
logging.info(u'手机号:'+var)
time.sleep(1)
driver.get_screenshot_as_file(FileName())
# driver.get_screenshot_as_png()
driver.refresh()
'''
if driver.execute("getAlertText")["value"]=="获取激活码失败":
logging.info(driver.execute("getAlertText")["value"])
driver.execute("acceptAlert")
driver.refresh()
else:
driver.get_screenshot_as_file(FileName())
driver.refresh()
'''
def testtel():
logging.info(u'手机号测试')
for var in tel2:
driver.find_element_by_id("register_muname").send_keys(var)
driver.find_element_by_id("register_mupass").send_keys("111111")
driver.find_element_by_id("register_murepass").send_keys("111111")
driver.find_element_by_id("button_register_code").click()
logging.info(u'手机号:'+var)
time.sleep(1)
# driver.get_screenshot_as_file(FileName())
logging.info(driver.execute("getAlertText")["value"])
driver.execute("acceptAlert")
# driver.get_screenshot_as_png()
driver.refresh()
'''
if driver.execute("getAlertText")["value"]=="获取激活码失败":
logging.info(driver.execute("getAlertText")["value"])
driver.execute("acceptAlert")
driver.refresh()
else:
driver.get_screenshot_as_file(FileName())
driver.refresh()
'''
logging.info(u"测试结束")
#register with email
def testemail1():
logging.info(u'邮箱测试')
for var in email1:
driver.find_element_by_id("register_muname").send_keys("13522166709")
driver.find_element_by_id("register_mupass").send_keys("111111")
# driver.find_element_by_id("register_murepass").send_keys("111111")
driver.find_element_by_id("register_mcode").send_keys("864462")
driver.find_element_by_id("register_name").send_keys(u"刘美霞")
# driver.find_element_by_xpath("/html/body/div[1]/div[1]/div/div/input[1]").click()
driver.find_element_by_id("id_no").send_keys("456789878709098765")
driver.find_element_by_id("desc").send_keys(u"测试工程师")
driver.find_element_by_id("email").send_keys(var)
driver.find_element_by_xpath("/html/body/div[1]/div[1]/div/p[2]/button").click()
logging.info(driver.execute("getAlertText")["value"])
driver.execute("acceptAlert")
logging.info(u'邮箱:'+var)
time.sleep(1)
# driver.get_screenshot_as_file(FileName())
# driver.get_screenshot_as_png()
driver.refresh()
#register with email
def testemail2():
logging.info(u'邮箱测试')
for var in email2:
driver.find_element_by_id("register_muname").send_keys("13522166709")
driver.find_element_by_id("register_mupass").send_keys("111111")
driver.find_element_by_id("register_murepass").send_keys("111111")
driver.find_element_by_id("register_mcode").send_keys("299021")
driver.find_element_by_id("register_name").send_keys(u"刘美霞")
driver.find_element_by_xpath("/html/body/div[1]/div[1]/div/div/input[1]").click()
driver.find_element_by_id("id_no").send_keys("456789878709098765")
driver.find_element_by_id("desc").send_keys(u"测试工程师")
driver.find_element_by_id("email").send_keys(var)
driver.find_element_by_xpath("/html/body/div[1]/div[1]/div/p[2]/button").click()
driver.get_screenshot_as_file(FileName())
logging.info(u'邮箱:'+var)
time.sleep(1)
driver.get("http://www1.niutou.com/wly/register")
time.sleep(1)
# driver.get_screenshot_as_file(FileName())
# driver.get_screenshot_as_png()
driver.refresh()
#password
def testpassw():
logging.info(u'密码测试')
ele=driver.find_element_by_name("password")
for var in commonvar[0:21]:
ele.send_keys(var)
ele.send_keys(Keys.TAB)
time.sleep(1)
driver.get_screenshot_as_file(FileName())
logging.info(var)
ele.clear()
time.sleep(1)
#real name
def testregister_name():
logging.info(u'真实姓名测试')
# ele1=driver.find_element_by_name("username")
for var in par.textinput():
driver.find_element_by_id("register_muname").send_keys("13522166709")
driver.find_element_by_id("register_mupass").send_keys("111111")
driver.find_element_by_id("register_murepass").send_keys("111111")
driver.find_element_by_id("register_mcode").send_keys("798977")
driver.find_element_by_id("register_name").send_keys(var)
driver.find_element_by_xpath("/html/body/div[1]/div[1]/div/div/input[1]").click()
driver.find_element_by_id("id_no").send_keys("456789878709098765xx")
driver.find_element_by_id("desc").send_keys(u"测试工程师")
driver.find_element_by_id("email").send_keys("zmd@126.com")
driver.find_element_by_xpath("/html/body/div[1]/div[1]/div/p[2]/button").click()
logging.info(driver.execute("getAlertText")["value"])
driver.execute("acceptAlert")
logging.info(var)
driver.refresh()
time.sleep(1)
#personal description
def testdesc():
logging.info(u'个人说明测试')
# ele=driver.find_element_by_name("password")
for var in par.textinput()[23]:
driver.find_element_by_id("register_muname").send_keys("13522166709")
driver.find_element_by_id("register_mupass").send_keys("111111")
driver.find_element_by_id("register_murepass").send_keys("111111")
driver.find_element_by_id("register_mcode").send_keys("798977")
driver.find_element_by_id("register_name").send_keys("liumeixia")
driver.find_element_by_xpath("/html/body/div[1]/div[1]/div/div/input[1]").click()
driver.find_element_by_id("id_no").send_keys("456789878709098765")
driver.find_element_by_id("desc").send_keys("var")
driver.find_element_by_id("email").send_keys("zmd@126.com")
driver.find_element_by_xpath("/html/body/div[1]/div[1]/div/p[2]/button").click()
logging.info(driver.execute("getAlertText")["value"])
driver.execute("acceptAlert")
logging.info(var)
driver.refresh()
time.sleep(1)
#submit
def testsubmit():
logging.info(u'注册提交测试')
driver.find_element_by_id("register_muname").send_keys('13522166709')
driver.find_element_by_id("register_muname").send_keys('123abc')
driver.find_element_by_id("register_muname").send_keys(u'刘美霞')
driver.find_element_by_xpath("register_muname").click()
driver.find_element_by_id("register_muname").send_keys('41252619800908826x')
driver.find_element_by_id("register_muname").send_keys('zmdlmx@126.com')
driver.find_element_by_id("register_muname").send_keys(u'测试工程师呵呵')
driver.find_element_by_name("submit")
#Name: 2-10 Chinese characters, English letters, or spaces.
#1. enter nothing;
#2. enter 10 Chinese characters: 测测测测测测测测测测
#3. enter 10 English letters: tttttttttt;
#4. enter a mix of Chinese characters, letters and digits: 111tttt测测测
#5. enter 11 Chinese characters: 测测测测测测测测测测;
#6. enter 11 English letters: ttttttttttt;
#7. enter a random very long string (Chinese, letters, digits, special characters)
#8. enter a normal name
#9. enter 1 Chinese character
#10. enter 1 letter
#11. enter 2 Chinese characters
#12. enter 2 letters
varname1=''
varname2=u'测测测测测测测测测测'
varname3=u'tttttttttt'
varname4=u'111tttt测测测'
varname5=u'测测测测测测测测测测'
varname6=u'ttttttttttt'
varname7=u'ttttttttttt测测测测测测测测测测1111111!@#$^&*(()_+}{":>?"})测测测测测测测测测测'
varname8=u'刘美霞'
varname9=u"刘"
varname10='w'
varname11=u'刘梅'
varname12='li'
names=[varname1,varname2,varname3,varname4,varname5,varname6,varname7,varname8,varname9,varname10,varname11,varname12]
#Personal description: at most 140 characters.
#1. enter nothing;
#2. enter 140 Chinese characters;
#3. enter 140 English letters;
#4. enter a mix of 140 Chinese characters, letters and digits;
#5. enter 141 Chinese characters;
#6. enter 141 English letters;
#7. enter a random very long string (Chinese, letters, digits, special characters)
varexplain1=u''
varexplain2=u'测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测'
varexplain3=u'ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'
varexplain4=u'11测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测ttttttttttttttttttttttttttttttttttttttt'
varexplain5=u'测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测'
varexplain6=u'tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt'
varexplain7=u'测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测测'
varexplain=[varexplain1,varexplain2,varexplain3,varexplain4,varexplain5,varexplain6,varexplain7]
var1='13346433017'
var2='15346433017'
var3='17746433017'
var4='18046433017'
var5='18146433017'
var6='18946433017'
var7='13046433017'
var8='13146433018'
var9='13246433017'
var10='14746433017'
var11='15546433017'
var12='15646433017'
var13='17646433017'
var14='18546433017'
var15='18646433017'
var16='13446433017'
var17='13546433017'
var18='13646433017'
var19='13746433017'
var20='13846433017'
var21='13946433017'
var22='14746433017'
var23='15046433017'
var24='15146433017'
var25='15246433017'
var26='15746433017'
var27='15846433017'
var28='15946433017'
var29='17846433017'
var30='18246433017'
var31='18346433017'
var32='18446433017'
var33='18746433017'
var34='18846433017'
var35='17046433017'
tel1=[var1,var2,var3,var4,var5,var6,var7,var8,var9,var10,var11,var12,var13,var14,var15,var16,var17,var18,var19,var20,var21,var22,var23,var24,var25,var26,var27,var28,var29,var30,var31,var32,var33,var34,var35]
#Invalid phone numbers
#1. tap the input box to bring up the numeric keypad; enter 11 digits starting with the nonexistent prefixes 11, 12, 14, 16, 19
#2. tap the input box to bring up the numeric keypad; enter 11 digits starting with the nonexistent prefixes 2, 3, 4, 5, 6, 7, 8, 9
varlu1='11246433017'
varlu2='12246433017'
varlu3='10246433017'
varlu4='16246433017'
varlu5='19246433017'
varlu6='29246433017'
varlu7='39246433017'
varlu8='49246433017'
varlu9='59246433017'
varlu10='69246433017'
varlu11='79246433017'
varlu12='89246433017'
varlu13='99246433017'
tel2=[varlu1,varlu2,varlu3,varlu4,varlu5,varlu6,varlu7,varlu8,varlu9,varlu10,varlu11,varlu12,varlu13]
tels=tel1+tel2
vare1=''
vare2=u'test163com'
vare3=u'test@163com'
vare4=u'test163.com'
vare5='test@163com'
vare6=u'@163.com'
vare7=u'test@.com'
vare8=u'test@163com.'
vare9='test@163@163.com'
vare10='test@163@163.comtest@163@163.c'
vare11='test@163@163.comtest@163@163.co'
vare12=u'test@163@163.com测试'
vare13='a.b.d@c.d'
vare14='b@c.d.d'
vare15=u'ttttttttttt测测测测测测测测测测1111111!@#$^&*(()_+}{":>?"})测测测测测测测测测测'
email1=[vare1,vare2,vare3,vare4,vare5,vare6,vare7,vare8,vare9, vare10, vare11, vare12, vare13, vare14, vare15]
vareu1='test@126.com'
vareu2=u'test@sina.com'
vareu3=u'test@21cn.com'
vareu4=u'test@sohu.com'
vareu5='test@yahoo.cn'
vareu6='test@tom.com'
vareu7='test@qq.com'
vareu8='test@etang.com'
vareu9='test@eyou.com'
vareu10='test@56.com'
vareu11='test@x.cn'
vareu12='test@chinaren.com'
vareu13='test@sogou.com'
vareu14='test@citiz.com'
vareu15='test@gmail.com'
vareu16='test@msn.com'
vareu17='test@aol.com'
vareu18='test@ask.com'
vareu19='test@live.com'
vareu20='test@0355.net'
vareu21='test@163.net'
vareu22='test@263.net'
vareu23='test@3721.net'
vareu24='test@yeah.net'
vareu25='test@googlemail.com'
vareu26='test@mail.com'
vareu27='test@hotmail.com'
vareu28='test@msn.com'
vareu29='test@yahoo.com'
vareu30='test@aim.com'
vareu31='test@aol.com'
vareu32='test@walla.com'
email2=[vareu1,vareu2,vareu3,vareu4,vareu5,vareu6,vareu7,vareu8,vareu9, vareu10, vareu11, vareu12, vareu13, vareu14, vareu15,vareu16, vareu17, vareu18, vareu19, vareu20, vareu21, vareu22, vareu23, vareu24, vareu25, vareu26, vareu27, vareu28, vareu29, vareu30, vareu31, vareu32]
#email2=[var1,var2,var3,var4,var5,var6,var7,var8,var9, var10, var11, var12, var13, var14, var15,var16, var17, var18, var19, var20, var21, var22, var23, var24, var25, var26, var27, var28, var29, var30, var31, var32]
emails=email1+email2
#testdesc()
#testregister_name()
testemail1()
#testtel()
#testpassw()
#testconfirmpassw()
driver.close()
| 0.035548 |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import UniqueConstraint
from oslo_db.sqlalchemy import utils
from oslo_log import log as logging
from sqlalchemy import MetaData
from sqlalchemy.sql import null
from nova import exception
from nova.i18n import _
LOG = logging.getLogger(__name__)
UC_NAME = 'uniq_instances0uuid'
def scan_for_null_records(table, col_name, check_fkeys):
"""Queries the table looking for NULL instances of the given column.
:param table: The table to scan for NULL values of the column.
:param col_name: The name of the column to look for in the table.
:param check_fkeys: If True, check the table for foreign keys back to the
instances table and if not found, return.
:raises: exception.ValidationError: If any records are found.
"""
if col_name in table.columns:
# NOTE(mriedem): filter out tables that don't have a foreign key back
# to the instances table since they could have stale data even if
# instances.uuid wasn't NULL.
if check_fkeys:
fkey_found = False
fkeys = table.c[col_name].foreign_keys or []
for fkey in fkeys:
if fkey.column.table.name == 'instances':
fkey_found = True
if not fkey_found:
return
records = len(list(
table.select().where(table.c[col_name] == null()).execute()
))
if records:
msg = _("There are %(records)d records in the "
"'%(table_name)s' table where the uuid or "
"instance_uuid column is NULL. These must be "
"manually cleaned up before the migration will pass. "
"Consider running the "
"'nova-manage db null_instance_uuid_scan' command.") % (
{'records': records, 'table_name': table.name})
raise exception.ValidationError(detail=msg)
def process_null_records(meta, scan=True):
"""Scans the database for null instance_uuid records for processing.
:param meta: sqlalchemy.MetaData object, assumes tables are reflected.
:param scan: If True, queries the database and fails the migration if
             NULL instance_uuid entries are found. If False, makes
             instances.uuid non-nullable.
"""
if scan:
for table in reversed(meta.sorted_tables):
# NOTE(mriedem): There is a periodic task in the network manager
# that calls nova.db.api.fixed_ip_disassociate_all_by_timeout which
# will set fixed_ips.instance_uuid to None by design, so we have to
# skip the fixed_ips table otherwise we'll wipeout the pool of
# fixed IPs.
if table.name not in ('fixed_ips', 'shadow_fixed_ips'):
scan_for_null_records(table, 'instance_uuid', check_fkeys=True)
for table_name in ('instances', 'shadow_instances'):
table = meta.tables[table_name]
if scan:
scan_for_null_records(table, 'uuid', check_fkeys=False)
else:
# The record is gone so make the uuid column non-nullable.
table.columns.uuid.alter(nullable=False)
def upgrade(migrate_engine):
# NOTE(mriedem): We're going to load up all of the tables so we can find
# any with an instance_uuid column since those may be foreign keys back
# to the instances table and we want to cleanup those records first. We
# have to do this explicitly because the foreign keys in nova aren't
# defined with cascading deletes.
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
# Scan the database first and fail if any NULL records found.
process_null_records(meta, scan=True)
# Now run the alter statements.
process_null_records(meta, scan=False)
# Create a unique constraint on instances.uuid for foreign keys.
instances = meta.tables['instances']
UniqueConstraint('uuid', table=instances, name=UC_NAME).create()
# NOTE(mriedem): We now have a unique index on instances.uuid from the
# 216_havana migration and a unique constraint on the same column, which
# is redundant but should not be a big performance penalty. We should
# clean this up in a later (separate) migration since it involves dropping
# any ForeignKeys on the instances.uuid column due to some index rename
# issues in older versions of MySQL. That is beyond the scope of this
# migration.
def downgrade(migrate_engine):
# drop the unique constraint on instances.uuid
UniqueConstraint('uuid',
table=utils.get_table(migrate_engine, 'instances'),
name=UC_NAME).drop()
# We can't bring the deleted records back but we can make uuid nullable.
for table_name in ('instances', 'shadow_instances'):
table = utils.get_table(migrate_engine, table_name)
table.columns.uuid.alter(nullable=True)
| 0 |
# coding: utf-8
# Maria Clara Dantas
# pasha_and_pixels
qnt_linhas, qnt_colunas, passos = map(int, raw_input().split())
quantos_passos = []
achou_solucao = True
for i in range(passos):
l, c = map(int, raw_input().split())
quantos_passos.append([l,c])
for x in range(passos):
c_preta = 1
ja_esta_preta = []
for y in range(passos):
if quantos_passos[y] in quantos_passos[:y]:
    # repeated move: the pixel is already black, so nothing changes
    pass
else:
ja_esta_preta.append(quantos_passos[y])
p_1 = (quantos_passos[x][0] == quantos_passos[y][0] and quantos_passos[x][1] + 1 == quantos_passos[y][1])
p_2 = (quantos_passos[x][0] + 1 == quantos_passos[y][0] and quantos_passos[x][1] == quantos_passos[y][1])
p_3 = (quantos_passos[x][0] + 1 == quantos_passos[y][0] and quantos_passos[x][1] + 1 == quantos_passos[y][1])
if p_1 or p_2 or p_3:
c_preta += 1
if c_preta == 4:
print y + 1
achou_solucao = False
break
if not achou_solucao:
break
if achou_solucao:
print '0'
#if quantos_passos[x][0] == quantos_passos[y][0] and quantos_passos[x][1] + 1 == quantos_passos[y][1]:
# c_preta += 1
# celulas_ja_pretas += str(quantos_passos[y])
#elif
| 0.041952 |
"""
This module contains enumerations used in pyproj.
"""
from enum import Enum
class BaseEnum(Enum):
@classmethod
def create(cls, item):
try:
return cls(item)
except ValueError:
pass
if isinstance(item, str):
item = item.upper()
for member in cls:
if member.value == item:
return member
raise ValueError(
f"Invalid value supplied '{item}'. "
f"Only {tuple(version.value for version in cls)} are supported."
)
class WktVersion(BaseEnum):
"""
.. versionadded:: 2.2.0
Supported CRS WKT string versions
"""
#: WKT Version 2 from 2015
WKT2_2015 = "WKT2_2015"
#: WKT Version 2 from 2015 Simplified
WKT2_2015_SIMPLIFIED = "WKT2_2015_SIMPLIFIED"
#: Deprecated alias for WKT Version 2 from 2019
WKT2_2018 = "WKT2_2018"
#: Deprecated alias for WKT Version 2 from 2019 Simplified
WKT2_2018_SIMPLIFIED = "WKT2_2018_SIMPLIFIED"
#: WKT Version 2 from 2019
WKT2_2019 = "WKT2_2019"
#: WKT Version 2 from 2019 Simplified
WKT2_2019_SIMPLIFIED = "WKT2_2019_SIMPLIFIED"
#: WKT Version 1 GDAL Style
WKT1_GDAL = "WKT1_GDAL"
#: WKT Version 1 ESRI Style
WKT1_ESRI = "WKT1_ESRI"
class ProjVersion(BaseEnum):
"""
.. versionadded:: 2.2.0
Supported CRS PROJ string versions
"""
#: PROJ String version 4
PROJ_4 = 4
#: PROJ String version 5
PROJ_5 = 5
class TransformDirection(BaseEnum):
"""
.. versionadded:: 2.2.0
Supported transform directions
"""
#: Forward direction
FORWARD = "FORWARD"
#: Inverse direction
INVERSE = "INVERSE"
#: Do nothing
IDENT = "IDENT"
class PJType(BaseEnum):
"""
.. versionadded:: 2.4.0
PJ Types for listing codes with :func:`pyproj.get_codes`
Attributes
----------
UNKNOWN
ELLIPSOID
PRIME_MERIDIAN
GEODETIC_REFERENCE_FRAME
DYNAMIC_GEODETIC_REFERENCE_FRAME
VERTICAL_REFERENCE_FRAME
DYNAMIC_VERTICAL_REFERENCE_FRAME
DATUM_ENSEMBLE
CRS
GEODETIC_CRS
GEOCENTRIC_CRS
GEOGRAPHIC_CRS
GEOGRAPHIC_2D_CRS
GEOGRAPHIC_3D_CRS
VERTICAL_CRS
PROJECTED_CRS
COMPOUND_CRS
TEMPORAL_CRS
ENGINEERING_CRS
BOUND_CRS
OTHER_CRS
CONVERSION
TRANSFORMATION
CONCATENATED_OPERATION
OTHER_COORDINATE_OPERATION
"""
UNKNOWN = "UNKNOWN"
ELLIPSOID = "ELLIPSOID"
PRIME_MERIDIAN = "PRIME_MERIDIAN"
GEODETIC_REFERENCE_FRAME = "GEODETIC_REFERENCE_FRAME"
DYNAMIC_GEODETIC_REFERENCE_FRAME = "DYNAMIC_GEODETIC_REFERENCE_FRAME"
VERTICAL_REFERENCE_FRAME = "VERTICAL_REFERENCE_FRAME"
DYNAMIC_VERTICAL_REFERENCE_FRAME = "DYNAMIC_VERTICAL_REFERENCE_FRAME"
DATUM_ENSEMBLE = "DATUM_ENSEMBLE"
CRS = "CRS"
GEODETIC_CRS = "GEODETIC_CRS"
GEOCENTRIC_CRS = "GEOCENTRIC_CRS"
GEOGRAPHIC_CRS = "GEOGRAPHIC_CRS"
GEOGRAPHIC_2D_CRS = "GEOGRAPHIC_2D_CRS"
GEOGRAPHIC_3D_CRS = "GEOGRAPHIC_3D_CRS"
VERTICAL_CRS = "VERTICAL_CRS"
PROJECTED_CRS = "PROJECTED_CRS"
COMPOUND_CRS = "COMPOUND_CRS"
TEMPORAL_CRS = "TEMPORAL_CRS"
ENGINEERING_CRS = "ENGINEERING_CRS"
BOUND_CRS = "BOUND_CRS"
OTHER_CRS = "OTHER_CRS"
CONVERSION = "CONVERSION"
TRANSFORMATION = "TRANSFORMATION"
CONCATENATED_OPERATION = "CONCATENATED_OPERATION"
OTHER_COORDINATE_OPERATION = "OTHER_COORDINATE_OPERATION"
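# A minimal usage sketch of the case-insensitive lookup that BaseEnum.create
# provides (the input values below are illustrative):
if __name__ == "__main__":
    assert WktVersion.create("wkt2_2019") is WktVersion.WKT2_2019
    assert PJType.create("crs") is PJType.CRS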
| 0 |
import numpy
import theano.tensor as T
from pylearn2.models.model import Model
from pylearn2.space import VectorSpace
from pylearn2.utils import sharedX
from pylearn2.costs.cost import Cost, DefaultDataSpecsMixin
class CNNCost(DefaultDataSpecsMixin, Cost):
supervised = True
def expr(self, model, data, **kwargs):
space, source = self.get_data_specs(model)
space.validate(data)
inputs, targets = data
outputs = model.cnn_output(inputs)
loss = -(targets * T.log(outputs)).sum(axis=1)
return loss.mean()
class CNN(Model):
"""
W1: [nvis * nhid]
b: [nhid]
W2: [nhid * nclasses]
c: [nclasses]
"""
def __init__(self, nvis, nhid, nclasses):
super(CNN, self).__init__()
self.nvis = nvis
self.nhid = nhid
self.nclasses = nclasses
W1_value = numpy.random.uniform(size=(self.nvis, self.nhid))
b_value = numpy.random.uniform(size=(self.nhid))
W2_value = numpy.random.uniform(size=(self.nhid, nclasses))
c_value = numpy.random.uniform(size=(nclasses))
self.W1 = sharedX(W1_value, 'W1')
self.W2 = sharedX(W2_value, 'W2')
self.b = sharedX(b_value, 'b')
self.c = sharedX(c_value, 'c')
self._params = [self.W1, self.W2, self.b, self.c]
self.input_space = VectorSpace(dim=self.nvis)
self.output_space = VectorSpace(dim=self.nclasses)
def cnn_output(self, X):
h = T.tanh(T.dot(X, self.W1) + self.b)
o = T.tanh(T.dot(h, self.W2) + self.c)
return T.nnet.softmax(o)
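# A minimal construction sketch (sizes illustrative). Note that despite the
# class name, this model is a one-hidden-layer MLP with a softmax output:
# model = CNN(nvis=784, nhid=100, nclasses=10)
# X = T.matrix('X')
# y_hat = model.cnn_output(X)  # symbolic [batch, nclasses] softmax output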
| 0.027085 |
from utils import *
from User import User
from Project import Project
from Sprint import Sprint
from stasis.Singleton import get as db
class Availability:
def __init__(self, sprint):
self.sprint = sprint
def get(self, user, timestamp):
table = db()['availability']
if self.sprint.id in table:
data = table[self.sprint.id]
if user.id in data:
ts = dateToTs(timestamp)
if ts in data[user.id]:
return data[user.id][ts]
return 0
def getAll(self, timestamp):
rtn = 0
ts = dateToTs(timestamp)
table = db()['availability']
if self.sprint.id in table:
for data in table[self.sprint.id].values():
if ts in data:
rtn += data[ts]
return rtn
def set(self, user, timestamp, hours):
table = db()['availability']
if self.sprint.id not in table:
table[self.sprint.id] = {}
with table.change(self.sprint.id) as data:
if user.id not in data:
data[user.id] = {}
data[user.id][dateToTs(timestamp)] = hours
def delete(self, user):
table = db()['availability']
if self.sprint.id in table:
if user.id in table[self.sprint.id]:
with table.change(self.sprint.id) as data:
del data[user.id]
def wipe(self):
table = db()['availability']
if self.sprint.id in table:
del table[self.sprint.id]
def getAllForward(self, timestamp, user = None):
rtn = 0
ts = dateToTs(timestamp)
table = db()['availability']
if self.sprint.id in table:
for userid, data in table[self.sprint.id].iteritems():
if user is not None and user.id != userid:
continue
for thisstamp, hours in data.iteritems():
if thisstamp >= ts:
rtn += hours
return rtn
def trim(self):
table = db()['availability']
if self.sprint.id in table:
with table.change(self.sprint.id) as data:
for userid, hourmap in data.iteritems():
data[userid] = {timestamp: hours for timestamp, hours in hourmap.iteritems() if self.sprint.start <= timestamp <= self.sprint.end}
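# A minimal usage sketch (illustrative; `sprint` and `user` come from the
# surrounding application, and the timestamp format is whatever dateToTs
# expects):
# avail = Availability(sprint)
# avail.set(user, timestamp, 6)
# remaining = avail.getAllForward(timestamp, user)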
| 0.030848 |
"""
Tests for the update() queryset method that allows in-place, multi-object
updates.
"""
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class DataPoint(models.Model):
name = models.CharField(max_length=20)
value = models.CharField(max_length=20)
another_value = models.CharField(max_length=20, blank=True)
def __str__(self):
return six.text_type(self.name)
@python_2_unicode_compatible
class RelatedPoint(models.Model):
name = models.CharField(max_length=20)
data = models.ForeignKey(DataPoint)
def __str__(self):
return six.text_type(self.name)
class A(models.Model):
x = models.IntegerField(default=10)
class B(models.Model):
a = models.ForeignKey(A)
y = models.IntegerField(default=10)
class C(models.Model):
y = models.IntegerField(default=10)
class D(C):
a = models.ForeignKey(A)
class Foo(models.Model):
target = models.CharField(max_length=10, unique=True)
class Bar(models.Model):
foo = models.ForeignKey(Foo, to_field='target')
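# A minimal sketch of the in-place, multi-object update these models exercise
# (illustrative; assumes a configured test database):
#
# DataPoint.objects.filter(value='apple').update(another_value='peach')
#
# update() issues a single UPDATE statement, returns the number of rows
# matched, and does not call save() or fire pre/post-save signals.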
| 0 |
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
# Copyright (C) 2013-2014 science + computing ag
# Author: Sebastian Deiss <sebastian.deiss@t-online.de>
#
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Unit Tests for the GSS-API / SSPI SSHv2 Authentication (gssapi-with-mic)
"""
import socket
import threading
import unittest
import paramiko
class NullServer (paramiko.ServerInterface):
def get_allowed_auths(self, username):
return 'gssapi-with-mic'
def check_auth_gssapi_with_mic(self, username,
gss_authenticated=paramiko.AUTH_FAILED,
cc_file=None):
if gss_authenticated == paramiko.AUTH_SUCCESSFUL:
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def enable_auth_gssapi(self):
UseGSSAPI = True
GSSAPICleanupCredentials = True
return UseGSSAPI
def check_channel_request(self, kind, chanid):
return paramiko.OPEN_SUCCEEDED
def check_channel_exec_request(self, channel, command):
if command != 'yes':
return False
return True
class GSSAuthTest(unittest.TestCase):
@staticmethod
def init(username, hostname):
global krb5_principal, targ_name
krb5_principal = username
targ_name = hostname
def setUp(self):
self.username = krb5_principal
self.hostname = socket.getfqdn(targ_name)
self.sockl = socket.socket()
self.sockl.bind((targ_name, 0))
self.sockl.listen(1)
self.addr, self.port = self.sockl.getsockname()
self.event = threading.Event()
thread = threading.Thread(target=self._run)
thread.start()
def tearDown(self):
for attr in "tc ts socks sockl".split():
if hasattr(self, attr):
getattr(self, attr).close()
def _run(self):
self.socks, addr = self.sockl.accept()
self.ts = paramiko.Transport(self.socks)
host_key = paramiko.RSAKey.from_private_key_file('tests/test_rsa.key')
self.ts.add_server_key(host_key)
server = NullServer()
self.ts.start_server(self.event, server)
def test_1_gss_auth(self):
"""
Verify that Paramiko can handle SSHv2 GSS-API / SSPI authentication
(gssapi-with-mic) in client and server mode.
"""
host_key = paramiko.RSAKey.from_private_key_file('tests/test_rsa.key')
public_host_key = paramiko.RSAKey(data=host_key.asbytes())
self.tc = paramiko.SSHClient()
self.tc.get_host_keys().add('[%s]:%d' % (self.hostname, self.port),
'ssh-rsa', public_host_key)
self.tc.connect(self.hostname, self.port, username=self.username,
gss_auth=True)
self.event.wait(1.0)
self.assert_(self.event.is_set())
self.assert_(self.ts.is_active())
self.assertEquals(self.username, self.ts.get_username())
self.assertEquals(True, self.ts.is_authenticated())
stdin, stdout, stderr = self.tc.exec_command('yes')
schan = self.ts.accept(1.0)
schan.send('Hello there.\n')
schan.send_stderr('This is on stderr.\n')
schan.close()
self.assertEquals('Hello there.\n', stdout.readline())
self.assertEquals('', stdout.readline())
self.assertEquals('This is on stderr.\n', stderr.readline())
self.assertEquals('', stderr.readline())
stdin.close()
stdout.close()
stderr.close()
| 0 |
#! /usr/bin/env python
# pipe_proc module tests, calls scripts in pipe_proc_tests directory.
from __future__ import print_function
import os
import nmrglue as ng
# Framework functions
def _perform_test(glue_script, pipe_script, glue_files, pipe_files,
ignore_pipe_display=False):
"""
"""
cwd_backup = os.getcwd() # save the current working directory
# descend into the pipe_proc_tests directory
script_dir, script_fname = os.path.split(os.path.realpath(__file__))
os.chdir(os.path.join(script_dir, 'pipe_proc_tests'))
# execute the scripts and compare the files
exec(open(glue_script).read())
os.system(pipe_script)
for glue_file, pipe_file in zip(glue_files, pipe_files):
glue_dic, glue_data = ng.pipe.read(glue_file)
pipe_dic, pipe_data = ng.pipe.read(pipe_file)
os.remove(glue_file)
os.remove(pipe_file)
r1, r2 = ng.misc.pair_similar(glue_dic, glue_data, pipe_dic,
pipe_data, True,
ignore_pipe_display=ignore_pipe_display)
print(glue_file, pipe_file, r1, r2)
assert r1 is True
assert r2 is True
# return to the backup-ed directory.
os.chdir(cwd_backup)
return
def _standard_args(func_name, num_files):
""" Generate a set of standard """
glue_files = [func_name+str(i + 1)+'.glue' for i in range(num_files)]
pipe_files = [func_name+str(i + 1)+'.dat' for i in range(num_files)]
pipe_script = './' + func_name + '.com'
glue_script = './' + func_name + '.py'
return glue_script, pipe_script, glue_files, pipe_files
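# For example, _standard_args('em', 2) would return (a sketch of the naming
# convention only):
# ('./em.py', './em.com', ['em1.glue', 'em2.glue'], ['em1.dat', 'em2.dat'])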
def _standard_test(func_name, num_files, ignore_pipe_display=False):
return _perform_test(*_standard_args(func_name, num_files),
ignore_pipe_display=ignore_pipe_display)
#########################
# Apodization functions #
#########################
def test_apod():
""" APOD function """
return _standard_test('apod', 7)
def test_em():
""" EM function """
return _standard_test('em', 2)
def test_gm():
""" GM function """
return _standard_test('gm', 3)
def test_gmb():
""" GMB function """
return _standard_test('gmb', 2)
def test_jmod():
""" JMOD function """
return _standard_test('jmod', 2)
def test_sp():
""" SP function """
return _standard_test('sp', 2)
def test_sine():
""" SINE function """
return _standard_test('sine', 2)
def test_tm():
""" TM function """
return _standard_test('tm', 2)
def test_tri():
""" TRI function """
return _standard_test('tri', 2)
###################
# Shift functions #
###################
def test_rs():
""" RS function """
return _standard_test('rs', 5)
def test_ls():
""" LS function """
return _standard_test('ls', 5)
def test_cs():
""" CS function """
return _standard_test('cs', 8)
# XXX fsh tests 1-4 fail
def test_fsh():
""" FSH function """
return _standard_test('fsh', 0)
##############
# Transforms #
##############
def test_ft():
""" FT function """
return _standard_test('ft', 8)
def test_rft():
""" RFT function """
# return _standard_test('rft', 14) # XXX tests 9-11 fail
glue_files = ['rft1.glue', 'rft2.glue', 'rft3.glue', 'rft4.glue',
'rft5.glue', 'rft6.glue', 'rft7.glue', 'rft8.glue',
'rft12.glue', 'rft13.glue', 'rft14.glue']
pipe_files = ['rft1.dat', 'rft2.dat', 'rft3.dat', 'rft4.dat',
'rft5.dat', 'rft6.dat', 'rft7.dat', 'rft8.dat',
'rft12.dat', 'rft13.dat', 'rft14.dat']
pipe_script = './rft.com'
glue_script = './rft.py'
return _perform_test(glue_script, pipe_script, glue_files, pipe_files)
def test_ha():
""" HA function """
return _standard_test('ha', 2)
def test_ht():
""" HT function """
# return _standard_test('ht', 8) # XXX test 4 fails
glue_files = ['ht1.glue', 'ht2.glue', 'ht3.glue', 'ht5.glue', 'ht6.glue',
'ht7.glue', 'ht8.glue']
pipe_files = ['ht1.dat', 'ht2.dat', 'ht3.dat', 'ht5.dat', 'ht6.dat',
'ht7.dat', 'ht8.dat']
pipe_script = './ht.com'
glue_script = './ht.py'
return _perform_test(glue_script, pipe_script, glue_files, pipe_files)
##########################
# Standard NMR Functions #
##########################
def test_ps():
""" PS function """
return _standard_test('ps', 6)
def test_tp():
""" TP function """
return _standard_test('tp', 9, ignore_pipe_display=True)
def test_ytp():
""" YTP function """
return _standard_test('ytp', 3)
def test_xy2yx():
""" XY2YX function """
return _standard_test('xy2yx', 3)
def test_zf():
""" ZF function """
return _standard_test('zf', 5)
###################
# Basic Utilities #
###################
def test_add():
""" ADD function """
return _standard_test('add', 4) # Note that test 5 fails intentionally
def test_dx():
""" DX function """
return _standard_test('dx', 1)
def test_ext():
""" EXT function """
return _standard_test('ext', 11, ignore_pipe_display=True)
def test_integ():
""" INTEG function """
return _standard_test('integ', 1)
def test_mc():
""" MC function """
return _standard_test('mc', 2)
def test_mir():
""" MIR function """
return _standard_test('mir', 14)
def test_mult():
""" MULT function """
return _standard_test('mult', 3) # Note that test 4 fails intentionally
def test_rev():
""" REV function """
return _standard_test('rev', 3)
def test_set():
""" SET function """
return _standard_test('set', 4)
def test_shuf():
""" SHUF function """
return _standard_test('shuf', 7)
def test_sign():
""" SIGN function """
return _standard_test('sign', 8)
########
# Misc #
########
def test_coadd():
""" COADD function """
return _standard_test('coadd', 2)
def test_coad():
""" COAD function """
return _standard_test('coad', 2)
def test_dev():
""" DEV function """
return _standard_test('dev', 0)
def test_null():
""" NULL function """
return _standard_test('null', 2)
def test_qart():
""" QART function """
return _standard_test('qart', 2)
def test_qmix():
""" QMIX function """
return _standard_test('qmix', 2)
def test_smo():
""" SMO function """
return _standard_test('smo', 3)
def test_zd():
""" ZD function """
return _standard_test('zd', 4)
def test_save():
""" SAVE function """
return _standard_test('save', 2)
######################
# Baseline functions #
######################
def test_base():
""" BASE function """
return _standard_test('base', 7)
def test_cbf():
""" CBF function """
return _standard_test('cbf', 4)
#########
# Units #
#########
def test_units():
""" Units """
return _standard_test('units', 17)
#####################
# Integration Tests #
#####################
def test_2D_complex_processing():
""" 2D complex mode processing pipeline """
return _standard_test('2d_complex_processing', 1)
####################################################
# Test which are known to fail for various reasons.#
####################################################
# Tests which fail because of differences between NMRPipe and nmrglue
"""
def test_known_fail():
pipe_files = ['add5.dat',
'mult4.dat',
'shuf8.dat', 'shuf9.dat', 'shuf10.dat']
glue_files = ['add5.glue',
'mult4.glue',
'shuf8.glue', 'shuf9.glue', 'shuf10.glue']
pipe_script = 'known_fail.com'
glue_script = 'known_fail.py'
# also 'shuf11', 'shuf12' and 'dev1' tests all fail intentionally.
return _perform_test(glue_script, pipe_script, glue_files, pipe_files)
"""
# Test which fail and should be fixed
"""
def test_to_fix():
pipe_files = ["fsh1.dat", "fsh2.dat", "fsh3.dat", "fsh4.dat"
"rft9.dat", "rft10.dat", "rft11.dat",
"ht4.dat"]
glue_files = ["fsh1.glue", "fsh2.glue", "fsh3.glue", "fsh4.glue"
"rft9.glue", "rft10.glue", "rft11.glue",
"ht4.glue"]
pipe_script = 'to_fix.com'
glue_script = 'to_fix.py'
return _perform_test(glue_script, pipe_script, glue_files, pipe_files)
"""
| 0 |
"""
Support for Sonarr.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.sonarr/
"""
import logging
import time
from datetime import datetime
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (CONF_API_KEY, CONF_HOST, CONF_PORT)
from homeassistant.const import CONF_MONITORED_CONDITIONS
from homeassistant.const import CONF_SSL
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
CONF_DAYS = 'days'
CONF_INCLUDED = 'include_paths'
CONF_UNIT = 'unit'
CONF_URLBASE = 'urlbase'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8989
DEFAULT_URLBASE = ''
DEFAULT_DAYS = '1'
DEFAULT_UNIT = 'GB'
SENSOR_TYPES = {
'diskspace': ['Disk Space', 'GB', 'mdi:harddisk'],
'queue': ['Queue', 'Episodes', 'mdi:download'],
'upcoming': ['Upcoming', 'Episodes', 'mdi:television'],
'wanted': ['Wanted', 'Episodes', 'mdi:television'],
'series': ['Series', 'Shows', 'mdi:television'],
'commands': ['Commands', 'Commands', 'mdi:code-braces']
}
ENDPOINTS = {
'diskspace': 'http{0}://{1}:{2}/{3}api/diskspace?apikey={4}',
'queue': 'http{0}://{1}:{2}/{3}api/queue?apikey={4}',
'upcoming':
'http{0}://{1}:{2}/{3}api/calendar?apikey={4}&start={5}&end={6}',
'wanted': 'http{0}://{1}:{2}/{3}api/wanted/missing?apikey={4}',
'series': 'http{0}://{1}:{2}/{3}api/series?apikey={4}',
'commands': 'http{0}://{1}:{2}/{3}api/command?apikey={4}'
}
# Support up to yottabytes for the future, why not
BYTE_SIZES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]):
vol.All(cv.ensure_list, [vol.In(list(SENSOR_TYPES.keys()))]),
vol.Optional(CONF_INCLUDED, default=[]): cv.ensure_list,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_URLBASE, default=DEFAULT_URLBASE): cv.string,
vol.Optional(CONF_DAYS, default=DEFAULT_DAYS): cv.string,
vol.Optional(CONF_UNIT, default=DEFAULT_UNIT): vol.In(BYTE_SIZES)
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Sonarr platform."""
conditions = config.get(CONF_MONITORED_CONDITIONS)
add_devices(
[SonarrSensor(hass, config, sensor) for sensor in conditions]
)
return True
class SonarrSensor(Entity):
"""Implemention of the Sonarr sensor."""
def __init__(self, hass, conf, sensor_type):
"""Create Sonarr entity."""
from pytz import timezone
self.conf = conf
self.host = conf.get(CONF_HOST)
self.port = conf.get(CONF_PORT)
self.urlbase = conf.get(CONF_URLBASE)
if self.urlbase:
self.urlbase = "%s/" % self.urlbase.strip('/')
self.apikey = conf.get(CONF_API_KEY)
self.included = conf.get(CONF_INCLUDED)
self.days = int(conf.get(CONF_DAYS))
self.ssl = 's' if conf.get(CONF_SSL) else ''
# Object data
self.data = []
self._tz = timezone(str(hass.config.time_zone))
self.type = sensor_type
self._name = SENSOR_TYPES[self.type][0]
if self.type == 'diskspace':
self._unit = conf.get(CONF_UNIT)
else:
self._unit = SENSOR_TYPES[self.type][1]
self._icon = SENSOR_TYPES[self.type][2]
# Update sensor
self._available = False
self.update()
def update(self):
"""Update the data for the sensor."""
start = get_date(self._tz)
end = get_date(self._tz, self.days)
try:
res = requests.get(
ENDPOINTS[self.type].format(
self.ssl, self.host, self.port,
self.urlbase, self.apikey, start, end),
timeout=5)
except OSError:
_LOGGER.error('Host %s is not available', self.host)
self._available = False
self._state = None
return
if res.status_code == 200:
if self.type in ['upcoming', 'queue', 'series', 'commands']:
if self.days == 1 and self.type == 'upcoming':
# Sonarr API returns an empty array if start and end dates
# are the same, so we need to filter to just today
self.data = list(
filter(
lambda x: x['airDate'] == str(start),
res.json()
)
)
else:
self.data = res.json()
self._state = len(self.data)
elif self.type == 'wanted':
data = res.json()
res = requests.get('{}&pageSize={}'.format(
ENDPOINTS[self.type].format(
self.ssl, self.host, self.port,
self.urlbase, self.apikey),
data['totalRecords']), timeout=5)
self.data = res.json()['records']
self._state = len(self.data)
elif self.type == 'diskspace':
# If included paths are not provided, use all data
if self.included == []:
self.data = res.json()
else:
# Filter to only show lists that are included
self.data = list(
filter(
lambda x: x['path'] in self.included,
res.json()
)
)
self._state = '{:.2f}'.format(
to_unit(
sum([data['freeSpace'] for data in self.data]),
self._unit
)
)
self._available = True
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format('Sonarr', self._name)
@property
def state(self):
"""Return sensor state."""
return self._state
@property
def available(self):
"""Return sensor availability."""
return self._available
@property
def unit_of_measurement(self):
"""Return the unit of the sensor."""
return self._unit
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
attributes = {}
if self.type == 'upcoming':
for show in self.data:
attributes[show['series']['title']] = 'S{:02d}E{:02d}'.format(
show['seasonNumber'],
show['episodeNumber']
)
elif self.type == 'queue':
for show in self.data:
attributes[show['series']['title'] + ' S{:02d}E{:02d}'.format(
show['episode']['seasonNumber'],
show['episode']['episodeNumber']
)] = '{:.2f}%'.format(100*(1-(show['sizeleft']/show['size'])))
elif self.type == 'wanted':
for show in self.data:
attributes[show['series']['title'] + ' S{:02d}E{:02d}'.format(
show['seasonNumber'], show['episodeNumber']
)] = show['airDate']
elif self.type == 'commands':
for command in self.data:
attributes[command['name']] = command['state']
elif self.type == 'diskspace':
for data in self.data:
attributes[data['path']] = '{:.2f}/{:.2f}{} ({:.2f}%)'.format(
to_unit(data['freeSpace'], self._unit),
to_unit(data['totalSpace'], self._unit),
self._unit, (
to_unit(data['freeSpace'], self._unit) /
to_unit(data['totalSpace'], self._unit) * 100
)
)
elif self.type == 'series':
for show in self.data:
attributes[show['title']] = '{}/{} Episodes'.format(
show['episodeFileCount'],
show['episodeCount']
)
return attributes
@property
def icon(self):
"""Return the icon of the sensor."""
return self._icon
def get_date(zone, offset=0):
"""Get date based on timezone and offset of days."""
day = 60 * 60 * 24
return datetime.date(
datetime.fromtimestamp(time.time() + day*offset, tz=zone)
)
def to_unit(value, unit):
"""Convert bytes to give unit."""
return value / 1024**BYTE_SIZES.index(unit)
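# For example, to_unit(1073741824, 'GB') returns 1.0, since 'GB' sits at
# index 3 in BYTE_SIZES and 1024**3 bytes make one gigabyte.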
| 0 |
# Copyright (c) 2014, Dawn Robotics Ltd
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the Dawn Robotics Ltd nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import filecmp
import subprocess
import logging
#---------------------------------------------------------------------------------------------------
class BoardInfo:
#-----------------------------------------------------------------------------------------------
def __init__( self, name, processor, uploadSpeed ):
self.name = name
self.processor = processor
self.uploadSpeed = uploadSpeed
#---------------------------------------------------------------------------------------------------
DEFAULT_SERIAL_PORT_NAME = "/dev/ttyUSB0"
DEFAULT_BOARD_MODEL = "uno"
BUILD_OUTPUT_FILENAME = "/tmp/ino_build_output.txt"
BOARD_INFO_DICT = {
"uno" : BoardInfo( "uno", "atmega328p", 115200 ),
"atmega8" : BoardInfo( "atmega8", "atmega8", 19200 )
}
#---------------------------------------------------------------------------------------------------
def getInoUploaderUserDir():
homeDir = os.environ[ "HOME" ]
return homeDir + "/.ino_uploader"
#---------------------------------------------------------------------------------------------------
def upload( sketchDir, serialPortName=DEFAULT_SERIAL_PORT_NAME,
boardModel=DEFAULT_BOARD_MODEL, forceRebuild=False ):
boardInfo = BOARD_INFO_DICT[ boardModel ]
uploadSucceeded = False
inoUploaderUserDir = getInoUploaderUserDir()
sketchDirBasename = os.path.basename( sketchDir )
if len( sketchDirBasename ) == 0:
raise Exception( "Invalid sketch directory - " + sketchDir )
inoUploaderSketchDir = inoUploaderUserDir + "/" + sketchDirBasename
inoUploaderSrcDir = inoUploaderSketchDir + "/src"
sketchFiles = os.listdir( sketchDir )
# Check to see if we need to copy files over
fileCopyNeeded = False
if forceRebuild:
if os.path.exists( inoUploaderSketchDir ):
shutil.rmtree( inoUploaderSketchDir )
fileCopyNeeded = True
else:
if not os.path.exists( inoUploaderSrcDir ):
fileCopyNeeded = True
else:
match, mismatch, errors = filecmp.cmpfiles( sketchDir, inoUploaderSrcDir, sketchFiles )
if len( mismatch ) > 0 or len( errors ) > 0:
fileCopyNeeded = True
# Copy files over if needed
if fileCopyNeeded:
logging.info( "Copying sketch src files" )
if os.path.exists( inoUploaderSrcDir ):
shutil.rmtree( inoUploaderSrcDir )
shutil.copytree( sketchDir, inoUploaderSrcDir )
else:
logging.info( "No file copy needed" )
# Now try to build the sketch
logging.debug( "Building sketch in dir " + inoUploaderSketchDir )
outputFile = open( BUILD_OUTPUT_FILENAME, "w" )
buildResult = subprocess.call(
[ "/usr/local/bin/ino", "build", "-m", boardModel ], cwd=inoUploaderSketchDir,
stdout=outputFile, stderr=outputFile )
outputFile.close()
# Upload if the build was successful
if buildResult == 0:
hexFilename = inoUploaderSketchDir + "/.build/{0}/firmware.hex".format( boardModel )
logging.debug( "Trying to upload " + hexFilename )
uploadResult = subprocess.call( [ "avrdude",
"-p", boardInfo.processor,
"-P", serialPortName, "-c", "arduino", "-b", str( boardInfo.uploadSpeed ),
"-D", "-U", "flash:w:{0}:i".format( hexFilename ) ] )
logging.debug( "uploadResult = " + str( uploadResult ) )
if uploadResult == 0:
uploadSucceeded = True
else:
logging.warning( "Building of sketch was unsuccessful" )
return uploadSucceeded | 0.026627 |
import requests
import csv
import time
import re
from bs4 import BeautifulSoup
CSE_CATALOG_URL = 'https://cse.ucsd.edu/undergraduate/courses/prerequisites-cse-undergraduate-classes'
def clean(string, utf8=None):
string = string.encode('utf-8')
replace_values = [(u"\xa0".encode('utf-8'), " "), (u"\u2014".encode('utf-8'), "-"), \
(u"\u0027".encode('utf-8'), "'"), (u"\u2013".encode('utf-8'), "-"), \
(u"\u2019".encode('utf-8'), "'")]
for utf, new in replace_values:
string = string.replace(utf, new)
if utf8 and utf8 is True:
return " ".join(string.split()).encode('utf-8')
return " ".join(string.split())
def extract_catalog_dict(record):
course_ids = []
course_titles = []
course_prereqs = []
if record:
start = time.time()
print "Requesting access to the UCSD catalog page at %s..." % CSE_CATALOG_URL
page = requests.get(CSE_CATALOG_URL)
if record:
print "Webscraping the catalog..."
soup = BeautifulSoup(page.content, 'html.parser')
table = soup.find('table')
# Find all the <tr> tag pairs, skip the first one, then for each.
for row in table.find_all('tr')[1:]:
col = row.find_all('td')
cse_course = clean(col[0].text.strip(), True)
course_ids.append(cse_course)
title = clean(col[1].text.strip(), True)
course_titles.append(title)
#creates a list of prereqs, with a few rules
#NO CO-REQUISITES
#NO SELF-DEPENDENCIES
prereqs = col[2].text.strip().split('***', 1)[0].split('Co-requisite', 1)[0].split('co-requisite', 1)[0]
prereqs = clean(prereqs.replace(cse_course, ""), True)
# 1 capital letter, 0+ letters, space, 1+ digits, 0 or 1 letter
# i.e. 'CSE 30' or 'CSE 8A'
pattern = "[A-Z][a-zA-Z]*\s?[0-9]+[a-zA-Z]?"
or_pattern = "(?:[a-zA-Z]+\s?[0-9]+[a-zA-Z]?)+(?: or [a-zA-Z]+\s?[0-9]+[a-zA-Z]?)+"
# creates a list of prereqs based on the regex
or_filter = re.findall(or_pattern, prereqs)
reg_filter = re.findall(pattern, prereqs)
filtered_prereqs = [course for course in reg_filter if not any(course in c for c in or_filter)]
filtered_prereqs += [courses.split(" or ") for courses in or_filter]
course_prereqs.append(filtered_prereqs)
if record:
end = time.time()
print "Completed scraping... %.3f seconds" % (end-start)
write_catalog(course_ids, course_titles, course_prereqs, record)
return course_ids, course_titles, course_prereqs
def write_catalog(ids, titles, prereqs, record):
if record:
start = time.time()
print "Writing to the csv file 'courses.csv'..."
with open('courses.csv', 'wb') as csvfile:
writer = csv.writer(csvfile)
rows = zip(ids, titles, prereqs)
writer.writerows(rows)
if record:
end = time.time()
print "Completed writing to file... %.3f seconds" % (end-start)
def read_catalog(csv_file):
catalog = []
with open(csv_file, 'rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
c_id = row[0]
c_title = row[1]
c_prereqs = ''.join([x for x in row[2] if x not in ["'", "[", "]"]]).split(', ')
#makes a copy to loop through and remove 'or prerequisites'
c_p_copy = [c for c in c_prereqs]
or_pattern = "\[[a-zA-Z0-9, ]+]"
#finds and parses the string for 'or prerequisites'
or_c_prereqs = re.findall(or_pattern, row[2][1:-1].replace("'", ""))
#parses the list of 'or prerequisites'
for i in range(len(or_c_prereqs)):
or_c_prereqs[i] = or_c_prereqs[i][1:-1].split(', ')
#removes any courses in the 'or prerequisites'
for c in c_p_copy:
if any(c in course for course in or_c_prereqs):
c_prereqs.remove(c)
#combine the lists and remove any empty strings
c_prereqs += or_c_prereqs
c_prereqs = filter(None, c_prereqs)
catalog.append((c_id, c_title, c_prereqs))
course_catalog = {}
#create a dictionary of the courses and the prereqs
#use course_id as key, a tuple of id/title/prereq as value
for x, y, z in catalog:
course_catalog[x.upper().replace(" ", "")] = (x, y, z)
return course_catalog | 0.031329 |
import os
import logging
import mimetypes
import swaf
import swaf.router
import swaf.resp
from swaf.misc import Regex
@swaf.handler(swaf.router.NotFoundRoute)
def notFound():
return swaf.resp.notFound()
class PkgFileServer:
def __init__(self, pkg, notFoundResp=None):
self.swaf_route = Regex('^/static/')
self._root = os.path.join( pkg.__path__[0], 'static' )
self._available = set()
self._log = logging.getLogger('swaf.PkgFileServer')
self.rescan()
if notFoundResp==None:
notFoundResp = swaf.resp.notFound()
self.notFoundResp = notFoundResp
self.swaf_description = 'static files server'
def rescan(self):
self._available = set()
for dirpath, dirnames, filenames in os.walk(self._root):
for fn in filenames:
reldirpath = dirpath[len(self._root):]
fp = os.path.join('/',reldirpath, fn)
self._available.add(fp)
def __call__(self, **req):
if not req['path'].startswith('/static/'):
self._log.error("misrouted request! %s does not start with '/static/'" % repr(req['path']))
return self.notFoundResp
rel = req['path'][len('/static/'):]
if '/'+rel in self._available:
full_path = os.path.join(self._root, rel)
mime_type = mimetypes.guess_type(full_path)[0]
return swaf.resp.ok(mime_type, open(full_path))
else:
return self.notFoundResp
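# A minimal wiring sketch (illustrative; assumes `mypkg` is a package with a
# static/ directory next to its __init__.py):
# server = PkgFileServer(mypkg)
# resp = server(path='/static/css/site.css')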
| 0.036474 |
# File name: toolbox.py
import kivy
kivy.require('1.9.0')
import math
from kivy.uix.togglebutton import ToggleButton
from kivy.graphics import Line, Color
from comicwidgets import StickMan, DraggableWidget
class ToolButton(ToggleButton):
def on_touch_down(self, touch):
ds = self.parent.drawing_space
if self.state == 'down' and ds.parent.collide_point(touch.x,touch.y):
(x,y) = ds.to_widget(touch.x, touch.y)
self.draw(ds,x,y)
return True
return super(ToolButton, self).on_touch_down(touch)
def draw(self, ds, x, y):
pass
class ToolStickman(ToolButton):
def draw(self, ds, x, y):
sm = StickMan(width=48, height=48)
sm.center = (x,y)
screen_manager = self.parent.comic_creator.manager
color_picker = screen_manager.color_picker
sm.canvas.before.add(Color(*color_picker.color))
ds.add_widget(sm)
class ToolFigure(ToolButton):
def draw(self, ds, x, y):
(self.ix, self.iy) = (x,y)
screen_manager = self.parent.comic_creator.manager
color_picker = screen_manager.color_picker
with ds.canvas:
Color(*color_picker.color)
self.figure=self.create_figure(x,y,x+1,y+1)
ds.bind(on_touch_move=self.update_figure)
ds.bind(on_touch_up=self.end_figure)
def update_figure(self, ds, touch):
ds.canvas.remove(self.figure)
with ds.canvas:
self.figure = self.create_figure(self.ix, self.iy,touch.x,touch.y)
def end_figure(self, ds, touch):
ds.unbind(on_touch_move=self.update_figure)
ds.unbind(on_touch_up=self.end_figure)
ds.canvas.remove(self.figure)
self.widgetize(ds,self.ix,self.iy,touch.x,touch.y)
def widgetize(self,ds,ix,iy,fx,fy):
widget = self.create_widget(ix,iy,fx,fy)
(ix,iy) = widget.to_local(ix,iy,relative=True)
(fx,fy) = widget.to_local(fx,fy,relative=True)
screen_manager = self.parent.comic_creator.manager
color_picker = screen_manager.color_picker
widget.canvas.add(Color(*color_picker.color))
widget.canvas.add(self.create_figure(ix,iy,fx,fy))
ds.add_widget(widget)
def create_figure(self,ix,iy,fx,fy):
pass
def create_widget(self,ix,iy,fx,fy):
pass
class ToolLine(ToolFigure):
def create_figure(self,ix,iy,fx,fy):
return Line(points=[ix, iy, fx, fy])
def create_widget(self,ix,iy,fx,fy):
pos = (min(ix, fx), min(iy, fy))
size = (abs(fx-ix), abs(fy-iy))
return DraggableWidget(pos = pos, size = size)
class ToolCircle(ToolFigure):
def create_figure(self,ix,iy,fx,fy):
return Line(circle=[ix,iy,math.hypot(ix-fx,iy-fy)])
def create_widget(self,ix,iy,fx,fy):
r = math.hypot(ix-fx, iy-fy)
pos = (ix-r, iy-r)
size = (2*r, 2*r)
return DraggableWidget(pos = pos, size = size)
| 0.02722 |
#!/usr/bin/env python
########################################################################
# $HeadURL: svn+ssh://svn.cern.ch/reps/dirac/DIRAC/trunk/DIRAC/AccountingSystem/scripts/dirac-accounting-decode-fileid.py $
# File : dirac-accounting-decode-fileid
# Author : Adria Casajus
########################################################################
"""
Decode Accounting plot URLs
"""
__RCSID__ = "$Id: dirac-accounting-decode-fileid.py 31037 2010-11-30 15:06:46Z acasajus $"
import pprint
import sys
import urlparse
import cgi
from DIRAC import gLogger
from DIRAC.Core.Base import Script
from DIRAC.AccountingSystem.private.FileCoding import extractRequestFromFileId
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... URL ...' % Script.scriptName,
'Arguments:',
' URL: encoded URL of a DIRAC Accounting plot'] ) )
Script.parseCommandLine()
fileIds = Script.getPositionalArgs()
for fileId in fileIds:
#Try to find if it's a url
parseRes = urlparse.urlparse( fileId )
if parseRes.query:
queryRes = cgi.parse_qs( parseRes.query )
if 'file' in queryRes:
fileId = queryRes[ 'file' ][0]
#Decode
result = extractRequestFromFileId( fileId )
if not result[ 'OK' ]:
gLogger.error( "Could not decode fileId", "'%s', error was %s" % ( fileId, result[ 'Message' ] ) )
sys.exit( 1 )
gLogger.notice( "Decode for '%s' is:\n%s" % ( fileId, pprint.pformat( result[ 'Value' ] ) ) )
sys.exit( 0 )
| 0.031269 |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"ocaml support"
import os, re
from waflib import Utils, Task
from waflib.Logs import error
from waflib.TaskGen import feature, before_method, after_method, extension
EXT_MLL = ['.mll']
EXT_MLY = ['.mly']
EXT_MLI = ['.mli']
EXT_MLC = ['.c']
EXT_ML = ['.ml']
open_re = re.compile(r'^\s*open\s+([a-zA-Z]+)(;;){0,1}$', re.M)
foo = re.compile(r"""(\(\*)|(\*\))|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^()*"'\\]*)""", re.M)
def filter_comments(txt):
meh = [0]
def repl(m):
if m.group(1):
meh[0] += 1
elif m.group(2):
meh[0] -= 1
elif not meh[0]:
return m.group()
return ''
return foo.sub(repl, txt)
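# For example (a sketch of the behaviour, including nested comments):
# filter_comments('let x = 1 (* note (* nested *) *) ;;')
# -> 'let x = 1  ;;'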
def scan(self):
node = self.inputs[0]
code = filter_comments(node.read())
global open_re
names = []
import_iterator = open_re.finditer(code)
if import_iterator:
for import_match in import_iterator:
names.append(import_match.group(1))
found_lst = []
raw_lst = []
for name in names:
nd = None
for x in self.incpaths:
nd = x.find_resource(name.lower()+'.ml')
if not nd:
nd = x.find_resource(name+'.ml')
if nd:
found_lst.append(nd)
break
else:
raw_lst.append(name)
return (found_lst, raw_lst)
native_lst=['native', 'all', 'c_object']
bytecode_lst=['bytecode', 'all']
@feature('ocaml')
def init_ml(self):
Utils.def_attrs(self,
type = 'all',
incpaths_lst = [],
bld_incpaths_lst = [],
mlltasks = [],
mlytasks = [],
mlitasks = [],
native_tasks = [],
bytecode_tasks = [],
linktasks = [],
bytecode_env = None,
native_env = None,
compiled_tasks = [],
includes = '',
uselib = '',
are_deps_set = 0)
@feature('ocaml')
@after_method('init_ml')
def init_envs_ml(self):
self.islibrary = getattr(self, 'islibrary', False)
global native_lst, bytecode_lst
self.native_env = None
if self.type in native_lst:
self.native_env = self.env.derive()
if self.islibrary:
self.native_env['OCALINKFLAGS'] = '-a'
self.bytecode_env = None
if self.type in bytecode_lst:
self.bytecode_env = self.env.derive()
if self.islibrary:
self.bytecode_env['OCALINKFLAGS'] = '-a'
if self.type == 'c_object':
self.native_env.append_unique('OCALINKFLAGS_OPT', '-output-obj')
@feature('ocaml')
@before_method('apply_vars_ml')
@after_method('init_envs_ml')
def apply_incpaths_ml(self):
inc_lst = self.includes.split()
lst = self.incpaths_lst
for dir in inc_lst:
node = self.path.find_dir(dir)
if not node:
error("node not found: " + str(dir))
continue
if not node in lst:
lst.append(node)
self.bld_incpaths_lst.append(node)
# now the nodes are added to self.incpaths_lst
@feature('ocaml')
@before_method('process_source')
def apply_vars_ml(self):
for i in self.incpaths_lst:
if self.bytecode_env:
app = self.bytecode_env.append_value
app('OCAMLPATH', ['-I', i.bldpath(), '-I', i.srcpath()])
if self.native_env:
app = self.native_env.append_value
app('OCAMLPATH', ['-I', i.bldpath(), '-I', i.srcpath()])
varnames = ['INCLUDES', 'OCAMLFLAGS', 'OCALINKFLAGS', 'OCALINKFLAGS_OPT']
for name in self.uselib.split():
for vname in varnames:
cnt = self.env[vname+'_'+name]
if cnt:
if self.bytecode_env:
self.bytecode_env.append_value(vname, cnt)
if self.native_env:
self.native_env.append_value(vname, cnt)
@feature('ocaml')
@after_method('process_source')
def apply_link_ml(self):
if self.bytecode_env:
ext = self.islibrary and '.cma' or '.run'
linktask = self.create_task('ocalink')
linktask.bytecode = 1
linktask.set_outputs(self.path.find_or_declare(self.target + ext))
linktask.env = self.bytecode_env
self.linktasks.append(linktask)
if self.native_env:
if self.type == 'c_object':
ext = '.o'
elif self.islibrary:
ext = '.cmxa'
else:
ext = ''
linktask = self.create_task('ocalinkx')
linktask.set_outputs(self.path.find_or_declare(self.target + ext))
linktask.env = self.native_env
self.linktasks.append(linktask)
# we produce a .o file to be used by gcc
self.compiled_tasks.append(linktask)
@extension(*EXT_MLL)
def mll_hook(self, node):
mll_task = self.create_task('ocamllex', node, node.change_ext('.ml'))
mll_task.env = self.native_env.derive()
self.mlltasks.append(mll_task)
self.source.append(mll_task.outputs[0])
@extension(*EXT_MLY)
def mly_hook(self, node):
mly_task = self.create_task('ocamlyacc', node, [node.change_ext('.ml'), node.change_ext('.mli')])
mly_task.env = self.native_env.derive()
self.mlytasks.append(mly_task)
self.source.append(mly_task.outputs[0])
task = self.create_task('ocamlcmi', mly_task.outputs[1], mly_task.outputs[1].change_ext('.cmi'))
task.env = self.native_env.derive()
@extension(*EXT_MLI)
def mli_hook(self, node):
task = self.create_task('ocamlcmi', node, node.change_ext('.cmi'))
task.env = self.native_env.derive()
self.mlitasks.append(task)
@extension(*EXT_MLC)
def mlc_hook(self, node):
task = self.create_task('ocamlcc', node, node.change_ext('.o'))
task.env = self.native_env.derive()
self.compiled_tasks.append(task)
@extension(*EXT_ML)
def ml_hook(self, node):
if self.native_env:
task = self.create_task('ocamlx', node, node.change_ext('.cmx'))
task.env = self.native_env.derive()
task.incpaths = self.bld_incpaths_lst
self.native_tasks.append(task)
if self.bytecode_env:
task = self.create_task('ocaml', node, node.change_ext('.cmo'))
task.env = self.bytecode_env.derive()
task.bytecode = 1
task.incpaths = self.bld_incpaths_lst
self.bytecode_tasks.append(task)
def compile_may_start(self):
if not getattr(self, 'flag_deps', ''):
self.flag_deps = 1
# the evil part is that we can only compute the dependencies after the
# source files can be read (this means actually producing the source files)
if getattr(self, 'bytecode', ''):
alltasks = self.generator.bytecode_tasks
else:
alltasks = self.generator.native_tasks
self.signature() # ensure that files are scanned - unfortunately
tree = self.generator.bld
for node in self.inputs:
lst = tree.node_deps[self.uid()]
for depnode in lst:
for t in alltasks:
if t == self:
continue
if depnode in t.inputs:
self.set_run_after(t)
# TODO necessary to get the signature right - for now
delattr(self, 'cache_sig')
self.signature()
return Task.Task.runnable_status(self)
class ocamlx(Task.Task):
"""native caml compilation"""
color = 'GREEN'
run_str = '${OCAMLOPT} ${OCAMLPATH} ${OCAMLFLAGS} ${OCAMLINCLUDES} -c -o ${TGT} ${SRC}'
scan = scan
runnable_status = compile_may_start
class ocaml(Task.Task):
"""bytecode caml compilation"""
color = 'GREEN'
run_str = '${OCAMLC} ${OCAMLPATH} ${OCAMLFLAGS} ${OCAMLINCLUDES} -c -o ${TGT} ${SRC}'
scan = scan
runnable_status = compile_may_start
class ocamlcmi(Task.Task):
"""interface generator (the .i files?)"""
color = 'BLUE'
run_str = '${OCAMLC} ${OCAMLPATH} ${OCAMLINCLUDES} -o ${TGT} -c ${SRC}'
before = ['ocamlcc', 'ocaml', 'ocamlcc']
class ocamlcc(Task.Task):
"""ocaml to c interfaces"""
color = 'GREEN'
run_str = 'cd ${TGT[0].bld_dir()} && ${OCAMLOPT} ${OCAMLFLAGS} ${OCAMLPATH} ${OCAMLINCLUDES} -c ${SRC[0].abspath()}'
class ocamllex(Task.Task):
"""lexical generator"""
color = 'BLUE'
run_str = '${OCAMLLEX} ${SRC} -o ${TGT}'
before = ['ocamlcmi', 'ocaml', 'ocamlcc']
class ocamlyacc(Task.Task):
"""parser generator"""
color = 'BLUE'
run_str = '${OCAMLYACC} -b ${tsk.base()} ${SRC}'
before = ['ocamlcmi', 'ocaml', 'ocamlcc']
def base(self):
node = self.outputs[0]
s = os.path.splitext(node.name)[0]
return node.bld_dir() + os.sep + s
def link_may_start(self):
if getattr(self, 'bytecode', 0):
alltasks = self.generator.bytecode_tasks
else:
alltasks = self.generator.native_tasks
for x in alltasks:
if not x.hasrun:
return Task.ASK_LATER
if not getattr(self, 'order', ''):
# now reorder the inputs given the task dependencies
# this part is difficult, we do not have a total order on the tasks
# if the dependencies are wrong, this may not stop
seen = []
pendant = []+alltasks
while pendant:
task = pendant.pop(0)
if task in seen:
continue
for x in task.run_after:
if not x in seen:
pendant.append(task)
break
else:
seen.append(task)
self.inputs = [x.outputs[0] for x in seen]
self.order = 1
return Task.Task.runnable_status(self)
class ocalink(Task.Task):
"""bytecode caml link"""
color = 'YELLOW'
run_str = '${OCAMLC} -o ${TGT} ${OCAMLINCLUDES} ${OCALINKFLAGS} ${SRC}'
runnable_status = link_may_start
after = ['ocaml', 'ocamlcc']
class ocalinkx(Task.Task):
"""native caml link"""
color = 'YELLOW'
run_str = '${OCAMLOPT} -o ${TGT} ${OCAMLINCLUDES} ${OCALINKFLAGS_OPT} ${SRC}'
runnable_status = link_may_start
after = ['ocamlx', 'ocamlcc']
def configure(conf):
opt = conf.find_program('ocamlopt', var='OCAMLOPT', mandatory=False)
occ = conf.find_program('ocamlc', var='OCAMLC', mandatory=False)
if (not opt) or (not occ):
conf.fatal('The objective caml compiler was not found:\ninstall it or make it available in your PATH')
v = conf.env
v['OCAMLC'] = occ
v['OCAMLOPT'] = opt
v['OCAMLLEX'] = conf.find_program('ocamllex', var='OCAMLLEX', mandatory=False)
v['OCAMLYACC'] = conf.find_program('ocamlyacc', var='OCAMLYACC', mandatory=False)
v['OCAMLFLAGS'] = ''
where = conf.cmd_and_log(conf.env.OCAMLC + ['-where']).strip()+os.sep
v['OCAMLLIB'] = where
v['LIBPATH_OCAML'] = where
v['INCLUDES_OCAML'] = where
v['LIB_OCAML'] = 'camlrun'
| 0.035055 |
'''
Copyright (c) 2011-2017, Agora Games, LLC All rights reserved.
https://github.com/agoragames/chai/blob/master/LICENSE.txt
'''
import inspect
import types
import sys
import gc
from .expectation import Expectation
from .spy import Spy
from .exception import *
from ._termcolor import colored
# For clarity here and in tests, could make these class or static methods on
# Stub. Chai base class would hide that.
def stub(obj, attr=None):
'''
Stub an object. If attr is not None, will attempt to stub that attribute
on the object. Only required for modules and other rare cases where we
can't determine the binding from the object.
'''
if attr:
return _stub_attr(obj, attr)
else:
return _stub_obj(obj)
def _stub_attr(obj, attr_name):
'''
Stub an attribute of an object. Will return an existing stub if
there already is one.
'''
# Annoying circular reference requires importing here. Would like to see
# this cleaned up. @AW
from .mock import Mock
# Check to see if this a property, this check is only for when dealing
# with an instance. getattr will work for classes.
is_property = False
if not inspect.isclass(obj) and not inspect.ismodule(obj):
# It's possible that the attribute is defined after initialization, and
# so is not on the class itself.
attr = getattr(obj.__class__, attr_name, None)
if isinstance(attr, property):
is_property = True
if not is_property:
attr = getattr(obj, attr_name)
# Return an existing stub
if isinstance(attr, Stub):
return attr
# If a Mock object, stub its __call__
if isinstance(attr, Mock):
return stub(attr.__call__)
if isinstance(attr, property):
return StubProperty(obj, attr_name)
# Sadly, builtin functions and methods have the same type, so we have to
# use the same stub class even though it's a bit ugly
if inspect.ismodule(obj) and isinstance(attr, (types.FunctionType,
types.BuiltinFunctionType,
types.BuiltinMethodType)):
return StubFunction(obj, attr_name)
# In python3 unbound methods are treated as functions with no reference
# back to the parent class and no im_* fields. We can still make unbound
# methods work by passing these through to the stub
if inspect.isclass(obj) and isinstance(attr, types.FunctionType):
return StubUnboundMethod(obj, attr_name)
# I thought that types.UnboundMethodType differentiated these cases but
# apparently not.
if isinstance(attr, types.MethodType):
# Handle differently if unbound because it's an implicit "any instance"
if getattr(attr, 'im_self', None) is None:
# Handle the python3 case and py2 filter
if hasattr(attr, '__self__'):
if attr.__self__ is not None:
return StubMethod(obj, attr_name)
if sys.version_info.major == 2:
return StubUnboundMethod(attr)
else:
return StubMethod(obj, attr_name)
if isinstance(attr, (types.BuiltinFunctionType, types.BuiltinMethodType)):
return StubFunction(obj, attr_name)
# What an absurd type this is ....
if type(attr).__name__ == 'method-wrapper':
return StubMethodWrapper(attr)
# This is also slot_descriptor
if type(attr).__name__ == 'wrapper_descriptor':
return StubWrapperDescriptor(obj, attr_name)
raise UnsupportedStub(
"can't stub %s(%s) of %s", attr_name, type(attr), obj)
def _stub_obj(obj):
'''
Stub an object directly.
'''
# Annoying circular reference requires importing here. Would like to see
# this cleaned up. @AW
from .mock import Mock
# Return an existing stub
if isinstance(obj, Stub):
return obj
# If a Mock object, stub its __call__
if isinstance(obj, Mock):
return stub(obj.__call__)
# If passed-in a type, assume that we're going to stub out the creation.
# See StubNew for the awesome sauce.
# if isinstance(obj, types.TypeType):
if hasattr(types, 'TypeType') and isinstance(obj, types.TypeType):
return StubNew(obj)
elif hasattr(__builtins__, 'type') and \
isinstance(obj, __builtins__.type):
return StubNew(obj)
elif inspect.isclass(obj):
return StubNew(obj)
# I thought that types.UnboundMethodType differentiated these cases but
# apparently not.
if isinstance(obj, types.MethodType):
# Handle differently if unbound because it's an implicit "any instance"
if getattr(obj, 'im_self', None) is None:
# Handle the python3 case and py2 filter
if hasattr(obj, '__self__'):
if obj.__self__ is not None:
return StubMethod(obj)
if sys.version_info.major == 2:
return StubUnboundMethod(obj)
else:
return StubMethod(obj)
# These aren't in the types library
if type(obj).__name__ == 'method-wrapper':
return StubMethodWrapper(obj)
if type(obj).__name__ == 'wrapper_descriptor':
raise UnsupportedStub(
"must call stub(obj,'%s') for slot wrapper on %s",
obj.__name__, obj.__objclass__.__name__)
# (Mostly) Lastly, look for properties.
# First look for the situation where there's a reference back to the
# property.
prop = obj
if isinstance(getattr(obj, '__self__', None), property):
obj = prop.__self__
# Once we've found a property, we have to figure out how to reference
# back to the owning class. This is a giant pain and we have to use gc
# to find out where it comes from. This code is dense but resolves to
# something like this:
# >>> gc.get_referrers( foo.x )
# [{'__dict__': <attribute '__dict__' of 'foo' objects>,
# 'x': <property object at 0x7f68c99a16d8>,
# '__module__': '__main__',
# '__weakref__': <attribute '__weakref__' of 'foo' objects>,
# '__doc__': None}]
if isinstance(obj, property):
klass, attr = None, None
for ref in gc.get_referrers(obj):
if klass and attr:
break
if isinstance(ref, dict) and ref.get('prop', None) is obj:
klass = getattr(
ref.get('__dict__', None), '__objclass__', None)
for name, val in getattr(klass, '__dict__', {}).items():
if val is obj:
attr = name
break
# In the case of PyPy, we have to check all types that refer to
# the property, and see if any of their attrs are the property
elif isinstance(ref, type):
# Use dir as a means to quickly walk through the class tree
for name in dir(ref):
if getattr(ref, name) == obj:
klass = ref
attr = name
break
if klass and attr:
rval = stub(klass, attr)
if prop != obj:
return stub(rval, prop.__name__)
return rval
# If a function and it has an associated module, we can mock directly.
# Note that this *must* be after properties, otherwise it conflicts with
# stubbing out the deleter methods and such
# Sadly, builtin functions and methods have the same type, so we have to
# use the same stub class even though it's a bit ugly
if isinstance(obj, (types.FunctionType, types.BuiltinFunctionType,
types.BuiltinMethodType)) and hasattr(obj, '__module__'):
return StubFunction(obj)
raise UnsupportedStub("can't stub %s", obj)
class Stub(object):
'''
Base class for all stubs.
'''
def __init__(self, obj, attr=None):
'''
Setup the structs for expectations
'''
self._obj = obj
self._attr = attr
self._expectations = []
self._torn = False
@property
def name(self):
        return None  # Subclasses implement this.
@property
def expectations(self):
return self._expectations
def unmet_expectations(self):
'''
        Return the expectations on this stub that have not been met.
'''
unmet = []
for exp in self._expectations:
if not exp.closed(with_counts=True):
unmet.append(ExpectationNotSatisfied(exp))
return unmet
def teardown(self):
'''
Clean up all expectations and restore the original attribute of the
mocked object.
'''
if not self._torn:
self._expectations = []
self._torn = True
self._teardown()
def _teardown(self):
'''
Hook for subclasses to teardown their stubs. Called only once.
'''
def expect(self):
'''
Add an expectation to this stub. Return the expectation.
'''
exp = Expectation(self)
self._expectations.append(exp)
return exp
def spy(self):
'''
Add a spy to this stub. Return the spy.
'''
spy = Spy(self)
self._expectations.append(spy)
return spy
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
raise NotImplementedError("Must be implemented by subclasses")
def __call__(self, *args, **kwargs):
for exp in self._expectations:
# If expectation closed skip
if exp.closed():
continue
# If args don't match the expectation but its minimum counts have
# been met, close it and move on, else it's an unexpected call.
            # We have to check counts here due to the looser definitions of
            # expectations in 0.3.x. If the args don't match, the counts
            # aren't met, and out-of-order calls aren't allowed, break out
            # and raise an exception.
if not exp.match(*args, **kwargs):
if exp.counts_met():
exp.close(*args, **kwargs)
elif not exp.is_any_order():
break
else:
return exp.test(*args, **kwargs)
raise UnexpectedCall(
call=self.name, suffix=self._format_exception(),
args=args, kwargs=kwargs)
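    # Worked example of the loop above (illustrative): with two open
    # expectations [exp1 matching args(1), exp2 matching args(2)], a call
    # with (2,) is tested against exp1 first; if exp1's minimum counts are
    # met it is closed and the call falls through to exp2, otherwise (when
    # out-of-order calls are not allowed) the loop breaks and UnexpectedCall
    # is raised.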
def _format_exception(self):
result = [
colored("All expectations", 'white', attrs=['bold'])
]
for e in self._expectations:
result.append(str(e))
return "\n".join(result)
class StubProperty(Stub, property):
'''
Property stubbing.
'''
def __init__(self, obj, attr):
super(StubProperty, self).__init__(obj, attr)
property.__init__(self, lambda x: self(),
lambda x, val: self.setter(val),
lambda x: self.deleter())
        # In order to stub out a property we have to ask the class for the
        # property object that was created when Python executed the class code.
if inspect.isclass(obj):
self._instance = obj
else:
self._instance = obj.__class__
# Use a simple Mock object for the deleter and setter. Use same
# namespace as property type so that it simply works.
# Annoying circular reference requires importing here. Would like to
# see this cleaned up. @AW
from .mock import Mock
self._obj = getattr(self._instance, attr)
self.setter = Mock()
self.deleter = Mock()
setattr(self._instance, self._attr, self)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
# TODO: this is probably the most complicated one to implement. Will
# figure it out eventually.
raise NotImplementedError("property spies are not supported")
@property
def name(self):
return "%s.%s" % (self._instance.__name__, self._attr)
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._instance, self._attr, self._obj)
class StubMethod(Stub):
'''
Stub a method.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object of type MethodType
'''
super(StubMethod, self).__init__(obj, attr)
if not self._attr:
# python3
if sys.version_info.major == 3: # hasattr(obj,'__func__'):
self._attr = obj.__func__.__name__
else:
self._attr = obj.im_func.func_name
if sys.version_info.major == 3: # hasattr(obj, '__self__'):
self._instance = obj.__self__
else:
self._instance = obj.im_self
else:
self._instance = self._obj
self._obj = getattr(self._instance, self._attr)
setattr(self._instance, self._attr, self)
@property
def name(self):
from .mock import Mock # Import here for the same reason as above.
if hasattr(self._obj, 'im_class'):
if issubclass(self._obj.im_class, Mock):
return self._obj.im_self._name
# Always use the class to get the name
klass = self._instance
if not inspect.isclass(self._instance):
klass = self._instance.__class__
return "%s.%s" % (klass.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
if hasattr(self._obj, '__self__') and \
inspect.isclass(self._obj.__self__) and \
self._obj.__self__ is self._instance:
return self._obj.__func__(self._instance, *args, **kwargs)
elif hasattr(self._obj, 'im_self') and \
inspect.isclass(self._obj.im_self) and \
self._obj.im_self is self._instance:
return self._obj.im_func(self._instance, *args, **kwargs)
else:
return self._obj(*args, **kwargs)
def _teardown(self):
'''
        Put the original method back in place. This also handles the
        special case of putting back a class method.
        The following code snippet best describes why a plain setattr
        fails: the class method would be replaced with a bound method,
        not a class method.
>>> class Example(object):
... @classmethod
... def a_classmethod(self):
... pass
...
>>> Example.__dict__['a_classmethod']
<classmethod object at 0x7f5e6c298be8>
>>> orig = getattr(Example, 'a_classmethod')
>>> orig
<bound method type.a_classmethod of <class '__main__.Example'>>
>>> setattr(Example, 'a_classmethod', orig)
>>> Example.__dict__['a_classmethod']
<bound method type.a_classmethod of <class '__main__.Example'>>
        The only way to figure out if this is a class method is to check
        whether the bound method's im_self is a class; if so, we need to
        wrap the function object (im_func) with classmethod before setting
        it back on the class.
'''
# Figure out if this is a class method and we're unstubbing it on the
# class to which it belongs. This addresses an edge case where a
# module can expose a method of an instance. e.g gevent.
if hasattr(self._obj, '__self__') and \
inspect.isclass(self._obj.__self__) and \
self._obj.__self__ is self._instance:
setattr(
self._instance, self._attr, classmethod(self._obj.__func__))
elif hasattr(self._obj, 'im_self') and \
inspect.isclass(self._obj.im_self) and \
self._obj.im_self is self._instance:
# Wrap it and set it back on the class
setattr(self._instance, self._attr, classmethod(self._obj.im_func))
else:
setattr(self._instance, self._attr, self._obj)
class StubFunction(Stub):
'''
Stub a function.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object that is an unbound method
'''
super(StubFunction, self).__init__(obj, attr)
if not self._attr:
if getattr(obj, '__module__', None):
self._instance = sys.modules[obj.__module__]
elif getattr(obj, '__self__', None):
self._instance = obj.__self__
else:
raise UnsupportedStub("Failed to find instance of %s" % (obj))
if getattr(obj, 'func_name', None):
self._attr = obj.func_name
elif getattr(obj, '__name__', None):
self._attr = obj.__name__
else:
raise UnsupportedStub("Failed to find name of %s" % (obj))
else:
self._instance = self._obj
self._obj = getattr(self._instance, self._attr)
# This handles the case where we're stubbing a special method that's
# inherited from object, and so instead of calling setattr on teardown,
# we want to call delattr. This is particularly important for not
# seeing those stupid DeprecationWarnings after StubNew
self._was_object_method = False
if hasattr(self._instance, '__dict__'):
self._was_object_method = \
self._attr not in self._instance.__dict__.keys() and\
self._attr in object.__dict__.keys()
setattr(self._instance, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._instance.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
# TODO: Does this change if was_object_method?
return self._obj(*args, **kwargs)
def _teardown(self):
'''
Replace the original method.
'''
if not self._was_object_method:
setattr(self._instance, self._attr, self._obj)
else:
delattr(self._instance, self._attr)
class StubNew(StubFunction):
'''
Stub out the constructor, but hide the fact that we're stubbing "__new__"
and act more like we're stubbing "__init__". Needs to use the logic in
the StubFunction ctor.
'''
_cache = {}
def __new__(self, klass, *args):
'''
        Because we're not saving the stub into any attribute, we have to
        do some faking here to return the same handle.
'''
rval = self._cache.get(klass)
if not rval:
rval = self._cache[klass] = super(
StubNew, self).__new__(self, *args)
rval._allow_init = True
else:
rval._allow_init = False
return rval
def __init__(self, obj):
'''
Overload the initialization so that we can hack access to __new__.
'''
if self._allow_init:
self._new = obj.__new__
super(StubNew, self).__init__(obj, '__new__')
self._type = obj
def __call__(self, *args, **kwargs):
'''
When calling the new function, strip out the first arg which is
the type. In this way, the mocker writes their expectation as if it
was an __init__.
'''
return super(StubNew, self).__call__(*(args[1:]), **kwargs)
def call_orig(self, *args, **kwargs):
'''
Calls the original function. Simulates __new__ and __init__ together.
'''
rval = super(StubNew, self).call_orig(self._type)
rval.__init__(*args, **kwargs)
return rval
def _teardown(self):
'''
Overload so that we can clear out the cache after a test run.
'''
# __new__ is a super-special case in that even when stubbing a class
# which implements its own __new__ and subclasses object, the
# "Class.__new__" reference is a staticmethod and not a method (or
# function). That confuses the "was_object_method" logic in
# StubFunction which then fails to delattr and from then on the class
# is corrupted. So skip that teardown and use a __new__-specific case.
setattr(self._instance, self._attr, staticmethod(self._new))
StubNew._cache.pop(self._type)
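# Sketch of what StubNew enables (class name hypothetical): stubbing a type
# intercepts construction, with expectations written as if for __init__:
#
#   s = stub(Connection)                # Connection is a class
#   s.expect().args('localhost', 5672)
#   Connection('localhost', 5672)       # matched without running __init__
#   s.teardown()                        # restores Connection.__new__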
class StubUnboundMethod(Stub):
'''
Stub an unbound method.
'''
def __init__(self, obj, attr=None):
'''
Initialize with an object that is an unbound method
'''
# Note: It doesn't appear that there's any way to support stubbing
# by method in python3 because an unbound method has no reference
# to its parent class, it just looks like a regular function
super(StubUnboundMethod, self).__init__(obj, attr)
if self._attr is None:
self._instance = obj.im_class
self._attr = obj.im_func.func_name
else:
self._obj = getattr(obj, attr)
self._instance = obj
setattr(self._instance, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._instance.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
# TODO: Figure out if this can be implemented. The challenge is that
# the context of "self" has to be passed in as an argument, but there's
# not necessarily a generic way of doing that. It may fall out as a
# side-effect of the actual implementation of spies.
raise NotImplementedError("unbound method spies are not supported")
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._instance, self._attr, self._obj)
class StubMethodWrapper(Stub):
'''
Stub a method-wrapper.
'''
def __init__(self, obj):
'''
Initialize with an object that is a method wrapper.
'''
super(StubMethodWrapper, self).__init__(obj)
self._instance = obj.__self__
self._attr = obj.__name__
setattr(self._instance, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._instance.__class__.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
return self._obj(*args, **kwargs)
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._instance, self._attr, self._obj)
class StubWrapperDescriptor(Stub):
'''
    Stub a wrapper-descriptor. Only works when we can fetch it by name,
    because the wrapper-descriptor object doesn't contain both the instance
    ref and the attribute name needed to look it up. Used for mocking
    object.__init__ and related builtin methods on subclasses that don't
    overload them.
'''
def __init__(self, obj, attr_name):
'''
Initialize with an object that is a method wrapper.
'''
super(StubWrapperDescriptor, self).__init__(obj, attr_name)
self._orig = getattr(self._obj, self._attr)
setattr(self._obj, self._attr, self)
@property
def name(self):
return "%s.%s" % (self._obj.__name__, self._attr)
def call_orig(self, *args, **kwargs):
'''
Calls the original function.
'''
return self._orig(self._obj, *args, **kwargs)
def _teardown(self):
'''
Replace the original method.
'''
setattr(self._obj, self._attr, self._orig)
| 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys, urllib, urllib2, re, xbmc, xbmcplugin, xbmcgui, xbmcaddon
dbg = False
#dbg = True
pluginhandle = int(sys.argv[1])
settings = xbmcaddon.Addon(id='plugin.video.mrskin')
translation = settings.getLocalizedString
forceViewMode = settings.getSetting("forceViewMode") == "true"
useThumbAsFanart = settings.getSetting("useThumbAsFanart") == "true"
maxViewPages = int(settings.getSetting("maxViewPages"))*2
if maxViewPages == 0: maxViewPages = 1
viewMode = str(settings.getSetting("viewMode"))
startpage = 'http://www.mrskin.com'
playlist = 'http://www.mrskin.com/video/playlist?t=<tid>'
playlistPlayer = 'http://www.mrskin.com/feeds/playlistPlayer/xml?options[quality]=hd&options[id]=<pid>'
contrex = ['hd: \'([^\']*)\'', 'file: \'([^\']*)\'', 'resources:[^<]*"file":"([^"]*)"']
def index():
addDir('Original Videos', '', 'origVideos', '')
addDir('Playlists', '', 'indexPlaylists', '')
xbmcplugin.endOfDirectory(pluginhandle)
def indexPlaylists():
url = startpage + '/playlists'
if dbg: print 'open ' + url
content = getUrl(url)
for categories in re.compile('<ul[^>]*id="video-categories"[^>]*>.*?</ul>', re.DOTALL).findall(content):
for href, title in re.compile('<li[^>]*>[^<]*<a[^>]*href="([^"]*)"[^>]*>[^<]*<i[^>]*></i>([^<]*)<', re.DOTALL).findall(categories):
if 'http' not in href: href = startpage + href
title = cleanTitle(title).title()
if dbg: print title + ' --> ' + href
addDir(title, href, 'showPlaylists')
xbmcplugin.endOfDirectory(pluginhandle)
def showPlaylists(url):
if dbg: print 'open ' + url
content = getUrl(url)
for collection in re.compile('<div[^>]*id="video-playlist-collection">.*?</div>', re.DOTALL).findall(content):
for pid, pic, title in re.compile('<li[^>]*>[^<]*<a[^>]*href="[^"]*p([0-9]+).html[^"]*"[^>]*>[^<]*<img[^>]*src="([^"]*)"[^>]*alt="([^"]*)"', re.DOTALL).findall(collection):
if 'http' not in pic: pic = startpage + pic
href = playlistPlayer.replace('<pid>', pid)
title = cleanTitle(title)
if dbg: print title + ' --> ' + href + ' --> ' + pic
addDir(title, href, 'showPlVideos', pic)
    for nextpage in re.compile('<li><a[^>]*href="([^"]*)"[^>]*>&raquo;', re.DOTALL).findall(content):
if 'http' not in nextpage: nextpage = startpage + nextpage
if dbg: print 'next page ' + nextpage
addDir('Next Page', nextpage, 'showPlaylists')
xbmcplugin.endOfDirectory(pluginhandle)
if forceViewMode: xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def showPlVideos(url):
if dbg: print 'open ' + url
content = getUrl(url)
for item in re.compile('<item>(.*?)</item>', re.DOTALL).findall(content):
title = cleanTitle(re.compile('<jwplayer:clean_title>([^<]*)</jwplayer:clean_title>', re.DOTALL).findall(item)[0])
desc = re.compile('<description><!\[CDATA\[([^<]*)\]\]></description>', re.DOTALL).findall(item)[0]
img = re.compile('<jwplayer:thumbnail>([^<]*)</jwplayer:thumbnail>', re.DOTALL).findall(item)[0]
href = re.compile('<jwplayer:file>([^<]*)</jwplayer:file>', re.DOTALL).findall(item)[0]
if 'http' not in href: href = startpage + href
if dbg: print title + ' --> ' + href + ' --> ' + img
addLink(title, href, 'playVideo', img)
xbmcplugin.endOfDirectory(pluginhandle)
if forceViewMode: xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def origVideos():
url = startpage + '/video'
if dbg: print 'open ' + url
content = getUrl(url)
for tagid, cat in re.compile('<a[^>]*data-tag="([^"]*)"[^>]*>[^<]*<i[^>]*>[^<]*</i>([^<]*)</a>', re.DOTALL).findall(content):
cat = cleanTitle(cat)
href = playlist.replace('<tid>', tagid)
if dbg: print cat + ' --> ' + href
addDir(cat, href, 'showVideos')
xbmcplugin.endOfDirectory(pluginhandle)
if forceViewMode: xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def showVideos(url):
if dbg: print 'open ' + url
content = getUrl(url)
for href, img, title in re.compile('<a[^>]*href="([^"]*)"[^>]*class="[^"]*video[^"]*plain[^"]*"[^>]*>[^<]*<img[^>]*src="([^"]*)"[^>]*alt="([^"]*)"[^>]*>', re.DOTALL).findall(content):
href = startpage + href
title = cleanTitle(title)
if dbg: print 'add link: title=' + title + ' href=' +href + ' img=' + img
addLink(title, href, 'playVideo', img)
    for nextpage in re.compile('<li>[^<]*<a[^>]*href="(/video/playlist[^"]*)"[^>]*data-page="[^"]*">[^<]*&raquo;[^<]*</a>[^<]*</li>', re.DOTALL).findall(content):
nextpage = startpage + nextpage
if dbg: print 'next page ' + nextpage
addDir('Next Page', nextpage, 'showVideos')
xbmcplugin.endOfDirectory(pluginhandle)
if forceViewMode: xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def getVideoUrl(content):
for rex in contrex:
if dbg: print 'try search with ' + rex
match = re.compile(rex, re.DOTALL).findall(content)
if match: return match[0]
def playVideo(url):
if dbg: print 'play video: ' + url
if '_pma' in url:
video = url
else:
content = getUrl(url)
video = getVideoUrl(content)
if video:
listitem = xbmcgui.ListItem(path=video.replace('\\', ''))
return xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
else:
xbmc.executebuiltin('Notification(Video not found., 5000)')
def cleanTitle(title):
#title = re.sub('<[^>]*>', ' ', title)
#title = re.sub('&#\d{3};', ' ', title)
    title = title.replace('&lt;','<').replace('&gt;','>').replace('&amp;','&').replace('&quot;','"').replace('&szlig;','ß').replace('&ndash;','-').replace('&nbsp;', ' ')
    #title = title.replace('&Auml;','Ä').replace('&Uuml;','Ü').replace('&Ouml;','Ö').replace('&auml;','ä').replace('&uuml;','ü').replace('&ouml;','ö')
    #title = title.replace('&bdquo;','"').replace('&ldquo;','"')
title = re.sub('\s+', ' ', title)
return title.strip()
def getUrl(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0')
response = urllib2.urlopen(req, timeout=30)
link = response.read()
response.close()
return link
def parameters_string_to_dict(parameters):
''' Convert parameters encoded in a URL to a dict. '''
paramDict = {}
if parameters:
paramPairs = parameters[1:].split("&")
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
if (len(paramSplits)) == 2:
paramDict[paramSplits[0]] = paramSplits[1]
return paramDict
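# Example (illustrative): for sys.argv[2] == "?url=http%3A%2F%2Fx&mode=showVideos"
# this returns {'url': 'http%3A%2F%2Fx', 'mode': 'showVideos'}; the url value
# is unquoted later via urllib.unquote_plus.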
def addLink(name, url, mode, iconimage, fanart=''):
u = sys.argv[0] + "?url=" + urllib.quote_plus(url) + "&mode=" + str(mode)
liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
liz.setProperty('IsPlayable', 'true')
if useThumbAsFanart: liz.setProperty('fanart_image', fanart)
return xbmcplugin.addDirectoryItem(handle=pluginhandle, url=u, listitem=liz)
def addDir(name, url, mode, iconimage=''):
#name = '* ' + name
u = sys.argv[0] + "?url=" + urllib.quote_plus(url) + "&mode=" + str(mode)
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
return xbmcplugin.addDirectoryItem(handle=pluginhandle, url=u, listitem=liz, isFolder=True)
params = parameters_string_to_dict(sys.argv[2])
mode = params.get('mode')
url = params.get('url')
if type(url) == type(str()): url = urllib.unquote_plus(url)
if mode == 'showVideos': showVideos(url)
elif mode == 'playVideo': playVideo(url)
elif mode == 'origVideos': origVideos()
elif mode == 'indexPlaylists': indexPlaylists()
elif mode == 'showPlaylists': showPlaylists(url)
elif mode == 'showPlVideos': showPlVideos(url)
else: index() | 0.029533 |
#Please write your p2p chat client here
#
#Must act as both a server and a client
import sys
import socket
import select
def prompt():
sys.stdout.write("> ")
sys.stdout.flush()
def connect_to_peer(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2)
try:
sock.connect((host, port))
ESTABLISHED_PEERS[sock] = (host, port)
except:
print("can't connect to peer", host, port, sys.exc_info()[0])
print(type(host), type(port))
sys.exit()
def print_help():
#TODO
print("command | function\n"
"----------------------------+------------------------------------\n"
"-help | displays this table\n"
"-online_users | shows who is online\n"
"-connect [ip] [port] | connect to a peer\n"
"-disconnect [ip] [port] | disconnect from a peer\n"
"-talk [ip] [port] [message] | sends a message to a peer\n"
"-logoff | disconnect from the registry server")
if __name__ == "__main__":
if len(sys.argv) < 3:
print('Usage: python peer.py hostname port')
sys.exit()
mothership = sys.argv[1] # mothership is the registry server
port = int(sys.argv[2])
CONNECTION_LIST = [] # necessary?
ONLINE_PEERS = [] # list of tuples (addr, port)
ESTABLISHED_PEERS = {}
RECV_BUFFER = 4096
mother_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mother_sock.settimeout(2)
try:
mother_sock.connect((mothership, port))
except:
print('Unable to connect to registry server', sys.exc_info()[0])
sys.exit()
print('Connected to registry server.')
ESTABLISHED_PEERS[mother_sock] = (mothership, port)
ESTABLISHED_PEERS[sys.stdin] = ('', 0)
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('', 0))
server_socket.listen(10)
server_addr = server_socket.getsockname()[0]
server_port = server_socket.getsockname()[1]
ESTABLISHED_PEERS[server_socket] = (server_addr, server_port)
print('I am:', server_addr, server_port)
mother_sock.send(str(server_port).encode())
while 1:
read_sockets, write_sockets, error_sockets = select.select(ESTABLISHED_PEERS.keys(), [], [])
for rsock in read_sockets:
if rsock == mother_sock:
#print('mother_sock')
data = rsock.recv(RECV_BUFFER)
if not data:
del ESTABLISHED_PEERS[mother_sock]
else:
print("Online peers:\n", data.decode())
prompt()
elif rsock == server_socket:
#print('server_sock')
new_sock, addr = server_socket.accept()
ESTABLISHED_PEERS[new_sock] = addr
elif rsock == sys.stdin:
#print('sys.stdin')
data = sys.stdin.readline().strip()
#print('data=' + data)
command = data.split(' ', 1)[0]
#print('data', data)
if command == '-help':
print_help()
elif command == '-online_users':
print('fetching online users')
mother_sock.send("REQ::ONLINE_USERS".encode())
elif command == '-connect':
host = data.split(' ', -1)[1]
port = int(data.split(' ', -1)[2])
connect_to_peer(host, port)
elif command == '-disconnect':
host = data.split(' ', -1)[1]
port = int(data.split(' ', -1)[2])
for est_sock, est_addr in ESTABLISHED_PEERS.items():
if est_addr == (host, port):
del ESTABLISHED_PEERS[est_sock]
break
else:
print("No such peer connected.")
elif command == '-talk':
host = data.split(' ', -1)[1]
port = int(data.split(' ', -1)[2])
mesg = data.split(' ', 3)[3]
for est_sock, est_addr in ESTABLISHED_PEERS.items():
if est_addr == (host, port):
est_sock.send(mesg.encode())
print('you:', mesg)
break
else:
print("No such peer connected.")
elif command == '-logoff':
print('will logoff')
mother_sock.send("REQ::LOGOFF".encode())
else:
print('invalid command:', command)
print_help()
prompt()
else:
data = rsock.recv(RECV_BUFFER)
if not data:
print('<disconnected from peer>')
del ESTABLISHED_PEERS[rsock]
else:
#message from peer
                    # getpeername() gives the remote peer's address;
                    # getsockname() would report our own end of the socket
                    print(rsock.getpeername()[0], "says:", data.decode())
#sys.stdout.write(data.decode())
#sys.stdout.flush()
#print(data.decode())
prompt()
| 0.035341 |
from django.contrib.auth.models import User
from django.db import models
# Forward declared intermediate model
class Membership(models.Model):
person = models.ForeignKey('Person', models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
price = models.IntegerField(default=100)
# using custom id column to test ticket #11107
class UserMembership(models.Model):
id = models.AutoField(db_column='usermembership_id', primary_key=True)
user = models.ForeignKey(User, models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
price = models.IntegerField(default=100)
class Person(models.Model):
name = models.CharField(max_length=128)
def __str__(self):
return self.name
class Group(models.Model):
name = models.CharField(max_length=128)
# Membership object defined as a class
members = models.ManyToManyField(Person, through=Membership)
user_members = models.ManyToManyField(User, through='UserMembership')
def __str__(self):
return self.name
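# A minimal usage sketch for the intermediate model above (names illustrative):
# because Group.members declares through=Membership, members are added by
# creating Membership rows rather than by calling group.members.add():
#
#   ringo = Person.objects.create(name="Ringo")
#   beatles = Group.objects.create(name="The Beatles")
#   Membership.objects.create(person=ringo, group=beatles, price=50)
#   assert ringo in beatles.members.all()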
# Using to_field on the through model
class Car(models.Model):
make = models.CharField(max_length=20, unique=True, null=True)
drivers = models.ManyToManyField('Driver', through='CarDriver')
def __str__(self):
return str(self.make)
class Driver(models.Model):
name = models.CharField(max_length=20, unique=True, null=True)
class Meta:
ordering = ('name',)
def __str__(self):
return str(self.name)
class CarDriver(models.Model):
car = models.ForeignKey('Car', models.CASCADE, to_field='make')
driver = models.ForeignKey('Driver', models.CASCADE, to_field='name')
def __str__(self):
return "pk=%s car=%s driver=%s" % (str(self.pk), self.car, self.driver)
# Through models using multi-table inheritance
class Event(models.Model):
name = models.CharField(max_length=50, unique=True)
people = models.ManyToManyField('Person', through='IndividualCompetitor')
special_people = models.ManyToManyField(
'Person',
through='ProxiedIndividualCompetitor',
related_name='special_event_set',
)
teams = models.ManyToManyField('Group', through='CompetingTeam')
class Competitor(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
class IndividualCompetitor(Competitor):
person = models.ForeignKey(Person, models.CASCADE)
class CompetingTeam(Competitor):
team = models.ForeignKey(Group, models.CASCADE)
class ProxiedIndividualCompetitor(IndividualCompetitor):
class Meta:
proxy = True
| 0 |
# -*- coding: utf-8 -*-
from codecs import open
import sys
#This script evaluates the systems on the SemEval 2018 task on Emoji Prediction.
#It takes the gold standard and system's output file as input and prints the results in terms of macro and micro average F-Scores (0-100).
def f1(precision,recall):
return (2.0*precision*recall)/(precision+recall)
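# Worked example: precision=0.5 and recall=0.25 give
# f1 = (2.0*0.5*0.25)/(0.5+0.25) = 0.25/0.75 ~= 0.333.
# The harmonic mean is undefined when precision == recall == 0; the callers
# below guard against that case before calling f1().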
def main(path_goldstandard, path_outputfile):
truth_dict={}
output_dict_correct={}
output_dict_attempted={}
truth_file_lines=open(path_goldstandard,encoding='utf8').readlines()
submission_file_lines=open(path_outputfile,encoding='utf8').readlines()
if len(submission_file_lines)!=len(truth_file_lines): sys.exit('ERROR: Number of lines in gold and output files differ')
for i in range(len(submission_file_lines)):
        line=submission_file_lines[i]
        emoji_code_gold=truth_file_lines[i].replace("\n","")
        if emoji_code_gold not in truth_dict: truth_dict[emoji_code_gold]=1
        else: truth_dict[emoji_code_gold]+=1
        emoji_code_output=line.replace("\n","")
if emoji_code_output==emoji_code_gold:
if emoji_code_output not in output_dict_correct: output_dict_correct[emoji_code_gold]=1
else: output_dict_correct[emoji_code_output]+=1
if emoji_code_output not in output_dict_attempted: output_dict_attempted[emoji_code_output]=1
else: output_dict_attempted[emoji_code_output]+=1
precision_total=0
recall_total=0
num_emojis=len(truth_dict)
attempted_total=0
correct_total=0
gold_occurrences_total=0
f1_total=0
for emoji_code in truth_dict:
gold_occurrences=truth_dict[emoji_code]
if emoji_code in output_dict_attempted: attempted=output_dict_attempted[emoji_code]
else: attempted=0
if emoji_code in output_dict_correct: correct=output_dict_correct[emoji_code]
else: correct=0
if attempted!=0:
precision=(correct*1.0)/attempted
recall=(correct*1.0)/gold_occurrences
if precision!=0.0 or recall!=0.0: f1_total+=f1(precision,recall)
attempted_total+=attempted
correct_total+=correct
gold_occurrences_total+=gold_occurrences
macrof1=f1_total/(num_emojis*1.0)
precision_total_micro=(correct_total*1.0)/attempted_total
recall_total_micro=(correct_total*1.0)/gold_occurrences_total
if precision_total_micro!=0.0 or recall_total_micro!=0.0: microf1=f1(precision_total_micro,recall_total_micro)
else: microf1=0.0
print ("Macro F-Score (official): "+str(round(macrof1*100,3)))
print ("-----")
print ("Micro F-Score: "+str(round(microf1*100,3)))
print ("Precision: "+str(round(precision_total_micro*100,3)))
print ("Recall: "+str(round(recall_total_micro*100,3)))
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) >= 2:
path_goldstandard = args[0]
path_outputfile = args[1]
main(path_goldstandard, path_outputfile)
else:
sys.exit('''
Requires:
path_goldstandard -> Path of the gold standard
path_outputfile -> Path of the system's outputfile
''')
| 0.027855 |
# Copyright 2017 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from flask import Flask
from fake_ubersmith.api.adapters.data_store import DataStore
from fake_ubersmith.api.ubersmith import UbersmithBase
class TestAdministrativeLocal(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
data_store = DataStore()
self.ubersmith_base = UbersmithBase(data_store)
self.ubersmith_base.hook_to(self.app)
def test_enable_crash_mode(self):
with self.app.test_client() as c:
resp = c.post(
'api/2.0/',
data={
"method": "hidden.enable_crash_mode",
}
)
self.assertEqual(resp.status_code, 200)
self.assertTrue(self.ubersmith_base.crash_mode)
def test_disable_crash_mode(self):
self.ubersmith_base.crash_mode = True
with self.app.test_client() as c:
resp = c.post(
'api/2.0/',
data={
"method": "hidden.disable_crash_mode",
}
)
self.assertEqual(resp.status_code, 200)
self.assertFalse(self.ubersmith_base.crash_mode)
def test_any_api_call_will_return_500_when_crash_mode_enabled(self):
self.ubersmith_base.crash_mode = True
with self.app.test_client() as c:
resp = c.post(
'api/2.0/',
data={
"method": "any.api_call",
}
)
self.assertEqual(resp.status_code, 500)
| 0 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2018-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Various commands."""
import os
import signal
import functools
import logging
import typing
try:
import hunter
except ImportError:
hunter = None
from PyQt5.QtCore import Qt
from PyQt5.QtPrintSupport import QPrintPreviewDialog
from qutebrowser.api import cmdutils, apitypes, message, config
@cmdutils.register(name='reload')
@cmdutils.argument('tab', value=cmdutils.Value.count_tab)
def reloadpage(tab: typing.Optional[apitypes.Tab],
force: bool = False) -> None:
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
if tab is not None:
tab.reload(force=force)
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.count_tab)
def stop(tab: typing.Optional[apitypes.Tab]) -> None:
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
if tab is not None:
tab.stop()
def _print_preview(tab: apitypes.Tab) -> None:
"""Show a print preview."""
def print_callback(ok: bool) -> None:
if not ok:
message.error("Printing failed!")
tab.printing.check_preview_support()
diag = QPrintPreviewDialog(tab)
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(functools.partial(
tab.printing.to_printer, callback=print_callback))
diag.exec_()
def _print_pdf(tab: apitypes.Tab, filename: str) -> None:
"""Print to the given PDF file."""
tab.printing.check_pdf_support()
filename = os.path.expanduser(filename)
directory = os.path.dirname(filename)
if directory and not os.path.exists(directory):
os.mkdir(directory)
tab.printing.to_pdf(filename)
logging.getLogger('misc').debug("Print to file: {}".format(filename))
@cmdutils.register(name='print')
@cmdutils.argument('tab', value=cmdutils.Value.count_tab)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(tab: typing.Optional[apitypes.Tab],
preview: bool = False, *,
pdf: str = None) -> None:
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
pdf: The file path to write the PDF to.
"""
if tab is None:
return
try:
if preview:
_print_preview(tab)
elif pdf:
_print_pdf(tab, pdf)
else:
tab.printing.show_dialog()
except apitypes.WebTabError as e:
raise cmdutils.CommandError(e)
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def home(tab: apitypes.Tab) -> None:
"""Open main startpage in current tab."""
if tab.navigation_blocked():
message.info("Tab is pinned!")
else:
tab.load_url(config.val.url.start_pages[0])
@cmdutils.register(debug=True)
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def debug_dump_page(tab: apitypes.Tab, dest: str, plain: bool = False) -> None:
"""Dump the current page's content to a file.
Args:
dest: Where to write the file to.
plain: Write plain text instead of HTML.
"""
dest = os.path.expanduser(dest)
def callback(data: str) -> None:
"""Write the data to disk."""
try:
with open(dest, 'w', encoding='utf-8') as f:
f.write(data)
except OSError as e:
message.error('Could not write page: {}'.format(e))
else:
message.info("Dumped page to {}.".format(dest))
tab.dump_async(callback, plain=plain)
@cmdutils.register(maxsplit=0)
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def insert_text(tab: apitypes.Tab, text: str) -> None:
"""Insert text at cursor position.
Args:
text: The text to insert.
"""
def _insert_text_cb(elem: typing.Optional[apitypes.WebElement]) -> None:
if elem is None:
message.error("No element focused!")
return
try:
elem.insert_text(text)
except apitypes.WebElemError as e:
message.error(str(e))
return
tab.elements.find_focused(_insert_text_cb)
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('filter_', choices=['id'])
def click_element(tab: apitypes.Tab, filter_: str, value: str, *,
target: apitypes.ClickTarget =
apitypes.ClickTarget.normal,
force_event: bool = False) -> None:
"""Click the element matching the given filter.
The given filter needs to result in exactly one element, otherwise, an
error is shown.
Args:
filter_: How to filter the elements.
id: Get an element based on its ID.
value: The value to filter for.
target: How to open the clicked element (normal/tab/tab-bg/window).
force_event: Force generating a fake click event.
"""
def single_cb(elem: typing.Optional[apitypes.WebElement]) -> None:
"""Click a single element."""
if elem is None:
message.error("No element found with id {}!".format(value))
return
try:
elem.click(target, force_event=force_event)
except apitypes.WebElemError as e:
message.error(str(e))
return
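    # Dispatch table mapping each supported filter to an (element finder,
    # per-element callback) pair; only 'id' is supported today, and a new
    # filter would add another entry here.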
handlers = {
'id': (tab.elements.find_id, single_cb),
}
handler, callback = handlers[filter_]
handler(value, callback)
@cmdutils.register(debug=True)
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def debug_webaction(tab: apitypes.Tab, action: str, count: int = 1) -> None:
"""Execute a webaction.
Available actions:
http://doc.qt.io/archives/qt-5.5/qwebpage.html#WebAction-enum (WebKit)
http://doc.qt.io/qt-5/qwebenginepage.html#WebAction-enum (WebEngine)
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
for _ in range(count):
try:
tab.action.run_string(action)
except apitypes.WebTabError as e:
raise cmdutils.CommandError(str(e))
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.count_tab)
def tab_mute(tab: typing.Optional[apitypes.Tab]) -> None:
"""Mute/Unmute the current/[count]th tab.
Args:
count: The tab index to mute or unmute, or None
"""
if tab is None:
return
try:
tab.audio.set_muted(not tab.audio.is_muted(), override=True)
except apitypes.WebTabError as e:
raise cmdutils.CommandError(e)
@cmdutils.register()
def nop() -> None:
"""Do nothing."""
@cmdutils.register()
def message_error(text: str) -> None:
"""Show an error message in the statusbar.
Args:
text: The text to show.
"""
message.error(text)
@cmdutils.register()
@cmdutils.argument('count', value=cmdutils.Value.count)
def message_info(text: str, count: int = 1) -> None:
"""Show an info message in the statusbar.
Args:
text: The text to show.
count: How many times to show the message
"""
for _ in range(count):
message.info(text)
@cmdutils.register()
def message_warning(text: str) -> None:
"""Show a warning message in the statusbar.
Args:
text: The text to show.
"""
message.warning(text)
@cmdutils.register(debug=True)
@cmdutils.argument('typ', choices=['exception', 'segfault'])
def debug_crash(typ: str = 'exception') -> None:
"""Crash for debugging purposes.
Args:
typ: either 'exception' or 'segfault'.
"""
if typ == 'segfault':
os.kill(os.getpid(), signal.SIGSEGV)
raise Exception("Segfault failed (wat.)")
raise Exception("Forced crash")
@cmdutils.register(debug=True, maxsplit=0, no_cmd_split=True)
def debug_trace(expr: str = "") -> None:
"""Trace executed code via hunter.
Args:
expr: What to trace, passed to hunter.
"""
if hunter is None:
raise cmdutils.CommandError("You need to install 'hunter' to use this "
"command!")
try:
eval('hunter.trace({})'.format(expr))
except Exception as e:
raise cmdutils.CommandError("{}: {}".format(e.__class__.__name__, e))
| 0 |
#!/usr/bin/env python2
"""
Syncthing-GTK - IDDialog
Dialog with Device ID and generated QR code
"""
from __future__ import unicode_literals
from gi.repository import Gio, GLib
from .tools import IS_WINDOWS
from syncthing_gtk.uibuilder import UIBuilder
import urllib2, httplib, ssl
import os, tempfile, logging
log = logging.getLogger("IDDialog")
class IDDialog(object):
""" Dialog with Device ID and generated QR code """
def __init__(self, app, device_id):
self.app = app
self.device_id = device_id
self.setup_widgets()
self.ssl_ctx = create_ssl_context()
self.load_data()
def __getitem__(self, name):
""" Convince method that allows widgets to be accessed via self["widget"] """
return self.builder.get_object(name)
def show(self, parent=None):
if not parent is None:
self["dialog"].set_transient_for(parent)
self["dialog"].show_all()
def close(self):
self["dialog"].hide()
self["dialog"].destroy()
def setup_widgets(self):
# Load glade file
self.builder = UIBuilder()
self.builder.add_from_file(os.path.join(self.app.gladepath, "device-id.glade"))
self.builder.connect_signals(self)
self["vID"].set_text(self.device_id)
def load_data(self):
""" Loads QR code from Syncthing daemon """
if IS_WINDOWS:
return self.load_data_urllib()
uri = "%s/qr/?text=%s" % (self.app.daemon.get_webui_url(), self.device_id)
io = Gio.file_new_for_uri(uri)
io.load_contents_async(None, self.cb_syncthing_qr, ())
def load_data_urllib(self):
""" Loads QR code from Syncthing daemon """
uri = "%s/qr/?text=%s" % (self.app.daemon.get_webui_url(), self.device_id)
api_key = self.app.daemon.get_api_key()
opener = urllib2.build_opener(DummyHTTPSHandler(self.ssl_ctx))
if not api_key is None:
opener.addheaders = [("X-API-Key", api_key)]
a = opener.open(uri)
data = a.read()
tf = tempfile.NamedTemporaryFile("wb", suffix=".png", delete=False)
tf.write(data)
tf.close()
self["vQR"].set_from_file(tf.name)
os.unlink(tf.name)
def cb_btClose_clicked(self, *a):
self.close()
def cb_syncthing_qr(self, io, results, *a):
"""
Called when QR code is loaded or operation fails. Image is then
displayed in dialog, failure is silently ignored.
"""
try:
ok, contents, etag = io.load_contents_finish(results)
if ok:
# QR is loaded, save it to temp file and let GTK to handle
# rest
tf = tempfile.NamedTemporaryFile("wb", suffix=".png", delete=False)
tf.write(contents)
tf.close()
self["vQR"].set_from_file(tf.name)
os.unlink(tf.name)
except GLib.Error as e:
if e.code in [14, 15]:
# Unauthorized. Grab CSRF token from daemon and try again
log.warning("Failed to load image using glib. Retrying with urllib2.")
self.load_data_urllib()
except Exception as e:
log.exception(e)
return
finally:
del io
def create_ssl_context():
    """ May return None if ssl is not available """
    if hasattr(ssl, "create_default_context"):
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        # the original code built ctx but never returned it, so callers
        # always received None
        return ctx
    log.warning("SSL is not available, cannot verify server certificate.")
    return None
class DummyHTTPSHandler(urllib2.HTTPSHandler):
"""
Dummy HTTPS handler that ignores certificate errors. This in unsafe,
but used ONLY for QR code images.
"""
def __init__(self, ctx):
urllib2.HTTPSHandler.__init__(self)
self.ctx = ctx
def https_open(self, req):
return self.do_open(self.getConnection, req)
    def getConnection(self, host, timeout=300):
        if not self.ctx is None:
            return httplib.HTTPSConnection(host, context=self.ctx)
        # fall back to a plain HTTPS connection; the original 'return True'
        # was never a usable connection object
        return httplib.HTTPSConnection(host)
| 0.033546 |
#!/usr/bin/env python
"""
Robocom teleoperator node
"""
import sys, os, time
import roslib
import rospy
from sensor_msgs.msg import Joy
from r2p.msg import Velocity
topic = rospy.get_param('topic', 'robot')
setpoint_scale = rospy.get_param('setpoint_scale', {'x': 1, 'y': 1, 'w': 1})
gear_ratio = rospy.get_param('gear_ratio', (
{'x': 0.25, 'y': 0.25, 'w': 0.25},
{'x': 0.5, 'y': 0.5, 'w': 0.5},
{'x': 0.75, 'y': 0.75, 'w': 0.75},
{'x': 1, 'y': 1, 'w': 1}
))
setpoint = {'x': 0.0,'y': 0.0,'w': 0.0}
gear = 0
restart = False
def joy_cb(msg):
global gear
global setpoint
global restart
if msg.buttons[6]:
restart = True
if msg.buttons[3]:
gear = 3
if msg.buttons[2]:
gear = 2
if msg.buttons[1]:
gear = 1
if msg.buttons[0]:
gear = 0
setpoint['x'] = msg.axes[1] * setpoint_scale['x'] * gear_ratio[gear]['x']
setpoint['y'] = msg.axes[0] * setpoint_scale['y'] * gear_ratio[gear]['y']
setpoint['w'] = msg.axes[3] * setpoint_scale['w'] * gear_ratio[gear]['w']
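# Worked example of the scaling above, using the default parameters: full
# forward stick (msg.axes[1] == 1.0) in gear 0 gives
# setpoint['x'] = 1.0 * 1 * 0.25 = 0.25, while gear 3 gives 1.0.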
def main():
global restart
# Initialize ROS stuff
rospy.init_node("teleop_joy")
r = rospy.Rate(20) # 20hz
pubVelocity = rospy.Publisher(topic, Velocity)
pubVelocity.publish(Velocity(0.0, 0.0, 0.0))
subJoy = rospy.Subscriber("/joy", Joy, joy_cb)
while not rospy.is_shutdown():
if restart == True:
pubVelocity.unregister()
rospy.sleep(1)
pubVelocity = rospy.Publisher(topic, Velocity)
pubVelocity.publish(Velocity(0.0, 0.0, 0.0))
restart = False
print setpoint
pubVelocity.publish(Velocity(setpoint['x'], setpoint['y'], setpoint['w']))
r.sleep()
# Stop the robot
pubVelocity.publish(Velocity(0.0, 0.0, 0.0))
# Call the 'main' function when this script is executed
if __name__ == "__main__":
try: main()
except rospy.ROSInterruptException: pass
| 0.036789 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2015 Vauxoo
# Author: Yanina Aular
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api, models
class StockPicking(models.Model):
_inherit = 'stock.picking'
@api.multi
def action_invoice_create(self, journal_id,
group=False, type='out_invoice'):
invoices = super(StockPicking, self).\
action_invoice_create(journal_id=journal_id,
group=group,
type=type)
if type == 'in_invoice':
prodlot_obj = self.env['stock.production.lot']
for picking in self:
for move in picking.move_lines:
if move and move.quant_ids:
lot = move.quant_ids[0].lot_id
if lot.supplier_invoice_line_id:
continue
for inv_id in invoices:
for inv_line in self.env['account.invoice'].\
browse(inv_id).invoice_line:
lots = prodlot_obj.\
search([('supplier_invoice_line_id',
'=',
inv_line.id)])
if inv_line.product_id.id == \
lot.product_id.id and \
len(lots) < inv_line.quantity:
lot.write({'supplier_invoice_line_id':
inv_line.id})
return invoices
| 0 |
#!/usr/bin/python
from gi.repository import Gtk, GObject
import xml.dom.minidom as dom
import sys
import copy
import operator
from widgets import *
from openocd import *
class Peripheral :
def __init__(self, name, description, group, base_address) :
self.__dict__.update({(k,v) for k,v in locals().items() if k != "self"})
self.registers = {}
def append_register(self, reg) :
reg.peripheral = self
self.registers[reg.name] = reg
def copy(self) :
return copy.deepcopy(self)
def __repr__(self) :
return "Peripheral "+self.name+"@"+hex(self.base_address)
class Register :
def __init__(self, name, display_name, description, address_offset, size, readable, writable, reset_value) :
self.__dict__.update({(k,v) for k,v in locals().items() if k != "self"})
self.fields = {}
self.cbs = []
self._auto = False
self._value = reset_value
def bind(self, cb) :
self.cbs.append(cb)
def append_field(self, field) :
field.register = self
self.fields[field.name] = field
def trigger(self) :
for cb in self.cbs :
#print("calling", cb, self._value)
cb(self._value)
for field in self.fields.values() :
for cb in field.cbs :
cb(field.value)
def read(self) :
self.value = self.peripheral.connection.memory_read(self.peripheral.base_address+self.address_offset, size=self.size)
self.trigger()
def write(self) :
self.peripheral.connection.memory_write(self.peripheral.base_address+self.address_offset, self.value, size=self.size)
@property
def auto(self) :
return self._auto
@auto.setter
def auto(self, v) :
self._auto = v
if self._auto :
self.write()
self.read()
@property
def value(self) :
return self._value
@value.setter
def value(self, v) :
print("setting", self.name, v)
if v<0 :
traceback.print_stack()
v=0
print("setting", self.name, v)
if v != self._value :
print("really setting", self.name, v)
self._value = v
self.trigger()
if self.auto :
self.write()
self.read()
class RegisterField :
def __init__(self, name, description, offset, width) :
self.__dict__.update({(k,v) for k,v in locals().items() if k != "self"})
self._value = 0
self.cbs = []
def bind(self, cb) :
self.cbs.append(cb)
@property
def value(self) :
return (self.register.value>>self.offset)&(2**self.width-1)
@value.setter
def value(self, vn) :
v = self.register.value
v &= ~((2**self.width-1)<<self.offset)
v |= vn<<self.offset
self.register.value = v
xml=dom.parse(sys.argv[1])
device = xml.getElementsByTagName("device")[0]
peripheralsNode = device.getElementsByTagName("peripherals")[0]
peripherals = {}
for peripheral in [node for node in peripheralsNode.getElementsByTagName("peripheral") if node.parentNode==peripheralsNode] :
if peripheral.getAttribute("derivedFrom") == "" :
name = [node for node in peripheral.childNodes if node.nodeName=="name"][0].firstChild.nodeValue
description = " ".join([node for node in peripheral.childNodes if node.nodeName=="description"][0].firstChild.nodeValue.split())
try :
group = [node for node in peripheral.childNodes if node.nodeName=="group"][0].firstChild.nodeValue
except IndexError :
group = ""
base_address = int([node for node in peripheral.childNodes if node.nodeName=="baseAddress"][0].firstChild.nodeValue, 0)
#print(name, description, group, base_address)
p = Peripheral(name, description, group, base_address)
peripherals[p.name] = p
for register in [node for node in peripheral.getElementsByTagName("registers")[0].childNodes if node.nodeName=="register"] :
name = [node for node in register.childNodes if node.nodeName=="name"][0].firstChild.nodeValue
try :
display_name = [node for node in register.childNodes if node.nodeName=="display_name"][0].firstChild.nodeValue
except IndexError :
display_name = name
description = " ".join([node for node in register.childNodes if node.nodeName=="description"][0].firstChild.nodeValue.split())
address_offset = int([node for node in register.childNodes if node.nodeName=="addressOffset"][0].firstChild.nodeValue, 0)
size = int([node for node in register.childNodes if node.nodeName=="size"][0].firstChild.nodeValue, 0)
try :
reset_value = int([node for node in register.childNodes if node.nodeName=="resetValue"][0].firstChild.nodeValue, 0)
except (IndexError, ValueError) :
reset_value = 0
try :
readable = "read" in [node for node in register.childNodes if node.nodeName=="access"][0].firstChild.nodeValue
writable = "write" in [node for node in register.childNodes if node.nodeName=="access"][0].firstChild.nodeValue
except IndexError :
readable = True
writable = True
r=Register(name, display_name, description, address_offset, size, readable, writable, reset_value)
p.append_register(r)
fields = register.getElementsByTagName("fields")
if fields.length > 0:
for field in [node for node in fields[0].childNodes if node.nodeName=="field"] :
name = [node for node in field.childNodes if node.nodeName=="name"][0].firstChild.nodeValue
description = " ".join([node for node in field.childNodes if node.nodeName=="description"][0].firstChild.nodeValue.split())
offset = int([node for node in field.childNodes if node.nodeName=="bitOffset"][0].firstChild.nodeValue, 0)
width = int([node for node in field.childNodes if node.nodeName=="bitWidth"][0].firstChild.nodeValue, 0)
f=RegisterField(name, description, offset, width)
r.append_field(f)
#r.reset()
else :
derived = peripheral.getAttribute("derivedFrom")
p = peripherals[derived].copy()
p.name = [node for node in peripheral.childNodes if node.nodeName=="name"][0].firstChild.nodeValue
p.base_address = int([node for node in peripheral.childNodes if node.nodeName=="baseAddress"][0].firstChild.nodeValue, 0)
peripherals[p.name] = p
builder = Gtk.Builder()
builder.add_from_file("svd.glade")
window = builder.get_object("window1")
window.show_all()
class Handler :
def quit(self, widget) :
Gtk.main_quit()
builder.connect_signals(Handler())
peripheralwidgets = []
connection = OOCDTelnet()
for peripheral in peripherals.values() :
peripheral.connection=connection
box = builder.get_object("box1")
for peripheral in sorted(peripherals.values(), key=operator.attrgetter("name")) :
peripheralwidget = PeripheralWidget(peripheral)
peripheralwidgets.append(peripheralwidget)
box.pack_start(peripheralwidget, True, True, 0)
box.show_all()
Gtk.main()
| 0.046497 |
import string
import httplib, sys
import myparser
import re
from search_results import *
class search_jigsaw:
def __init__(self,word,options):
self.word=word.replace(' ', '%20')
self.last_results=u""
self.total_results=u""
self.server="www.jigsaw.com"
self.hostname="www.jigsaw.com"
self.userAgent="(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
self.quantity=100
self.limit=int(options.limit)
self.counter=0
def do_search(self):
h = httplib.HTTP(self.server)
h.putrequest('GET', "/FreeTextSearch.xhtml?opCode=search&autoSuggested=True&freeText=" + self.word)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, response_headers = h.getreply()
encoding=response_headers['content-type'].split('charset=')[-1]
self.last_results = unicode(h.getfile().read(), encoding)
self.total_results+=self.last_results
def check_next(self):
renext = re.compile('> Next <')
nextres=renext.findall(self.last_results)
return True if nextres !=[] else False
def get_results(self):
raw_results=myparser.parser(self.total_results,self.word)
results = search_results()
results.people = raw_results.people_jigsaw()
return results
def process(self):
print "[-] Searching Jigsaw:"
while (self.counter < self.limit):
self.do_search()
if self.check_next() == True:
self.counter+=self.quantity
print "\r\tProcessed "+ str(self.counter) + " results..."
else:
break
| 0.043391 |
import sublime, json, urllib
CFDOCS_TEMPLATE = ""
CFDOCS_PARAM_TEMPLATE = ""
CFDOCS_ERROR_TEMPLATE = ""
CFDOCS_BASE_URL = "https://raw.githubusercontent.com/foundeo/cfdocs/master/data/en/"
def load(plugin_path):
global CFDOCS_TEMPLATE, CFDOCS_PARAM_TEMPLATE, CFDOCS_ERROR_TEMPLATE
CFDOCS_TEMPLATE = load_template(plugin_path, "cfdocs")
CFDOCS_PARAM_TEMPLATE = load_template(plugin_path, "cfdocs_param")
CFDOCS_ERROR_TEMPLATE = load_template(plugin_path, "cfdocs_error")
def load_template(plugin_path, filename):
with open(plugin_path + "/templates/" + filename + ".html", "r") as f:
html_string = f.read()
return html_string
def get_cfdoc(function_or_tag):
data, success = fetch_cfdoc(function_or_tag)
if success:
return build_cfdoc_html(function_or_tag, data)
return build_cfdoc_error(function_or_tag, data)
def fetch_cfdoc(function_or_tag):
full_url = CFDOCS_BASE_URL + function_or_tag + ".json"
try:
json_string = urllib.request.urlopen(full_url).read().decode("utf-8")
except urllib.error.HTTPError as e:
data = {"error_message": "Unable to fetch " + function_or_tag + ".json<br>" + str(e)}
return data, False
try:
data = json.loads(json_string)
except ValueError as e:
data = {"error_message": "Unable to decode " + function_or_tag + ".json<br>ValueError: " + str(e)}
return data, False
return data, True
def build_cfdoc_html(function_or_tag, data):
variables = { "function_or_tag": function_or_tag, "href": "http://cfdocs.org/" + function_or_tag, "params": "" }
variables["syntax"] = data["syntax"].replace("<","<").replace(">",">")
variables["description"] = data["description"].replace("<","<").replace(">",">").replace("\n","<br>")
if len(data["returns"]) > 0:
variables["syntax"] += ":" + data["returns"]
if len(data["params"]) > 0:
variables["params"] = "<ul>"
for param in data["params"]:
param_variables = {"name": param["name"], "description": param["description"].replace("\n","<br>"), "values": ""}
if len(param["values"]):
param_variables["values"] = "<em>values:</em> " + ", ".join([str(value) for value in param["values"]])
variables["params"] += "<li>" + sublime.expand_variables(CFDOCS_PARAM_TEMPLATE, param_variables) + "</li>"
variables["params"] += "</ul>"
return sublime.expand_variables(CFDOCS_TEMPLATE, variables)
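# A minimal usage sketch (assumes a Sublime Text plugin context where load()
# has been called with the plugin's path so the templates above are populated;
# "arraylen" is just an example page name):
#
# load(plugin_path)             # once, at plugin startup
# html = get_cfdoc("arraylen")  # popup-ready HTML, or the error template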
def build_cfdoc_error(function_or_tag, data):
return sublime.expand_variables(CFDOCS_ERROR_TEMPLATE, data)
| 0.025271 |
from nose.plugins.attrib import attr
from perfpoint import *
import numpy as np
sample_counters = [
(4, '00c0', 'instructions'),
]
@docstring_name
def check_data_shape(data, interval):
'''Check that align produces any data at all'''
assert len(data.shape)==2, 'Wrong dimensionality in data'
assert data.shape[0]>5, 'Insufficient rows in data'
assert data.shape[1]==len(sample_counters)+1, 'Insufficient columns in data'
@docstring_name
def check_data_values(data, interval):
'''Check for ridiculous values in the data'''
assert np.all(data>=0), 'Negative values found in data'
for (c,counter_tuple) in enumerate(sample_counters):#xrange(1,data.shape[1]+1):
assert np.count_nonzero(data[:,c+1])>5, 'Mostly zero data found in column '+str(c+1)+': '+str(counter_tuple)
@attr('check','align')
def test_alignment_checks():
interval = 1000000
align = Alignment(ipoint_interval=interval, argv=[compute])
for counter in sample_counters:
align.add_counter(*counter)
data = align.run()
# Run several different diagnostics
yield (check_data_shape, data, interval)
yield (check_data_values, data, interval)
@attr('check','align')
@docstring_name
def test_ragged_truncate():
'''Check that we can truncate ragged arrays'''
data=[ np.vstack([ np.arange(1,100), np.arange(1,100) ]).T,
np.vstack([ np.arange(1,200), np.arange(1,200) ]).T ]
new = align.align_truncated(data,1)
assert new.shape==(99,3), 'align_truncated didnt truncate correctly'
assert np.all(new[:,1]==new[:,2]), 'align_truncated didnt align the data'
@attr('check','align')
@docstring_name
def test_ragged_scaled():
'''Check that we can scale ragged arrays'''
data=[ np.vstack([ np.arange(1,100), np.arange(1,100) ]).T,
np.vstack([ np.arange(3,300), np.arange(3,300) ]).T ]
new = align.align_scaled(data,2)
assert new.shape==(99,3), 'align_scaled didnt scale correctly'
assert np.all(new[:,1]==new[:,2]), 'align_scaled didnt align the data'
@attr('check','align')
@docstring_name
def test_smoothing():
'''Check oversample smoothing'''
trace = np.vstack([ np.arange(1,100), np.arange(1,100) ]).T
overtrace = np.empty(trace.shape)
overtrace[:,0] = trace[:,0]
# "oversample" 8x
for i in xrange(0,99):
if i&7:
overtrace[i,1] = trace[i&(~7),1]
else:
overtrace[i,1] = trace[i,1]
# now correct it back
overtrace = align.correct_for_oversampling(overtrace)
recovered = trace[:-10]==overtrace[:-10] # the last bit isn't recovered
assert np.all(recovered), 'Oversample smoothing didnt recover original samples:\n'+str(np.dstack([trace[~recovered],overtrace[~recovered]]))
#@attr('stats')
#def test_alignment_checks():
# interval = 1000000
# align = Alignment(ipoint_interval=interval, argv=[compute]
# for counter in sample_counters:
# align.add_counter(*counter)
# data = align.run()
# # Run several different diagnostics
# yield (check_data, data, interval)
| 0.037491 |
# coding: utf8
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s rows deleted': '%s records cancellati',
'%s rows updated': '*** %s records modificati',
'Asíncrona': 'Asíncrona',
'Available databases and tables': 'Available databases and tables',
'Ayuda': 'Ayuda',
'Cannot be empty': 'Cannot be empty',
'Check to delete': 'Check to delete',
'Comprobantes': 'Comprobantes',
'Configuración': 'Configuración',
'Configurar': 'Configurar',
'Consultas': 'Consultas',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Cotización': 'Cotización',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'DB Model': 'DB Model',
'Database': 'Database',
'Delete:': 'Delete:',
'Detalles': 'Detalles',
'Edit': 'Edit',
'Edit This App': 'Edit This App',
'Edit current record': 'Edit current record',
'Emisión': 'Emisión',
'Estado (dummy)': 'Estado (dummy)',
'FacturaLibre': 'FacturaLibre',
'FacturaLibre. Aplicación en desarrollo': 'FacturaLibre. Aplicación en desarrollo',
'FacturaLibre. Aplicación web para factura electrónica': 'FacturaLibre. Aplicación web para factura electrónica',
'FacturaLibre: interfase alternativa': 'FacturaLibre: interfase alternativa',
'FacturaLibre: interfaz de usuario alternativa': 'FacturaLibre: interfaz de usuario alternativa',
'Hello World': 'Salve Mondo',
'Import/Export': 'Import/Export',
'Index': 'Index',
'Información General': 'Información General',
'Información Técnica': 'Información Técnica',
'Inicio': 'Inicio',
'Internal State': 'Internal State',
'Invalid Query': 'Query invalida',
'Layout': 'Layout',
'Listar comprobantes.': 'Listar comprobantes.',
'Listar detalles': 'Listar detalles',
'Main Menu': 'Main Menu',
'Menu Model': 'Menu Model',
'New Record': 'New Record',
'No databases in this application': 'No databases in this application',
'Powered by': 'Powered by',
'Query:': 'Query:',
'Rows in table': 'Rows in table',
'Rows selected': 'Rows selected',
'Secuencial': 'Secuencial',
'Servicios Web': 'Servicios Web',
'Stylesheet': 'Stylesheet',
'Sure you want to delete this object?': 'Sicuro che vuoi cancellare questo oggetto?',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'Update:': 'Update:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'View': 'View',
'WSBFE': 'WSBFE',
'WSFEX': 'WSFEX',
'WSFEv0': 'WSFEv0',
'WSFEv1': 'WSFEv1',
'WSMTXCA': 'WSMTXCA',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Ciao da web2py',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'cache': 'cache',
'change password': 'change password',
'click here for online examples': 'clicca per vedere gli esempi',
'click here for the administrative interface': "clicca per l'interfaccia administrativa",
'customize me!': 'customize me!',
'data uploaded': 'dati caricati',
'database': 'database',
'database %s select': 'database %s select',
'db': 'db',
'design': 'progetta',
'done!': 'fatto!',
'edit profile': 'edit profile',
'export as csv file': 'export as csv file',
'insert new': 'insert new',
'insert new %s': 'insert new %s',
'invalid request': 'richiesta invalida!',
'login': 'login',
'logout': 'logout',
'new record inserted': 'nuovo record inserito',
'next 100 rows': 'next 100 rows',
'or import from csv file': 'or import from csv file',
'previous 100 rows': 'previous 100 rows',
'record': 'record',
'record does not exist': 'il record non esiste',
'record id': 'record id',
'register': 'register',
'selected': 'selected',
'state': 'stato',
'table': 'table',
'unable to parse csv file': 'non so leggere questo csv file',
'Últ.ID': 'Últ.ID',
'Últ.Nro.Cbte.': 'Últ.Nro.Cbte.',
}
| 0.025828 |
# -*- coding: utf-8 -*-
# © 2004-2011 Pexego Sistemas Informáticos. (http://pexego.es)
# © 2012 NaN·Tic (http://www.nan-tic.com)
# © 2013 Acysos (http://www.acysos.com)
# © 2013 Joaquín Pedrosa Gutierrez (http://gutierrezweb.es)
# © 2014-2015 Serv. Tecnol. Avanzados - Pedro M. Baeza
# (http://www.serviciosbaeza.com)
# © 2016 Antiun Ingenieria S.L. - Antonio Espinosa
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import fields, models, api
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.multi
@api.depends('cc_amount_untaxed',
'tax_line', 'tax_line.amount')
def _compute_amount_total_wo_irpf(self):
for invoice in self:
invoice.amount_total_wo_irpf = invoice.cc_amount_untaxed
for tax_line in invoice.tax_line:
if 'IRPF' not in tax_line.name:
invoice.amount_total_wo_irpf += tax_line.amount
amount_total_wo_irpf = fields.Float(
compute="_compute_amount_total_wo_irpf", store=True, readonly=True,
string="Total amount without IRPF taxes")
not_in_mod347 = fields.Boolean(
"Not included in 347 report",
help="If you mark this field, this invoice will not be included in "
"any AEAT 347 model report.", default=False)
| 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo.config.cfg
# Importing full names to not pollute the namespace and cause possible
# collisions with use of 'from nova.network import <foo>' elsewhere.
import nova.openstack.common.importutils
_network_opts = [
oslo.config.cfg.StrOpt('network_api_class',
default='nova.network.api.API',
help='The full class name of the '
'network API class to use'),
]
oslo.config.cfg.CONF.register_opts(_network_opts)
def API():
importutils = nova.openstack.common.importutils
network_api_class = oslo.config.cfg.CONF.network_api_class
if 'quantumv2' in network_api_class:
network_api_class = network_api_class.replace('quantumv2', 'neutronv2')
cls = importutils.import_class(network_api_class)
return cls()
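# Example: with the default network_api_class this imports and instantiates
# nova.network.api.API; legacy 'quantumv2' class paths are rewritten to
# 'neutronv2' before the import.
#
# network_api = API()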
| 0 |
from django.db import models
from django.contrib.auth.models import User
class Kategori(models.Model):
nama = models.CharField(max_length=255)
deskripsi = models.TextField()
slug = models.SlugField()
user = models.ForeignKey(User)
class Meta:
verbose_name_plural = 'Daftar Kategori'
verbose_name = 'Kategori'
def __unicode__(self):
return self.nama
class Pengeluaran(models.Model):
user = models.ForeignKey(User)
nama = models.CharField(max_length=255)
kategori = models.ForeignKey('Kategori')
tgl_buat = models.DateTimeField(auto_now_add=True)
jumlah = models.PositiveIntegerField()
harga = models.PositiveIntegerField()
total = models.PositiveIntegerField()
keterangan = models.TextField(blank=True,null=True)
class Meta:
verbose_name_plural = 'Daftar Pengeluaran'
verbose_name = 'Pengeluaran'
def __unicode__(self):
return self.nama
class Pemasukan(models.Model):
user = models.ForeignKey(User)
nama = models.CharField(max_length=255)
kategori = models.ForeignKey('Kategori')
tgl_buat = models.DateTimeField(auto_now_add=True)
jumlah = models.PositiveIntegerField()
keterangan = models.TextField(blank=True,null=True)
class Meta:
verbose_name_plural = 'Daftar Pemasukan'
verbose_name = 'Pemasukan'
def __unicode__(self):
return self.nama
| 0.026182 |
#! /usr/bin/env python
"""
Copyright [1999-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
"""
import re
import requests
import json
import sys
import logging
from pprint import pformat
import postgap.REST
import postgap.Globals
from postgap.DataModel import *
from postgap.Utils import *
from postgap.GWAS_Lead_Snp_Orientation import *
class GWAS_source(object):
def run(self, diseases, iris):
"""
Returns all GWAS SNPs associated to a disease in this source
Args:
* [ string ] (trait descriptions)
* [ string ] (trait Ontology IRIs)
Returntype: [ GWAS_Association ]
"""
assert False, "This stub should be defined"
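# A minimal sketch of a custom source honouring this interface (hypothetical
# class; a real implementation would return GWAS_Association objects):
#
# class My_Source(GWAS_source):
#     display_name = "My Source"
#     def run(self, diseases, iris):
#         return []  # look up associations for the given traits/IRIs here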
class GWASCatalog(GWAS_source):
display_name = 'GWAS Catalog'
def run(self, diseases, iris):
"""
Returns all GWAS SNPs associated to a disease in GWAS Catalog
Args:
* [ string ] (trait descriptions)
* [ string ] (trait Ontology IRIs)
Returntype: [ GWAS_Association ]
"""
if iris is not None and len(iris) > 0:
res = concatenate(self.query(query) for query in iris)
else:
res = concatenate(self.query(query) for query in diseases)
logging.debug("\tFound %i GWAS SNPs associated to diseases (%s) or EFO IDs (%s) in GWAS Catalog" % (len(res), ", ".join(diseases), ", ".join(iris)))
return res
def query(self, efo):
logging.info("Querying GWAS catalog for " + efo);
server = 'http://www.ebi.ac.uk'
url = '/gwas/rest/api/efoTraits/search/findByEfoUri?uri=%s' % (efo)
hash = postgap.REST.get(server, url)
'''
hash looks like this:
{
"_embedded": {
"efoTraits": [
{
"trait": "diabetes mellitus",
"uri": "http://www.ebi.ac.uk/efo/EFO_0000400",
"_links": {
"self": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/efoTraits/71"
},
"efoTrait": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/efoTraits/71"
},
"studies": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/efoTraits/71/studies"
},
"associations": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/efoTraits/71/associations"
}
}
}
]
},
"_links": {
"self": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/efoTraits/search/findByUri?uri=http://www.ebi.ac.uk/efo/EFO_0000400"
}
},
"page": {
"size": 20,
"totalElements": 1,
"totalPages": 1,
"number": 0
}
}
'''
list_of_GWAS_Associations = []
efoTraits = hash["_embedded"]["efoTraits"]
for efoTraitHash in efoTraits:
efoTraitLinks = efoTraitHash["_links"]
efoTraitName = efoTraitHash["trait"]
logging.info("Querying Gwas rest server for SNPs associated with " + efoTraitName)
association_rest_response = efoTraitLinks["associations"]
association_url = association_rest_response["href"]
try:
# e.g.: http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/efoTraits/71/associations
#
association_response = postgap.REST.get(association_url, "")
except:
continue
associations = association_response["_embedded"]["associations"]
'''
associations has this structure:
[
{
"riskFrequency": "NR",
"pvalueDescription": null,
"pvalueMantissa": 2,
"pvalueExponent": -8,
"multiSnpHaplotype": false,
"snpInteraction": false,
"snpType": "known",
"standardError": 0.0048,
"range": "[NR]",
"description": null,
"orPerCopyNum": null,
"betaNum": 0.0266,
"betaUnit": "unit",
"betaDirection": "increase",
"lastMappingDate": "2016-12-24T07:36:49.000+0000",
"lastUpdateDate": "2016-11-25T14:37:53.000+0000",
"pvalue": 2.0E-8,
"_links": {
"self": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/associations/16513018"
},
"association": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/associations/16513018"
},
"study": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/associations/16513018/study"
},
"snps": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/associations/16513018/snps"
},
"loci": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/associations/16513018/loci"
},
"efoTraits": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/associations/16513018/efoTraits"
},
"genes": {
"href": "http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/associations/16513018/genes"
}
}
},
...
]
'''
logging.info("Received " + str(len(associations)) + " associations with SNPs.")
logging.info("Fetching SNPs and pvalues.")
for current_association in associations:
# e.g. snp_url can be: http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/associations/16513018/snps
#
snp_url = current_association["_links"]["snps"]["href"]
snp_response = postgap.REST.get(snp_url, "")
"""
Example response:
{
_embedded: {
singleNucleotidePolymorphisms: [
{
rsId: "rs3757057",
merged: 0,
functionalClass: "intron_variant",
lastUpdateDate: "2016-12-25T03:48:35.194+0000",
_links: {}
}
]
},
_links: {}
}
"""
singleNucleotidePolymorphisms = snp_response["_embedded"]["singleNucleotidePolymorphisms"]
if (len(singleNucleotidePolymorphisms) == 0):
# sys.exit("Got no snp for a pvalue!")
continue
study_url = current_association["_links"]["study"]["href"]
study_response = postgap.REST.get(study_url, "")
"""
Example response:
{
author: "Barber MJ",
publicationDate: "2010-03-22T00:00:00.000+0000",
publication: "PLoS One",
title: "Genome-wide association of lipid-lowering response to statins in combined study populations.",
initialSampleSize: "3,928 European ancestry individuals",
replicateSampleSize: "NA",
pubmedId: "20339536",
gxe: false,
gxg: false,
genomewideArray: true,
targetedArray: false,
snpCount: 2500000,
qualifier: "~",
imputed: true,
pooled: false,
studyDesignComment: null,
accessionId: "GCST000635",
fullPvalueSet: false,
_links: {}
}
"""
study_id = study_response['accessionId']
pubmedId = study_response["publicationInfo"]["pubmedId"]
diseaseTrait = study_response["diseaseTrait"]["trait"]
ancestries = study_response["ancestries"]
"""
Example response:
{
_embedded: {
ancestries: [
{
type: "initial",
numberOfIndividuals: 3928,
description: "Los Angeles, CA; ",
previouslyReported: null,
notes: null,
_links: {}
}
]
},
_links: {}
}
"""
sample_size = sum(int(ancestry['numberOfIndividuals']) for ancestry in ancestries if ancestry['numberOfIndividuals'] is not None)
for current_snp in singleNucleotidePolymorphisms:
is_dbSNP_accession = "rs" in current_snp["rsId"]
if not(is_dbSNP_accession):
logging.warning("Did not get a valid dbSNP accession: (" + current_snp["rsId"] + ") from " + snp_url)
continue
if current_snp["rsId"] == '6':
continue
if current_snp["rsId"][-1] == u'\xa0':
current_snp["rsId"] = current_snp["rsId"].strip()
logging.debug(" received association with snp rsId: " + '{:12}'.format(current_snp["rsId"]) + " with a pvalue of " + str(current_association["pvalue"]))
associations_href = current_snp["_links"]["associations"]["href"]
associations = postgap.REST.get(associations_href, ext="")
riskAlleles = []
loci = current_association["loci"]
for locus in loci:
strongestRiskAlleles = locus["strongestRiskAlleles"]
riskAlleles.append(strongestRiskAlleles)
for riskAllele in riskAlleles:
try:
if gwas_risk_alleles_present_in_reference(riskAllele):
risk_alleles_present_in_reference = True
logging.info("Risk allele is present in reference");
else:
risk_alleles_present_in_reference = False
logging.info("Risk allele is not present in reference");
except none_of_the_risk_alleles_is_a_substitution_exception as e:
logging.warning(str(e))
logging.warning("Skipping this snp.")
continue
except variant_mapping_is_ambiguous_exception:
logging.warning("The variant mapping is ambiguous.")
logging.warning("Skipping this snp.")
continue
except some_alleles_present_in_reference_others_not_exception as e:
logging.warning(str(e));
logging.warning("Skipping this snp.")
continue
except no_dbsnp_accession_for_snp_exception as e:
logging.warning(str(e))
logging.warning("Skipping this snp.")
continue
except base_in_allele_missing_exception as e:
logging.warning(str(e));
logging.warning("Skipping this snp.")
continue
except cant_determine_base_at_snp_in_reference_exception as e:
logging.warning(str(e));
logging.warning("Skipping this snp.")
continue
except gwas_data_integrity_exception as e:
logging.warning(str(e));
logging.warning("Skipping this snp.")
continue
ci_start_value = None
ci_end_value = None
if current_association["range"] is not None:
ci_values = re.findall(r'\d+\.\d+', current_association["range"])
if ci_values:
try:
ci_start_value = ci_values[0]
ci_end_value = ci_values[1]
except:
pass
list_of_GWAS_Associations.append(
GWAS_Association(
disease = Disease(
name = efoTraitName,
efo = efo
),
reported_trait = diseaseTrait,
snp = current_snp["rsId"],
pvalue = current_association["pvalue"],
pvalue_description = current_association["pvalueDescription"],
source = 'GWAS Catalog',
publication = 'PMID' + pubmedId,
study = study_id,
sample_size = sample_size,
# For fetching additional information like risk allele later, if needed.
# E.g.: http://wwwdev.ebi.ac.uk/gwas/beta/rest/api/singleNucleotidePolymorphisms/9765
rest_hash = current_snp,
risk_alleles_present_in_reference = risk_alleles_present_in_reference,
odds_ratio = current_association["orPerCopyNum"],
odds_ratio_ci_start = ci_start_value,
odds_ratio_ci_end = ci_end_value,
beta_coefficient = current_association["betaNum"],
beta_coefficient_unit = current_association["betaUnit"],
beta_coefficient_direction = current_association["betaDirection"]
)
)
if len(list_of_GWAS_Associations) > 0:
logging.info("Fetched " + str(len(list_of_GWAS_Associations)) + " SNPs and pvalues.")
if len(list_of_GWAS_Associations) == 0:
logging.info("Found no associated SNPs and pvalues.")
return list_of_GWAS_Associations
class Neale_UKB(GWAS_source):
display_name = "Neale_UKB"
def run(self, diseases, iris):
"""
Returns all GWAS SNPs associated to a disease in Neale_UKB
Args:
* [ string ] (trait descriptions)
* [ string ] (trait Ontology IRIs)
Returntype: [ GWAS_Association ]
"""
logger = logging.getLogger(__name__)
# This database does not have EFOs so give up early if unneeded
if diseases == None or len(diseases) == 0:
return []
file = open(postgap.Globals.DATABASES_DIR+"/Neale_UKB.txt")
res = [ self.get_association(line, diseases, iris) for line in file ]
res = filter(lambda X: X is not None, res)
logger.info("\tFound %i GWAS SNPs associated to diseases (%s) or EFO IDs (%s) in Neale_UKB" % (len(res), ", ".join(diseases), ", ".join(iris)))
return res
def get_association(self, line, diseases, iris):
'''
Neale_UKB file format (tab-separated columns, matching the unpack below):
snp, disease, reported_trait, p_value, sample_size, source, study,
odds_ratio, beta_coefficient, beta_coefficient_direction
'''
try:
snp, disease, reported_trait, p_value, sample_size, source, study, odds_ratio, beta_coefficient, beta_coefficient_direction = line.strip().split('\t')
except:
return None
if reported_trait in diseases:
return GWAS_Association(
pvalue = float(p_value),
pvalue_description = None,
snp = snp,
disease = None,
reported_trait = reported_trait + " " + disease,
source = 'UK Biobank',
publication = source,
study = None,
sample_size = sample_size,
odds_ratio = None,
beta_coefficient = beta_coefficient,
beta_coefficient_unit = None,
beta_coefficient_direction = beta_coefficient_direction
)
else:
return None
class GRASP(GWAS_source):
display_name = "GRASP"
def run(self, diseases, iris):
"""
Returns all GWAS SNPs associated to a disease in GRASP
Args:
* [ string ] (trait descriptions)
* [ string ] (trait Ontology IRIs)
Returntype: [ GWAS_Association ]
"""
file = open(postgap.Globals.DATABASES_DIR+"/GRASP.txt")
res = [ self.get_association(line, diseases, iris) for line in file ]
res = filter(lambda X: X is not None, res)
logging.info("\tFound %i GWAS SNPs associated to diseases (%s) or EFO IDs (%s) in GRASP" % (len(res), ", ".join(diseases), ", ".join(iris)))
return res
def get_association(self, line, diseases, iris):
'''
GRASP file format:
1. NHLBIkey
2. HUPfield
3. LastCurationDate
4. CreationDate
5. SNPid(dbSNP134)
6. chr(hg19)
7. pos(hg19)
8. PMID
9. SNPid(in paper)
10. LocationWithinPaper
11. Pvalue
12. Phenotype
13. PaperPhenotypeDescription
14. PaperPhenotypeCategories
15. DatePub
16. InNHGRIcat(as of 3/31/12)
17. Journal
18. Title
19. IncludesMale/Female Only Analyses
20. Exclusively Male/Female
21. Initial Sample Description
22. Replication Sample Description
23. Platform [SNPs passing QC]
24. GWASancestryDescription
25. TotalSamples(discovery+replication)
26. TotalDiscoverySamples
27. European Discovery
28. African Discovery
29. East Asian Discovery
30. Indian/South Asian Discovery
31. Hispanic Discovery
32. Native Discovery
33. Micronesian Discovery
34. Arab/ME Discovery
35. Mixed Discovery
36. Unspecified Discovery
37. Filipino Discovery
38. Indonesian Discovery
39. Total replication samples
40. European Replication
41. African Replication
42. East Asian Replication
43. Indian/South Asian Replication
44. Hispanic Replication
45. Native Replication
46. Micronesian Replication
47. Arab/ME Replication
48. Mixed Replication
49. Unspecified Replication
50. Filipino Replication
51. Indonesian Replication
52. InGene
53. NearestGene
54. InLincRNA
55. InMiRNA
56. InMiRNABS
57. dbSNPfxn
58. dbSNPMAF
59. dbSNPalleles/het/se
60. dbSNPvalidation
XX61. dbSNPClinStatus
XX62. ORegAnno
XX63. ConservPredTFBS
XX64. HumanEnhancer
XX65. RNAedit
XX66. PolyPhen2
XX67. SIFT
XX68. LS-SNP
XX69. UniProt
XX70. EqtlMethMetabStudy
71. EFO string
'''
items = line.rstrip().split('\t')
for iri in items[70].split(','):
if iri in iris:
try:
return GWAS_Association(
pvalue = float(items[10]),
pvalue_description = None,
snp = "rs" + items[4],
disease = Disease(name = postgap.EFO.term(iri), efo = iri),
reported_trait = items[12].decode('latin1'),
source = self.display_name,
publication = items[7],
study = None,
sample_size = int(items[24]),
odds_ratio = None,
odds_ratio_ci_start = None,
odds_ratio_ci_end = None,
beta_coefficient = None,
beta_coefficient_unit = None,
beta_coefficient_direction = None,
rest_hash = None,
risk_alleles_present_in_reference = None
)
except:
return None
if items[12] in diseases:
iri = items[70].split(',')[0]
try:
return GWAS_Association(
pvalue = float(items[10]),
pvalue_description = None,
snp = "rs" + items[4],
disease = Disease(name = postgap.EFO.term(iri), efo = iri),
reported_trait = items[12].decode('latin1'),
source = self.display_name,
publication = items[7],
study = None,
sample_size = int(items[24]),
odds_ratio = None,
odds_ratio_ci_start=None,
odds_ratio_ci_end=None,
beta_coefficient = None,
beta_coefficient_unit = None,
beta_coefficient_direction = None,
rest_hash = None,
risk_alleles_present_in_reference = None
)
except:
return None
return None
class Phewas_Catalog(GWAS_source):
display_name = "Phewas Catalog"
def run(self, diseases, iris):
"""
Returns all GWAS SNPs associated to a disease in PhewasCatalog
Args:
* [ string ] (trait descriptions)
* [ string ] (trait Ontology IRIs)
Returntype: [ GWAS_Association ]
"""
file = open(postgap.Globals.DATABASES_DIR+"/Phewas_Catalog.txt")
res = [ self.get_association(line, diseases, iris) for line in file ]
res = filter(lambda X: X is not None, res)
logging.info("\tFound %i GWAS SNPs associated to diseases (%s) or EFO IDs (%s) in Phewas Catalog" % (len(res), ", ".join(diseases), ", ".join(iris)))
return res
def get_association(self, line, diseases, iris):
'''
Phewas Catalog format:
1. chromosome
2. snp
3. phewas phenotype
4. cases
5. p-value
6. odds-ratio
7. gene_name
8. phewas code
9. gwas-associations
10. [Inserted] EFO identifier (or N/A)
'''
items = line.rstrip().split('\t')
for iri in items[9].split(','):
if iri in iris:
return GWAS_Association (
pvalue = float(items[4]),
pvalue_description = None,
snp = items[1],
disease = Disease(name = postgap.EFO.term(iri), efo = iri),
reported_trait = items[2],
source = self.display_name,
publication = "PMID24270849",
study = None,
sample_size = int(items[3]),
odds_ratio = float(items[5]),
odds_ratio_ci_start=None,
odds_ratio_ci_end=None,
beta_coefficient = None,
beta_coefficient_unit = None,
beta_coefficient_direction = None,
rest_hash = None,
risk_alleles_present_in_reference = None
)
if items[2] in diseases:
iri = items[9].split(',')[0]
return GWAS_Association (
pvalue = float(items[4]),
pvalue_description = None,
snp = items[1],
disease = Disease(name = postgap.EFO.term(iri), efo = iri),
reported_trait = items[2],
source = self.display_name,
publication = "PMID24270849",
study = None,
sample_size = int(items[3]),
odds_ratio = float(items[5]),
odds_ratio_ci_start=None,
odds_ratio_ci_end=None,
beta_coefficient = None,
beta_coefficient_unit = None,
beta_coefficient_direction = None,
rest_hash = None,
risk_alleles_present_in_reference = None
)
return None
class GWAS_File(GWAS_source):
display_name = "GWAS File"
def create_gwas_association_collector(self):
class gwas_association_collector:
def __init__(self):
self.found_list = []
def add_to_found_list(self, gwas_association):
self.found_list.append(gwas_association)
def get_found_list(self):
return self.found_list
return gwas_association_collector()
def run(self, diseases, iris):
"""
Returns all GWAS SNPs associated to a disease in known gwas files
Args:
* [ string ] (trait descriptions)
* [ string ] (trait Ontology IRIs)
Returntype: [ GWAS_Association ]
"""
gwas_data_file = postgap.Globals.GWAS_SUMMARY_STATS_FILE
if gwas_data_file is None:
return None
logging.info( "gwas_data_file = " + gwas_data_file )
pvalue_filtered_gwas_associations = self.create_gwas_association_collector()
pvalue_filter = self.create_pvalue_filter(pvalue_threshold = postgap.Globals.GWAS_PVALUE_CUTOFF)
self.parse_gwas_data_file(
gwas_data_file = gwas_data_file,
want_this_gwas_association_filter = pvalue_filter,
callback = pvalue_filtered_gwas_associations.add_to_found_list,
max_lines_to_return_threshold = None,
)
logging.info( "Found " + str(len(pvalue_filtered_gwas_associations.get_found_list())) + " gwas associations with a pvalue of " + str(postgap.Globals.GWAS_PVALUE_CUTOFF) + " or less.")
return pvalue_filtered_gwas_associations.get_found_list()
def create_gwas_clusters_with_pvalues_from_file(self, gwas_clusters, gwas_data_file):
proper_gwas_cluster = []
cluster_number = 0
for gwas_cluster in gwas_clusters:
cluster_number += 1
gwas_cluster_with_values_from_file = self.create_gwas_cluster_with_pvalues_from_file(gwas_cluster, gwas_data_file)
proper_gwas_cluster.append(gwas_cluster_with_values_from_file)
return proper_gwas_cluster
def create_gwas_cluster_with_pvalues_from_file(self, gwas_cluster, gwas_data_file):
ld_gwas_associations = self.create_gwas_association_collector()
self.parse_gwas_data_file(
gwas_data_file = gwas_data_file,
want_this_gwas_association_filter = self.create_snp_filter(gwas_cluster.ld_snps),
callback = ld_gwas_associations.add_to_found_list,
max_lines_to_return_threshold = len(gwas_cluster.ld_snps)
)
logging.info( "ld_gwas_associations.found_list: " + pformat(ld_gwas_associations.get_found_list()) )
ld_snps_converted_to_gwas_snps = []
ld_snps_that_could_not_be_converted_to_gwas_snps = []
for ld_snp in gwas_cluster.ld_snps:
gwas_associations_for_ld_snp = filter(lambda x : ld_snp.rsID == getattr(x.snp, 'rsID', x.snp), ld_gwas_associations.get_found_list())
# If one could be found, add that.
if len(gwas_associations_for_ld_snp) == 1:
logging.info("Found " + ld_snp.rsID + " in the file! All good.")
gwas_association = gwas_associations_for_ld_snp[0]
assert type(gwas_association) is GWAS_Association, "gwas_association is GWAS_Association."
gwas_snp = GWAS_SNP(
snp = gwas_association.snp,
pvalue = gwas_association.pvalue,
z_score = None,
evidence = [ gwas_association ],
beta = gwas_association.beta_coefficient
)
ld_snps_converted_to_gwas_snps.append(gwas_snp)
# If more than one association was found: error.
if len(gwas_associations_for_ld_snp) > 1:
logging.info("Found more than one matching association for " + ld_snp.rsID + " in the file. Bad!")
sys.exit(1)
# If the snp wasn't found, add it as a regular snp.
if len(gwas_associations_for_ld_snp) == 0:
logging.info("Found no matching assocation for " + ld_snp.rsID + " in the file. Including it as regular snp.")
ld_snps_that_could_not_be_converted_to_gwas_snps.append(ld_snp)
proper_gwas_cluster = GWAS_Cluster(
gwas_snps = gwas_cluster.gwas_snps,
ld_snps = ld_snps_converted_to_gwas_snps + ld_snps_that_could_not_be_converted_to_gwas_snps,
finemap_posteriors = None,
)
return proper_gwas_cluster
def create_snp_filter(self, snps):
rsIDs_to_look_for = []
for snp in snps:
rsIDs_to_look_for.append(snp.rsID)
logging.info( "Searching for snps: " + pformat(rsIDs_to_look_for) )
def snp_name_filter(gwas_association):
# The snp attribute may be a SNP object or a bare rsID string.
rsID = getattr(gwas_association.snp, 'rsID', gwas_association.snp)
return rsID in rsIDs_to_look_for
return snp_name_filter
def create_pvalue_filter(self, pvalue_threshold):
# The returned filter takes a constructed GWAS_Association, matching the
# interface of the filter produced by create_snp_filter.
return lambda gwas_association: gwas_association.pvalue < pvalue_threshold
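# Example: keep only genome-wide significant associations (the conventional
# 5e-8 cutoff):
#
# significant_only = self.create_pvalue_filter(pvalue_threshold=5e-8)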
def parse_gwas_data_file(
self,
gwas_data_file,
callback,
want_this_gwas_association_filter,
max_lines_to_return_threshold = None,
):
file = open(gwas_data_file)
column_labels = file.readline().strip().split('\t')
number_of_lines_returned = 0
for line in file:
items = line.rstrip().split('\t')
parsed = dict()
for column_index, column_label in enumerate(column_labels):
parsed[column_label] = items[column_index]
try:
# TODO insert study info (from command line? config file?)
gwas_association = GWAS_Association(
pvalue = float(parsed["p-value"]),
pvalue_description = 'Manual',
snp = parsed["variant_id"],
disease = Disease(name = 'Manual', efo = 'EFO_Manual'),
reported_trait = "Manual",
source = "Manual",
publication = "PMID000",
study = "Manual",
sample_size = 1000,
odds_ratio = None,
odds_ratio_ci_start = None,
odds_ratio_ci_end = None,
beta_coefficient = float(parsed["beta"]),
beta_coefficient_unit = "Manual",
beta_coefficient_direction = "Manual",
rest_hash = None,
risk_alleles_present_in_reference = None,
)
except ValueError:
continue
# Filter on the constructed association so both the pvalue filter and the
# snp filter (which inspects gwas_association.snp) work at this call site.
if not want_this_gwas_association_filter(gwas_association):
continue
callback(gwas_association)
number_of_lines_returned += 1
if max_lines_to_return_threshold is not None and number_of_lines_returned>=max_lines_to_return_threshold:
break
class GWAS_DB(GWAS_source):
display_name = "GWAS DB"
def run(self, diseases, iris):
"""
Returns all GWAS SNPs associated to a disease in GWAS_DB
Args:
* [ string ] (trait descriptions)
* [ string ] (trait Ontology IRIs)
Returntype: [ GWAS_Association ]
"""
file = open(postgap.Globals.DATABASES_DIR+"/GWAS_DB.txt")
res = [ self.get_association(line, diseases, iris) for line in file ]
res = filter(lambda X: X is not None, res)
logging.info("\tFound %i GWAS SNPs associated to diseases (%s) or EFO IDs (%s) in GWAS DB" % (len(res), ", ".join(diseases), ", ".join(iris)))
return res
def get_association(self, line, diseases, iris):
'''
GWAS DB data
1. CHR
2. POS
3. SNPID
4. P_VALUE
5. PUBMED ID
6. MESH_TERM
7. EFO_ID
'''
items = line.rstrip().split('\t')
for iri in items[6].split(','):
if iri in iris:
return GWAS_Association(
pvalue = float(items[3]),
pvalue_description = None,
snp = items[2],
disease = Disease(name = postgap.EFO.term(iri), efo = iri),
reported_trait = items[5].decode('latin1'),
source = self.display_name,
publication = items[4],
study = None,
sample_size = "N/A",
odds_ratio = None,
odds_ratio_ci_start=None,
odds_ratio_ci_end=None,
beta_coefficient = None,
beta_coefficient_unit = None,
beta_coefficient_direction = None,
rest_hash = None,
risk_alleles_present_in_reference = None
)
if items[5] in diseases:
iri = items[6].split(',')[0]
return GWAS_Association(
pvalue = float(items[3]),
pvalue_description = None,
snp = items[2],
disease = Disease(name = postgap.EFO.term(iri), efo = iri),
reported_trait = items[5].decode('latin1'),
source = self.display_name,
publication = items[4],
study = None,
sample_size = "N/A",
odds_ratio = None,
odds_ratio_ci_start=None,
odds_ratio_ci_end=None,
beta_coefficient = None,
beta_coefficient_unit = None,
beta_coefficient_direction = None,
rest_hash = None,
risk_alleles_present_in_reference = None
)
return None
def get_filtered_subclasses(subclasses_filter):
subclasses_filter = [sc.replace('_', ' ') for sc in subclasses_filter]
return [subclass for subclass in sources if subclass.display_name in subclasses_filter]
sources = GWAS_source.__subclasses__()
| 0.049677 |
# nxt.sensor module -- Classes to read LEGO Mindstorms NXT sensors
# Copyright (C) 2006,2007 Douglas P Lau
# Copyright (C) 2009 Marcus Wanner, Paulo Vieira
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from time import sleep
from nxt.error import I2CError, I2CPendingError
PORT_1 = 0x00
PORT_2 = 0x01
PORT_3 = 0x02
PORT_4 = 0x03
class Type(object):
'Namespace for enumeration of the type of sensor'
# NOTE: just a namespace (enumeration)
NO_SENSOR = 0x00
SWITCH = 0x01 # Touch sensor
TEMPERATURE = 0x02
REFLECTION = 0x03
ANGLE = 0x04
LIGHT_ACTIVE = 0x05 # Light sensor (illuminated)
LIGHT_INACTIVE = 0x06 # Light sensor (ambient)
SOUND_DB = 0x07 # Sound sensor (unadjusted)
SOUND_DBA = 0x08 # Sound sensor (adjusted)
CUSTOM = 0x09
LOW_SPEED = 0x0A
LOW_SPEED_9V = 0x0B # Low-speed I2C (Ultrasonic sensor)
class Mode(object):
'Namespace for enumeration of the mode of sensor'
# NOTE: just a namespace (enumeration)
RAW = 0x00
BOOLEAN = 0x20
TRANSITION_CNT = 0x40
PERIOD_COUNTER = 0x60
PCT_FULL_SCALE = 0x80
CELSIUS = 0xA0
FAHRENHEIT = 0xC0
ANGLE_STEPS = 0xE0
MASK = 0xE0
MASK_SLOPE = 0x1F # Why isn't this slope thing documented?
class Sensor(object):
'Main sensor object'
def __init__(self, brick, port):
self.brick = brick
self.port = port
self.sensor_type = Type.NO_SENSOR
self.mode = Mode.RAW
def set_input_mode(self):
self.brick.set_input_mode(self.port, self.sensor_type,
self.mode)
I2C_ADDRESS = {
0x00: ('version', 8),
0x08: ('product_id', 8),
0x10: ('sensor_type', 8),
0x11: ('factory_zero', 1), # is this really correct?
0x12: ('factory_scale_factor', 1),
0x13: ('factory_scale_divisor', 1),
0x14: ('measurement_units', 1),
}
def _make_query(address, n_bytes):
def query(self):
data = self.i2c_query(address, n_bytes)
if n_bytes == 1:
return ord(data)
else:
return data
return query
class _Meta(type):
'Metaclass which adds accessor methods for I2C addresses'
def __init__(cls, name, bases, dict):
super(_Meta, cls).__init__(name, bases, dict)
for address in I2C_ADDRESS:
name, n_bytes = I2C_ADDRESS[address]
q = _make_query(address, n_bytes)
setattr(cls, 'get_' + name, q)
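# Illustration: with I2C_ADDRESS mapping 0x00 to ('version', 8), this
# metaclass equips each class with a generated accessor equivalent to:
#
# def get_version(self):
#     return self.i2c_query(0x00, 8)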
class DigitalSensor(Sensor):
'Object for digital sensors'
__metaclass__ = _Meta
I2C_DEV = 0x02
def __init__(self, brick, port):
super(DigitalSensor, self).__init__(brick, port)
def _ls_get_status(self, n_bytes):
for n in range(3):
try:
b = self.brick.ls_get_status(self.port)
if b >= n_bytes:
return b
except I2CPendingError:
sleep(0.01)
raise I2CError, 'ls_get_status timeout'
def i2c_command(self, address, value):
msg = chr(DigitalSensor.I2C_DEV) + chr(address) + chr(value)
self.brick.ls_write(self.port, msg, 0)
def i2c_query(self, address, n_bytes):
msg = chr(DigitalSensor.I2C_DEV) + chr(address)
self.brick.ls_write(self.port, msg, n_bytes)
self._ls_get_status(n_bytes)
data = self.brick.ls_read(self.port)
if len(data) < n_bytes:
raise I2CError, 'Read failure'
return data[-n_bytes:]
class CommandState(object):
'Namespace for enumeration of the command state of sensors'
# NOTE: just a namespace (enumeration)
OFF = 0x00
SINGLE_SHOT = 0x01
CONTINUOUS_MEASUREMENT = 0x02
EVENT_CAPTURE = 0x03 # Check for ultrasonic interference
REQUEST_WARM_RESET = 0x04
# I2C addresses for an Ultrasonic sensor
I2C_ADDRESS_US = {
0x40: ('continuous_measurement_interval', 1, True),
0x41: ('command_state', 1, True),
0x42: ('measurement_byte_0', 1, False),
0x43: ('measurement_byte_1', 1, False),
0x44: ('measurement_byte_2', 1, False),
0x45: ('measurement_byte_3', 1, False),
0x46: ('measurement_byte_4', 1, False),
0x47: ('measurement_byte_5', 1, False),
0x48: ('measurement_byte_6', 1, False),
0x49: ('measurement_byte_7', 1, False),
0x50: ('actual_zero', 1, True),
0x51: ('actual_scale_factor', 1, True),
0x52: ('actual_scale_divisor', 1, True),
}
def _make_command(address):
def command(self, value):
self.i2c_command(address, value)
return command
class _MetaUS(_Meta):
'Metaclass which adds accessor methods for US I2C addresses'
def __init__(cls, name, bases, dict):
super(_MetaUS, cls).__init__(name, bases, dict)
for address in I2C_ADDRESS_US:
name, n_bytes, set_method = I2C_ADDRESS_US[address]
q = _make_query(address, n_bytes)
setattr(cls, 'get_' + name, q)
if set_method:
c = _make_command(address)
setattr(cls, 'set_' + name, c)
class AnalogSensor(Sensor):
'Object for analog sensors'
def __init__(self, brick, port):
super(AnalogSensor, self).__init__(brick, port)
self.valid = False
self.calibrated = False
self.raw_ad_value = 0
self.normalized_ad_value = 0
self.scaled_value = 0
self.calibrated_value = 0
def get_input_values(self):
values = self.brick.get_input_values(self.port)
(self.port, self.valid, self.calibrated, self.sensor_type,
self.mode, self.raw_ad_value, self.normalized_ad_value,
self.scaled_value, self.calibrated_value) = values
return values
def reset_input_scaled_value(self):
self.brick.reset_input_scaled_value()
def get_sample(self):
self.get_input_values()
return self.scaled_value
class TouchSensor(AnalogSensor):
'Object for touch sensors'
def __init__(self, brick, port):
super(TouchSensor, self).__init__(brick, port)
self.sensor_type = Type.SWITCH
self.mode = Mode.BOOLEAN
self.set_input_mode()
def is_pressed(self):
return bool(self.scaled_value)
def get_sample(self):
self.get_input_values()
return self.is_pressed()
class LightSensor(AnalogSensor):
'Object for light sensors'
def __init__(self, brick, port):
super(LightSensor, self).__init__(brick, port)
self.set_illuminated(True)
def set_illuminated(self, active):
if active:
self.sensor_type = Type.LIGHT_ACTIVE
else:
self.sensor_type = Type.LIGHT_INACTIVE
self.set_input_mode()
class SoundSensor(AnalogSensor):
'Object for sound sensors'
def __init__(self, brick, port):
super(SoundSensor, self).__init__(brick, port)
self.set_adjusted(True)
def set_adjusted(self, active):
if active:
self.sensor_type = Type.SOUND_DBA
else:
self.sensor_type = Type.SOUND_DB
self.set_input_mode()
class UltrasonicSensor(DigitalSensor):
'Object for ultrasonic sensors'
__metaclass__ = _MetaUS
def __init__(self, brick, port):
super(UltrasonicSensor, self).__init__(brick, port)
self.sensor_type = Type.LOW_SPEED_9V
self.mode = Mode.RAW
self.set_input_mode()
sleep(0.1) # Give I2C time to initialize
def get_sample(self):
'Get a single-shot distance reading from the ultrasonic sensor'
self.set_command_state(CommandState.SINGLE_SHOT)
return self.get_measurement_byte_0()
class AccelerometerSensor(DigitalSensor):
'Object for Accelerometer sensors. Thanks to Paulo Vieira.'
__metaclass__ = _MetaUS
def __init__(self, brick, port):
super(AccelerometerSensor, self).__init__(brick, port)
self.sensor_type = Type.LOW_SPEED_9V
self.mode = Mode.RAW
self.set_input_mode()
sleep(0.1) # Give I2C time to initialize
def get_sample(self):
self.set_command_state(CommandState.SINGLE_SHOT)
out_buffer = [0,0,0,0,0,0]
# Upper X, Y, Z
out_buffer[0] = self.get_measurement_byte_0()
out_buffer[1] = self.get_measurement_byte_1()
out_buffer[2] = self.get_measurement_byte_2()
# Lower X, Y, Z
out_buffer[3] = self.get_measurement_byte_3()
out_buffer[4] = self.get_measurement_byte_4()
out_buffer[5] = self.get_measurement_byte_5()
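# Each axis is a signed 8-bit upper byte plus a 2-bit lower byte: bytes
# above 127 wrap to negative, the upper byte is shifted left by two bits
# (i.e. *4) and combined with the lower bits, and the 10-bit result is
# divided by 200 (approximately 200 counts per g for this sensor).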
self.xval = out_buffer[0]
if self.xval > 127:
self.xval -= 256
self.xval = self.xval * 4 + out_buffer[3]
self.yval = out_buffer[1]
if self.yval > 127:
self.yval -= 256
self.yval = self.yval * 4 + out_buffer[4]
self.zval = out_buffer[2]
if self.zval > 127:
self.zval -= 256
self.zval = self.zval * 4 + out_buffer[5]
self.xval = float(self.xval)/200
self.yval = float(self.yval)/200
self.zval = float(self.zval)/200
return self.xval, self.yval, self.zval
| 0.028338 |
from datetime import datetime, timedelta, date
import sys
import json
from pymongo import MongoClient
from bson.objectid import ObjectId
from django.http import JsonResponse
#------------------------------------------------------------#
def get_post_by_shares(mid):
mc = MongoClient()
db = mc.olap
shares = {}
shares['_id'] = mid
shares['insights'] = []
today = str(date.today())
today_minus7 = str(date.today() - timedelta(days=7))
#get only week old post
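# $elemMatch keeps documents whose "insights" array holds at least one entry
# dated within the last seven days; the _id clause narrows this to one post.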
rec = db.post_history.find({"insights": { "$elemMatch" : { "date": {"$gte": today_minus7, "$lt": today}}} ,"_id":ObjectId(mid)})
if rec.count() != 1:
return JsonResponse(json.dumps({"error": "invalid id"}), safe=False)
for r in rec:
sha = r
for ins in sha['insights']:
shared = ins['share']
shared_dict = {}
shared_dict['engagements'] = shared
shares['insights'].append(shared_dict)
return JsonResponse(shares)
#------------------------------------------------------------#
def get_post_by_engagements(mid):
mc = MongoClient()
db = mc.olap
engagements = {}
engagements['_id'] = mid
engagements['insights'] = []
today = str(date.today())
today_minus7 = str(date.today() - timedelta(days=7))
#get only week old post
rec = db.post_history.find({"insights": { "$elemMatch" : { "date": {"$gte": today_minus7, "$lt": today}}}, "_id":ObjectId(mid)})
if rec.count() != 1:
return JsonResponse(json.dumps({"error": "invalid id"}), safe=False)
for r in rec:
eng = r
for ins in eng['insights']:
engaged = ins['comment'] + ins['share'] + ins['like']
engaged_dict = {}
engaged_dict['engagements'] = engaged
engagements['insights'].append(engaged_dict)
return JsonResponse(engagements)
#-------------------------------------------------------------#
def get_post_by_unique_users(mid):
mc = MongoClient()
db = mc.olap
unique_users = {}
unique_users['_id'] = mid
unique_users['insights'] = []
today = str(date.today())
today_minus7 = str(date.today() - timedelta(days=7))
#get only week old post
rec = db.post_history.find({"insights": { "$elemMatch" : { "date": {"$gte": today_minus7, "$lt": today}}}, "_id":ObjectId(mid)})
if rec.count() != 1:
return JsonResponse(json.dumps({"error": "invalid id"}), safe=False)
for r in rec:
uni = r
for ins in uni['insights']:
uniq = ins['uu']
uniq_dict = {}
uniq_dict['engagements'] = uniq
unique_users['insights'].append(uniq_dict)
return JsonResponse(unique_users)
#-------------------------------------------------------------#
def get_comments_by_authors(auth):
mc = MongoClient()
db = mc.olap
author_comments = {}
author_comments['author'] = auth
author_comments['insights'] = []
today = str(date.today())
today_minus7 = str(date.today() - timedelta(days=7))
#get only week old post
rec = db.post_history.find({"insights": { "$elemMatch" : { "date": {"$gte": today_minus7, "$lt": today}}}, "author": auth})
for r in rec:
auth = r
for ins in auth['insights']:
comm = ins['comment']
comm_dict = {}
comm_dict['engagements'] = comm
comm_dict['date'] = ins['date']
author_comments['insights'].append(comm_dict)
return JsonResponse(author_comments)
#-------------------------------------------------------------#
def get_engagements_by_authors(auth):
mc = MongoClient()
db = mc.olap
author_engagements = {}
author_engagements['author'] = auth
author_engagements['insights'] = []
today = str(date.today())
today_minus7 = str(date.today() - timedelta(days=7))
#get only week old post
rec = db.post_history.find({"insights": { "$elemMatch" : { "date": {"$gte": today_minus7, "$lt": today}}}, "author": auth})
for r in rec:
eng = r
for ins in eng['insights']:
engaged = ins['comment'] + ins['share'] + ins['like']
engaged_dict = {}
engaged_dict['engagements'] = engaged
engaged_dict['date'] = ins['date']
author_engagements['insights'].append(engaged_dict)
return JsonResponse(author_engagements)
| 0.030749 |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import decorators
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class QuotasAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
force_tenant_isolation = True
@classmethod
def resource_setup(cls):
super(QuotasAdminNegativeTestJSON, cls).resource_setup()
cls.client = cls.os.quotas_client
cls.adm_client = cls.os_adm.quotas_client
cls.sg_client = cls.security_groups_client
# NOTE(afazekas): these test cases should always create and use a new
# tenant most of them should be skipped if we can't do that
cls.demo_tenant_id = cls.client.tenant_id
@test.attr(type=['negative', 'gate'])
def test_update_quota_normal_user(self):
self.assertRaises(lib_exc.Unauthorized,
self.client.update_quota_set,
self.demo_tenant_id,
ram=0)
# TODO(afazekas): Add dedicated tenant to the skiped quota tests
# it can be moved into the setUpClass as well
@test.attr(type=['negative', 'gate'])
def test_create_server_when_cpu_quota_is_full(self):
# Disallow server creation when tenant's vcpu quota is full
quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
default_vcpu_quota = quota_set['cores']
vcpu_quota = 0 # Set the quota to zero to conserve resources
quota_set = self.adm_client.update_quota_set(self.demo_tenant_id,
force=True,
cores=vcpu_quota)
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
cores=default_vcpu_quota)
self.assertRaises((lib_exc.Unauthorized, lib_exc.OverLimit),
self.create_test_server)
@test.attr(type=['negative', 'gate'])
def test_create_server_when_memory_quota_is_full(self):
# Disallow server creation when tenant's memory quota is full
quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
default_mem_quota = quota_set['ram']
mem_quota = 0 # Set the quota to zero to conserve resources
self.adm_client.update_quota_set(self.demo_tenant_id,
force=True,
ram=mem_quota)
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
ram=default_mem_quota)
self.assertRaises((lib_exc.Unauthorized, lib_exc.OverLimit),
self.create_test_server)
@test.attr(type=['negative', 'gate'])
def test_create_server_when_instances_quota_is_full(self):
# Once instances quota limit is reached, disallow server creation
quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
default_instances_quota = quota_set['instances']
instances_quota = 0 # Set quota to zero to disallow server creation
self.adm_client.update_quota_set(self.demo_tenant_id,
force=True,
instances=instances_quota)
self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
instances=default_instances_quota)
self.assertRaises((lib_exc.Unauthorized, lib_exc.OverLimit),
self.create_test_server)
@decorators.skip_because(bug="1186354",
condition=CONF.service_available.neutron)
@test.attr(type='gate')
@test.services('network')
def test_security_groups_exceed_limit(self):
# Negative test: Creation Security Groups over limit should FAIL
quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
default_sg_quota = quota_set['security_groups']
sg_quota = 0 # Set the quota to zero to conserve resources
quota_set =\
self.adm_client.update_quota_set(self.demo_tenant_id,
force=True,
security_groups=sg_quota)
self.addCleanup(self.adm_client.update_quota_set,
self.demo_tenant_id,
security_groups=default_sg_quota)
# Check we cannot create anymore
# A 403 Forbidden or 413 Overlimit (old behaviour) exception
# will be raised when out of quota
self.assertRaises((lib_exc.Unauthorized, lib_exc.OverLimit),
self.sg_client.create_security_group,
"sg-overlimit", "sg-desc")
@decorators.skip_because(bug="1186354",
condition=CONF.service_available.neutron)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_security_groups_rules_exceed_limit(self):
# Negative test: Creation of Security Group Rules should FAIL
# when we reach limit maxSecurityGroupRules
quota_set = self.adm_client.get_quota_set(self.demo_tenant_id)
default_sg_rules_quota = quota_set['security_group_rules']
sg_rules_quota = 0 # Set the quota to zero to conserve resources
quota_set =\
self.adm_client.update_quota_set(
self.demo_tenant_id,
force=True,
security_group_rules=sg_rules_quota)
self.addCleanup(self.adm_client.update_quota_set,
self.demo_tenant_id,
security_group_rules=default_sg_rules_quota)
s_name = data_utils.rand_name('securitygroup-')
s_description = data_utils.rand_name('description-')
securitygroup =\
self.sg_client.create_security_group(s_name, s_description)
self.addCleanup(self.sg_client.delete_security_group,
securitygroup['id'])
secgroup_id = securitygroup['id']
ip_protocol = 'tcp'
# Check we cannot create SG rule anymore
# A 403 Forbidden or 413 Overlimit (old behaviour) exception
# will be raised when out of quota
self.assertRaises((lib_exc.OverLimit, lib_exc.Unauthorized),
self.sg_client.create_security_group_rule,
secgroup_id, ip_protocol, 1025, 1025)
| 0 |
# Setup script for the pycvc package
#
# Usage: python setup.py install
#
import os
from setuptools import setup, find_packages
DESCRIPTION = "pycvc: Analyze CVC stormwater data"
LONG_DESCRIPTION = DESCRIPTION
NAME = "pycvc"
VERSION = "0.3.0"
AUTHOR = "Paul Hobson (Geosyntec Consultants)"
AUTHOR_EMAIL = "phobson@geosyntec.com"
URL = ""
DOWNLOAD_URL = ""
LICENSE = "BSD 3-clause"
PACKAGES = find_packages(exclude=[])
PLATFORMS = "Python 3.4 and later."
CLASSIFIERS = [
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Intended Audience :: Science/Research",
"Topic :: Formats and Protocols :: Data Formats",
"Topic :: Scientific/Engineering :: Earth Sciences",
"Topic :: Software Development :: Libraries :: Python Modules",
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
]
INSTALL_REQUIRES = ['wqio', 'pybmpdb', 'pynsqd']
PACKAGE_DATA = {
'pycvc.tex': ['*.tex'],
'pycvc.tests.testdata': ['*.csv', '*.accdb'],
'pycvc.tests.baseline_images.viz_tests': ['*.png'],
}
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=PACKAGES,
package_data=PACKAGE_DATA,
platforms=PLATFORMS,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
zip_safe=False
)
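# Example usage (assumed typical setuptools workflow, matching the header
# comment above):
#   python setup.py install    # install into the active environment
#   pip install -e .           # editable install for development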
| 0 |
from n2tvmtranslator import CommandLine, CommandType
import pytest
def test_init():
"""初期化テスト """
commandline = CommandLine(0, "command")
assert commandline.line_no == 0
assert commandline.raw_data == "command"
assert commandline.data == "command"
def test_init_strip():
"""前後空白除去
"""
commandline = CommandLine(1, " command \n")
assert commandline.line_no == 1
assert commandline.raw_data == " command \n"
assert commandline.data == "command"
def test_init_ignorecomment():
"""コメント除去
"""
commandline = CommandLine(123, " command // comment\n")
assert commandline.line_no == 123
assert commandline.raw_data == " command // comment\n"
assert commandline.data == "command"
@pytest.mark.parametrize('raw_data_command_type, command_type',
[
('', CommandType.BLANK_LINE),
('push constant 0', CommandType.C_PUSH),
('pop constant 0', CommandType.C_POP),
('add', CommandType.C_ARITHMETIC),
('sub', CommandType.C_ARITHMETIC),
('neg', CommandType.C_ARITHMETIC),
('eq', CommandType.C_ARITHMETIC),
('gt', CommandType.C_ARITHMETIC),
('lt', CommandType.C_ARITHMETIC),
('and', CommandType.C_ARITHMETIC),
('or', CommandType.C_ARITHMETIC),
('not', CommandType.C_ARITHMETIC),
('label hogege', CommandType.C_LABEL),
('if-goto hogege', CommandType.C_IF),
('goto hahahe', CommandType.C_GOTO),
('function fact 2', CommandType.C_FUNCTION),
('return', CommandType.C_RETURN),
('call func 3', CommandType.C_CALL),
])
def test_commandtype(raw_data_command_type, command_type):
commandline = CommandLine(0, raw_data_command_type)
assert commandline.command_type == command_type
@pytest.mark.parametrize('raw_data_arg1, arg1',
[
('push constant 0', 'constant'),
('pop local 0', 'local'),
('add', 'add'),
('sub', 'sub'),
('neg', 'neg'),
('eq', 'eq'),
('gt', 'gt'),
('lt', 'lt'),
('and', 'and'),
('or', 'or'),
('not', 'not'),
('label hogege', 'hogege'),
('if-goto hoyoyo', 'hoyoyo'),
('goto hahaha', 'hahaha'),
('function fact 2', 'fact'),
('call func 2', 'func'),
])
def test_arg1(raw_data_arg1, arg1):
commandline = CommandLine(0, raw_data_arg1)
assert commandline.arg1 == arg1
@pytest.mark.parametrize('raw_data_arg2, arg2',
[
('push constant 0', 0),
('pop local 1234', 1234),
('function fact 2', 2),
('call fact 3', 3),
])
def test_arg2(raw_data_arg2, arg2):
commandline = CommandLine(0, raw_data_arg2)
assert commandline.arg2 == arg2
| 0 |
#!/usr/bin/env python
import zlib
import struct
import os, sys
import numpy as np
import IO # TODO StreamReader
global g_fields
g_fields = {}
def read_bool(data, offset):
v = bool(data[offset])
return v,offset+1
def read_byte(data, offset):
v = ord(data[offset])
return v,offset+1
def read_int16(data, offset):
v = struct.unpack_from('<h',data[offset:offset+2])[0]
return v,offset+2
def read_int32(data, offset):
v = struct.unpack_from('<i',data[offset:offset+4])[0]
return v,offset+4
def read_uint32(data, offset):
v = struct.unpack_from('<I',data[offset:offset+4])[0]
return v,offset+4
def read_int64(data, offset):
v = struct.unpack_from('<q',data[offset:offset+8])[0]
return v,offset+8
def read_float(data, offset):
v = struct.unpack_from('<f',data[offset:offset+4])[0]
return v,offset+4
def read_double(data, offset):
v = struct.unpack_from('<d',data[offset:offset+8])[0]
return v,offset+8
def read_string(data, offset):
size,offset = read_uint32(data, offset)
return data[offset:offset+size],offset+size
def read_shortstring(data, offset):
size,offset = read_byte(data, offset)
return data[offset:offset+size],offset+size
def read_array(dt, data, offset):
length,offset = read_uint32(data, offset)
is_compressed,offset = read_uint32(data, offset)
s,offset = read_string(data, offset)
if is_compressed:
assert is_compressed == 1
s = zlib.decompress(s, 0, np.dtype(dt).itemsize*length)
ret = np.fromstring(s, dtype=dt)
assert len(ret) == length
return ret,offset
g_FBX_types = {ord('B'):read_bool, ord('C'):read_byte, ord('F'):read_float, ord('D'):read_double,\
ord('Y'):read_int16, ord('I'):read_int32, ord('L'):read_int64, ord('R'):read_string}
g_FBX_array_types = {ord('b'):np.bool, ord('c'):np.uint8, ord('f'):np.float32, ord('d'):np.float64,\
ord('y'):np.int16, ord('i'):np.int32, ord('l'):np.int64}
def read_attr(data,offset):
global g_FBX_types, g_FBX_array_types
data_type,offset = read_byte(data, offset)
if data_type in g_FBX_types: return g_FBX_types[data_type](data, offset)
if data_type in g_FBX_array_types: return read_array(g_FBX_array_types[data_type], data, offset)
if data_type == ord('S'):
s,offset = read_string(data,offset)
return s.replace('\0\x01','::'),offset
	print 'unknown type',data_type,chr(data_type)
	raise ValueError('unknown FBX attribute type %r' % chr(data_type))
def read_header(data,offset):
assert data[offset:offset+23] == 'Kaydara FBX Binary \0\x1a\0'
return offset+23
def read_node(data,offset):
global g_ns,g_nsz
end_offset,num_attrs,attrs_bytes = struct.unpack_from(g_ns,data[offset:offset+g_nsz]) # 4GB filesize bug: fixed in version >= 7500
name,offset = read_shortstring(data, offset+g_nsz)
	if end_offset == 0:
assert num_attrs == 0, repr(num_attrs)
assert attrs_bytes == 0, repr(attrs_bytes)
assert name == '', repr(name)
return (None,None),offset
node = {}
attrs_end_offset = offset + attrs_bytes
for i in range(num_attrs):
attr,offset = read_attr(data, offset)
node.setdefault('attrs',[]).append(attr)
assert offset == attrs_end_offset
# special case: immediately simplify a node that contains a single attribute to the value
if offset == end_offset and num_attrs == 1: return (name,node['attrs'][0]),offset
while offset < end_offset:
(childname,child),offset = read_node(data, offset)
if child is None: break
node.setdefault(childname,[]).append(child)
# special case: decode ['Properties70'][0]['P'] to ['props']
# Properties70 attributes encode a name, class, type, flags(?), value
if 'Properties70' in node:
p70 = node.pop('Properties70')
assert len(p70) == 1 and p70[0].keys() == ['P']
node['props'] = ps = {}
gui_map = {'Enum':'enum','ReferenceProperty':'float','Number':'float','Integer':'int','Short':'int','bool':'bool', 'Bool':'bool', 'enum':'enum', 'Url':'string', 'KString':'string', 'Compound':'compound', 'DateTime':'datetime', 'Time':'time', 'object':'object', 'Visibility':'bool', 'Visibility Inheritance':'bool', 'Blob':'Blob', 'charptr':'string', 'Action':'float', 'ULongLong':'long','XRefUrl':'string','Matrix Transformation':'matrix','Vector2':'float2', 'float':'float','vec_int':'vec_int'}
for p in p70[0]['P']:
o = p.pop('attrs')
gui = o[2]
if gui == '': gui = o[1]
nvs = len(o[4:])
# 'U' in flags -> positive floats
if gui not in gui_map and nvs and type(o[4]) is float: gui_map[gui] = 'float' if nvs ==1 else 'float'+str(nvs)
if gui not in gui_map: print 'TODO',o; gui_map[gui] = gui
gui = gui_map[gui]
enum=None
if nvs == 2 and gui=='enum':
nvs = 1
enum = o[5].split('~')
if '+' in o[3] and not nvs>1: print 'unexpected',o
if '+' not in o[3] and nvs>1 and not (nvs==3 and gui[-1]=='3'): print 'unexpected2',o,gui,nvs
if nvs > 1 and not gui[-1].isdigit(): gui = 'vec_'+gui
prop = (o[4] if nvs == 1 else o[4:])
global g_fields
#if name not in g_fields or o[0] not in g_fields[name]: print 'FIELD INFO',name,o
g_fields.setdefault(name,{}).setdefault(o[0],{}).update([('class',o[1]),('type',gui),('flags',o[3])])
if enum: g_fields[name][o[0]]['enum'] = enum
if 'BinaryData' in p: prop = p.pop('BinaryData')
assert not p,repr(p) # now it's empty
ps[o[0]] = prop
assert offset == end_offset, repr(offset)+'!='+repr(end_offset)
return (name,node),offset
def load(fn):
data = open(fn,'rb').read()
offset = read_header(data, 0)
version,offset = read_uint32(data, offset)
doc = {'_version':version}
global g_ns,g_nsz
g_ns,g_nsz = '<III',12
if version >= 7500: g_ns,g_nsz = '<QQQ',24
while 1:
(name,node),offset = read_node(data, offset)
if node is None: break
assert name not in doc
doc[name] = node
footer,offset = data[offset:],len(data) # footer
return doc
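# Illustrative sketch (hypothetical file name, not part of the original):
# load a binary FBX and list its top-level record names, relying only on
# load() defined above.
def example_list_toplevel(fn='scene.fbx'):
    doc = load(fn)
    print 'FBX version', doc['_version']
    for key in sorted(doc):
        if key != '_version':
            print ' top-level record:', key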
def decode(fbx_payload):
ret = {}
glob = fbx_payload['GlobalSettings']
globs = glob['props']
ret['globs'] = globs
#assert globs['TimeSpanStop'] == 46186158000L, repr(globs['TimeSpanStop']) # 1919027552091
timeScale = 1.0/46186158000 # seconds units = 7697693 * 6000
conn = fbx_payload['Connections']
assert conn.keys() == ['C']
conn = conn['C']
object_parent,object_prop_parent,prop_object_parent,prop_prop_parent = {},{},{},{}
for c in conn:
assert c.keys() == ['attrs']
c = c['attrs']
ctype = c[0]
if ctype == 'OO': object_parent[c[1]] = c[2]
elif ctype == 'OP': object_prop_parent.setdefault(c[1],{})[c[3]] = c[2]
elif ctype == 'PO': prop_object_parent.setdefault(c[1],{})[c[2]] = c[3]
elif ctype == 'PP': prop_prop_parent.setdefault(c[1],{}).setdefault(c[2],{})[c[4]] = c[3]
else: assert False,repr(ctype)
ret['object_parent'] = object_parent
ret['object_prop_parent'] = object_prop_parent
ret['prop_object_parent'] = prop_object_parent
ret['prop_prop_parent'] = prop_prop_parent
defs = fbx_payload['Definitions']
count = defs['Count'][0]
version = defs['Version'][0]
dots = defs['ObjectType']
defn = {}
for dot in dots:
tclass = dot['attrs'][0]
count = count - dot['Count'][0]
if 'PropertyTemplate' in dot:
defn[tclass] = dot['PropertyTemplate'][0]['props']
assert count == 0
ret['defs'] = defn
objects = fbx_payload['Objects']
objs = {}
ignores = set(['props','attrs','KeyTime','KeyValueFloat'])
for node,nodes in objects.iteritems():
for nd in nodes:
k,t,h = nd['attrs']
val = {'attrs':nd.get('props',{})}
if node == 'AnimationCurve':
kts = np.float32(nd['KeyTime'][0] * timeScale)
kvs = nd['KeyValueFloat'][0]
assert len(kts) == len(kvs)
val['curve'] = np.float32((kts,kvs)) # 2xN
name = t.split('::')[0].split(':')[-1] if '::' in t else t
val.update({'type':node,'id':k,'fullname':t,'name':name,'class':h})
for v in nd.keys():
if v not in ignores:
tmp = nd[v]
if len(tmp)==1: tmp = tmp[0]
val['attrs'][v] = tmp
assert k not in objs
if k in object_parent: val['parent'] = object_parent[k]
if k in object_prop_parent: val['pparent'] = object_prop_parent[k]
#else: assert node == 'AnimationStack', repr(node) # ?
objs[k] = val
ret['objs'] = objs
return ret
def mobu_rmat(xyz_degrees, ro=0):
#['xyz','xzy','yzx','yxz','zxy','zyx'][ro] NB right-to-left matrix order because our matrices are the transpose of mobu
sx,sy,sz = np.sin(np.radians(xyz_degrees))
cx,cy,cz = np.cos(np.radians(xyz_degrees))
mx = np.array([[1,0,0],[0,cx,-sx],[0,sx,cx]],dtype=np.float32)
my = np.array([[cy,0,sy],[0,1,0],[-sy,0,cy]],dtype=np.float32)
mz = np.array([[cz,-sz,0],[sz,cz,0],[0,0,1]],dtype=np.float32)
m1 = [mz,my,mx,mz,my,mx][ro]
m2 = [my,mz,mz,mx,mx,my][ro]
m3 = [mx,mx,my,my,mz,mz][ro]
return np.dot(m1,np.dot(m2,m3))
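# Illustrative sanity check (assumes degrees in, right-handed axes): with
# ro=0 a pure 90-degree rotation about Z maps the x axis onto the y axis.
def example_mobu_rmat():
    R = mobu_rmat([0.0, 0.0, 90.0], ro=0)
    return np.allclose(np.dot(R, [1.0, 0.0, 0.0]), [0.0, 1.0, 0.0], atol=1e-6)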
def matrix_mult(p,c):
ret = np.dot(p[:3,:3],c[:3,:])
ret[:3,3] += p[:3,3]
return ret
def matrix_inverse(m):
ret = np.zeros((3,4),dtype=np.float32)
try:
ret[:,:3] = np.linalg.inv(m[:3,:3])
except:
print '???exception in matrix_inverse',list(ret.ravel()) # TODO HACK
ret[:,:3] = np.eye(3) #m[:3,:3].T
ret[:,3] = -np.dot(ret[:,:3],m[:3,3])
return ret
def sample(mat, t):
return mat[1][np.searchsorted(mat[0][:-1], t)] # HACK
return np.interp(t, mat[0], mat[1]) # TODO doesn't work for periodic mat[1] (angles)
def extract_geo(node):
if node['type'] != 'Geometry': return None
# node keys are:'name', 'parent', 'id', 'type', 'class', 'fullname', 'attrs'
# node['attrs'] : 'LayerElementNormal', 'GeometryVersion', 'LayerElementUV', 'Vertices', 'Edges', 'Layer', 'PolygonVertexIndex', 'LayerElementMaterial'
if node['class'] == 'Shape': # blendshape
#print node['attrs'].keys() #['Vertices', 'Version', 'Normals', 'Indexes']
return {'verts':np.float32(node['attrs']['Vertices']).reshape(-1,3)*10.0,'indices':np.int32(node['attrs']['Indexes'])}
if node['class'] == 'NurbsCurve':
#['NurbsCurveVersion', 'KnotVector', 'Form', 'Dimension', 'Points', 'Rational', 'GeometryVersion', 'Type', 'Order']
vs = np.float32(node['attrs']['Points']).reshape(-1,4)[:,:3]*10.0
return {'verts':vs,'edges':[[i,(i+1) % len(vs)] for i in range(len(vs)-1 if node['attrs']=='Open' else len(vs))]}
print node['type'],node['class'],node['attrs'].keys()
vs, vis = node['attrs']['Vertices'], node['attrs']['PolygonVertexIndex']
#es = node['Edges']
#if len(es) % 2 != 0: print 'unexpected',len(es),es # 3983
layer, layer_uv, layer_mtl = node['attrs']['Layer'], node['attrs']['LayerElementUV'], node['attrs']['LayerElementMaterial']
vs = np.float32(vs).reshape(-1,3)*10.0 # Vertices
#es = np.int32(es).reshape(-1,2) # Edges
vis = np.int32(vis) # Polys (index into vs, last edge has minus)
faces = []
face = []
for vi in vis:
if vi < 0:
face.append(-1-vi)
faces.append(face)
face = []
else: face.append(vi)
return {'verts':vs, 'faces':faces}
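# Illustrative sketch of the PolygonVertexIndex convention decoded in
# extract_geo() above: the last index of each polygon is stored
# bitwise-negated (-1-vi).
def example_decode_polys():
    faces, face = [], []
    for vi in np.int32([0, 1, 2, -4, 4, 5, -7]):  # a quad then a triangle
        if vi < 0:
            face.append(-1 - vi)
            faces.append(face)
            face = []
        else:
            face.append(vi)
    return faces  # -> [[0, 1, 2, 3], [4, 5, 6]]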
def extract_animation(fbx, skel_dict):
if skel_dict is None: return None
objs = fbx['objs']
nodeObjs = [(k,v) for k,v in sorted(objs.iteritems())[::-1] if v['type'] == 'AnimationCurve']
jointIds = skel_dict['jointIds']
num_joints = len(jointIds)
translations = [[None,None,None] for i in jointIds]
rotations = [[None,None,None] for i in jointIds]
scalings = [[None,None,None] for i in jointIds] # TODO not hooked up yet
deform_percents = [[None] for i in jointIds] # TODO not hooked up yet
visibility = [[True] for i in jointIds] # TODO not hooked up yet
param_map = {'Lcl Translation':translations, 'Lcl Rotation':rotations, 'Lcl Scaling':scalings, 'DeformPercent':deform_percents,
'Visibility':visibility}
chan_map = {'d|X':0, 'd|Y':1, 'd|Z':2, 'd|DeformPercent':0, 'd|Visibility':0}
for kid,kval in nodeObjs:
try:
curve = kval['curve']
[(cn,pid),] = kval['pparent'].items()
pval = fbx['objs'][pid]
loop = set()
while pid not in jointIds:
			if pid in loop: raise Exception('loop detected '+str(pid))
loop.add(pid)
while 'pparent' not in pval:
print 'DEBUG',pval['type'],pval['name']
pval = objs[pval['parent']]
if len(pval['pparent'].items()) != 1:
print 'UNEX',pval['pparent'], kval['name']
(ppn,pid) = pval['pparent'].items()[0]
#print ppn, # d|Y 2843320234096 Lcl Translation 2847226443280 # TODO other params here: MaxTranslation
pval = fbx['objs'][pid]
jid = jointIds.index(pid)
param_map[ppn][jid][chan_map[cn]] = curve
except Exception as E:
print 'ERR',E
jcs,jcss,jcns = [],[0],[]
for L,jn,ts,rs,ro in zip(skel_dict['Ls'],skel_dict['jointNames'],translations,rotations,skel_dict['jointROs']):
for cn,t in zip('xyz',ts):
if t is not None:
ci = ord(cn)-ord('x')
jcs.append(ci)
jcns.append(jn+':t'+cn)
L[ci,3] = 0 # override local translation; TODO this won't work if there's a pretranslation?
jcss.append(len(jcs))
for cn in ro:
ci = ord(cn)-ord('x')
r = rs[ci]
if r is not None:
jcs.append(ci+3)
jcns.append(jn+':r'+cn)
jcss.append(len(jcs))
skel_dict['jointChans'] = np.array(jcs, dtype=np.int32)
skel_dict['jointChanSplits'] = np.array(jcss, dtype=np.int32)
skel_dict['chanNames'] = jcns
skel_dict['numChans'] = len(jcs)
skel_dict['chanValues'] = np.zeros(len(jcs), dtype=np.float32)
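    # Layout note (illustrative): jointChanSplits has 2*numJoints+1 entries;
    # for joint i the translation channels are jointChans[splits[2*i]:splits[2*i+1]]
    # and the rotation channels are jointChans[splits[2*i+1]:splits[2*i+2]],
    # which is exactly how set_frame_CB() walks them below.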
#print jcs[:10],jcss[:10],jcns[:10],skel_dict['jointParents'][:10],skel_dict['Gs'][:3]
#print jcns
#from pprint import pprint
#pprint(zip(skel_dict['jointROs'],skel_dict['jointNames']))
return {'t':translations,'r':rotations}
def extract_skeleton(fbx):
objs = fbx['objs']
#print set([x['type'] for x in objs.itervalues()])
skel_types = set(['Model','Deformer','Geometry','CollectionExclusive'])
nodeObjs = [(k,v.get('parent',0),v) for k,v in sorted(objs.iteritems())[::-1] if v['type'] in skel_types]
def depth_first_children(js, parent):
for jid,pid,jval in nodeObjs:
if pid != parent: continue
js.append((jid,pid,jval))
depth_first_children(js, jid)
return js
js = depth_first_children([], 0)
if js == []: return None
jids,jps,jvals = zip(*js)
jointIds = list(jids)
jointParents = [jointIds.index(pid) if pid != 0 else -1 for pid in jps]
jointGeos = map(extract_geo, jvals)
jointNames = [v['name'] for v in jvals]
jointTypes = [v['type'] for v in jvals]
#for jt,jv in zip(jointTypes, jvals):
# if jt == 'Deformer':
# from pprint import pprint
# pprint(jv)
#exit()
numJoints = len(jointIds)
jointChans = []
jointChanSplits = np.zeros(len(jointNames)*2+1, dtype=np.int32)
dofNames = []
numDofs = 0
is_bone = [v.get('class','')=='LimbNode' for v in jvals]
ros = [v['attrs'].get('RotationOrder',0) for v in jvals]
rs = [v['attrs'].get('PreRotation',[0,0,0]) for v in jvals]
r2s = [v['attrs'].get('Lcl Rotation',[0,0,0]) for v in jvals]
ts = [v['attrs'].get('Lcl Translation',[0,0,0]) for v in jvals]
Ls = np.zeros((numJoints,3,4), dtype=np.float32)
for r,r2,ro,t,L in zip(rs,r2s,ros,ts,Ls):
L[:3,:3] = mobu_rmat(r) #np.dot(mobu_rmat(r),mobu_rmat(r2,ro))
L[:,3] = np.float32(t)*10.0
# if n.has_key('Transform'):
# L[:3,:4] = n['TransformLink'].reshape(4,4)[:4,:3].T * [1,1,1,10]
Gs = np.zeros((numJoints,3,4), dtype=np.float32)
for p,L,G,n in zip(jointParents,Ls,Gs,jvals):
if n['attrs'].has_key('TransformLink'): # TransformLink is the global matrix of the bone at bind time # TODO this is not right
TL_new = n['attrs']['TransformLink'].reshape(4,4)[:4,:3].T * [1,1,1,10]
T_new = n['attrs']['Transform'].reshape(4,4)[:4,:3].T * [1,1,1,10]
assert np.allclose(matrix_mult(T_new,TL_new), np.eye(3,4))
L[:] = matrix_mult(matrix_inverse(Gs[p]),TL_new)
#if n['attrs'].has_key('Transform'): # Transform is the global matrix of the mesh at bind time
# L[:] = matrix_mult(L,n['attrs']['Transform'].reshape(4,4)[:4,:3].T * [1,1,1,10])
G[:] = matrix_mult(Gs[p], L) if p != -1 else L
Bs = Ls[:,:,3].copy()
return { 'name' : 'skel',
'jointIds' : jointIds,
'jointTypes' : jointTypes,
'numJoints' : numJoints,
'jointNames' : jointNames, # list of strings
'jointIndex' : dict(zip(jointNames,range(numJoints))), # dict of string:int
'jointParents' : np.int32(jointParents),
'jointChans' : np.int32(jointChans), # 0 to 5 : tx,ty,tz,rx,ry,rz
'jointChanSplits': np.int32(jointChanSplits),
'chanNames' : dofNames, # list of strings
'chanValues' : np.zeros(numDofs,dtype=np.float32),
'numChans' : int(numDofs),
'Bs' : Bs,
'Ls' : Ls,
'Gs' : Gs,
'jointROs' : [['xyz','xzy','yzx','yxz','zxy','zyx'][ro] for ro in ros],
'jointGeos' : jointGeos,
'isBone' : is_bone
}
def skelDictToMesh(skelDict):
Vs, Bs, Ts, Names, Faces = [], [], [], [], []
if skelDict is None: return dict(names=Names,verts=Vs,faces=Faces,bones=Bs,transforms=Ts)
for ji, jn in enumerate(skelDict['jointIds']):
bs = [[0,0,0]]
children_inds = np.where(skelDict['jointParents'] == ji)[0]
if skelDict['isBone'][ji]:
for ci in children_inds:
bs.append(skelDict['Bs'][ci])
Vs.append(bs)
Bs.append([(0,i) for i in range(1,len(bs))])
if len(children_inds) == 0: Bs[-1].append((0,0)) # TODO is this a workaround for a bug in GLMeshes?
Ts.append(skelDict['Gs'][ji])
Faces.append([])
Names.append('/fbx/'+str(jn))
geo = skelDict['jointGeos'][ji]
if geo is not None:
offset = len(bs)
Vs[-1] = list(geo['verts']) + list(np.int32(Vs[-1])+offset)
Bs[-1] = list(np.int32(Bs[-1])+offset)
if 'faces' in geo: Faces[-1].extend(list(geo['faces']))
if 'edges' in geo: Bs[-1].extend(list(geo['edges']))
return dict(names=Names,verts=Vs,faces=Faces,bones=Bs,transforms=Ts)
def skel_to_nodes(skel_dict):
ret = []
if skel_dict is None: return ret
jointNames = skel_dict['jointNames']
jointIds = skel_dict['jointIds']
jointParents = skel_dict['jointParents']
jointTypes = skel_dict['jointTypes']
for ji,(n,p,c) in enumerate(zip(jointNames,jointParents,list(jointParents[1:])+[-1])):
jid = jointIds[ji]
ps = '' if p == -1 else ' '*(ret[p][0].index('-'))
ret.append((ps+(' -+' if c==ji else ' -')+n+':'+jointTypes[ji],'/fbx/'+str(jid)))
return ret
def set_frame_CB(fi):
view = QApp.view()
skel_mesh = view.getLayer('skel')
global g_anim_dict, g_skel_dict
t = g_anim_dict['t']
r = g_anim_dict['r']
chan_values = g_skel_dict['chanValues']
jcs = g_skel_dict['jointChans']
jcss = g_skel_dict['jointChanSplits']
num_joints = g_skel_dict['numJoints']
anim = []
time_sec = fi / 120. # TODO time range, fps
for ji in range(num_joints):
for ti in range(jcss[2*ji],jcss[2*ji+1]):
anim.append(sample(t[ji][jcs[ti]], time_sec)*10.0)
for ri in range(jcss[2*ji+1],jcss[2*ji+2]):
anim.append(np.radians(sample(r[ji][jcs[ri]-3], time_sec)))
#print ji,anim[:10]
g_skel_dict['chanValues'][:] = anim
from GCore import Character
Character.updatePoseAndMeshes(g_skel_dict, skel_mesh, None)
#print g_skel_dict['Gs'][:3]
view.updateGL()
def pickedCB(view,data,clearSelection=True):
print 'pickedCB',view
print data
print clearSelection
if data is None:
QApp.app.select(None)
else:
primitive_type,pn,pi,distance = data
		if primitive_type == '3d':
p = view.primitives[pn]
if isinstance(p,GLMeshes):
global g_skel_dict
name = str(g_skel_dict['jointIds'][pi])
print "Picked:", name
QApp.app.select('/fbx/'+name)
if __name__ == '__main__':
import sys
from GCore import State
from UI import GLMeshes
payload = load(sys.argv[1])
#from pprint import pprint; pprint(payload, open('fbx.txt','w'))
fbx = decode(payload)
IO.save('fbx.tmp',fbx)
skel_dict = extract_skeleton(fbx)
anim_dict = extract_animation(fbx, skel_dict)
#print skel_dict['jointNames']
skel_mesh = GLMeshes(**skelDictToMesh(skel_dict))
global g_anim_dict, g_skel_dict
g_anim_dict = anim_dict
g_skel_dict = skel_dict
#State.setKey('/doc',payload)
#fbx = decode(payload)
#State.setKey('/fbx',fbx)
#for k,v in fbx.iteritems(): State.setKey(k,v)
from UI import QApp, QGLViewer, GLMeshes, GLPoints3D
app,win = QGLViewer.makeApp(appName='Imaginarium FBX')
#outliner = QApp.app.qobjects
#for gi,(key,value) in enumerate(fbx.items()):
# outliner.addItem(str(key)+'='+repr(value)[:200], data=str(key), index=gi)
display_nodes = skel_to_nodes(skel_dict)
#print zip(*display_nodes)[1]
#for gi,(disp,key) in enumerate(display_nodes): outliner.addItem(disp, data='_OBJ_'+key, index=gi)
#global g_fields
QApp.fields = dict([(k,sorted(v.items())) for k,v in g_fields.iteritems()])
for gi,(k,v) in enumerate(fbx['objs'].items()): State.setKey('/fbx/'+str(k),v)
QApp.app.qoutliner.set_root('/fbx')
#for gi,key in enumerate(sorted(State.allKeys())): outliner.addItem(key+'='+repr(State.getKey(key))[:80], data=key, index=gi)
QGLViewer.makeViewer(timeRange=(0,8000), callback=set_frame_CB, layers={'skel':skel_mesh}, pickCallback=pickedCB) #, dragCallback=dragCB, keyCallback=keyCB, drawCallback=drawGL_cb, layers=layers)
| 0.05266 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import excutils
import taskflow.engines
from taskflow.patterns import linear_flow
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _LE
from cinder import rpc
from cinder import utils
from cinder.volume.flows import common
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
class ExtractSchedulerSpecTask(flow_utils.CinderTask):
"""Extracts a spec object from a partial and/or incomplete request spec.
Reversion strategy: N/A
"""
default_provides = set(['request_spec'])
def __init__(self, db_api, **kwargs):
super(ExtractSchedulerSpecTask, self).__init__(addons=[ACTION],
**kwargs)
self.db_api = db_api
def _populate_request_spec(self, context, volume, snapshot_id,
image_id):
# Create the full request spec using the volume object.
#
# NOTE(dulek): At this point, a volume can be deleted before it gets
        # scheduled. If a delete API call is made, the volume gets instantly
        # deleted and scheduling will fail when it tries to update the DB entry
        # (with the host) in ScheduleCreateVolumeTask below.
volume_type_id = volume.volume_type_id
vol_type = volume.volume_type
return {
'volume_id': volume.id,
'snapshot_id': snapshot_id,
'image_id': image_id,
'volume_properties': {
'size': utils.as_int(volume.size, quiet=False),
'availability_zone': volume.availability_zone,
'volume_type_id': volume_type_id,
},
'volume_type': list(dict(vol_type).items()),
}
def execute(self, context, request_spec, volume, snapshot_id,
image_id):
# For RPC version < 1.2 backward compatibility
if request_spec is None:
            request_spec = self._populate_request_spec(context, volume,
                                                       snapshot_id, image_id)
return {
'request_spec': request_spec,
}
class ScheduleCreateVolumeTask(flow_utils.CinderTask):
"""Activates a scheduler driver and handles any subsequent failures.
Notification strategy: on failure the scheduler rpc notifier will be
activated and a notification will be emitted indicating what errored,
the reason, and the request (and misc. other data) that caused the error
to be triggered.
Reversion strategy: N/A
"""
FAILURE_TOPIC = "scheduler.create_volume"
def __init__(self, db_api, driver_api, **kwargs):
super(ScheduleCreateVolumeTask, self).__init__(addons=[ACTION],
**kwargs)
self.db_api = db_api
self.driver_api = driver_api
def _handle_failure(self, context, request_spec, cause):
try:
self._notify_failure(context, request_spec, cause)
finally:
LOG.error(_LE("Failed to run task %(name)s: %(cause)s"),
{'cause': cause, 'name': self.name})
def _notify_failure(self, context, request_spec, cause):
"""When scheduling fails send out an event that it failed."""
payload = {
'request_spec': request_spec,
'volume_properties': request_spec.get('volume_properties', {}),
'volume_id': request_spec['volume_id'],
'state': 'error',
'method': 'create_volume',
'reason': cause,
}
try:
rpc.get_notifier('scheduler').error(context, self.FAILURE_TOPIC,
payload)
except exception.CinderException:
LOG.exception(_LE("Failed notifying on %(topic)s "
"payload %(payload)s"),
{'topic': self.FAILURE_TOPIC, 'payload': payload})
def execute(self, context, request_spec, filter_properties):
try:
self.driver_api.schedule_create_volume(context, request_spec,
filter_properties)
except Exception as e:
            # An error happened: notify on the scheduler queue, log what
            # happened, set the volume to errored out, and reraise the error
            # *if* the exception caught isn't NoValidHost. Otherwise *do not*
            # reraise (since what's the point?)
with excutils.save_and_reraise_exception(
reraise=not isinstance(e, exception.NoValidHost)):
try:
self._handle_failure(context, request_spec, e)
finally:
common.error_out_volume(context, self.db_api,
request_spec['volume_id'],
reason=e)
def get_flow(context, db_api, driver_api, request_spec=None,
filter_properties=None,
volume=None, snapshot_id=None, image_id=None):
"""Constructs and returns the scheduler entrypoint flow.
This flow will do the following:
1. Inject keys & values for dependent tasks.
2. Extract a scheduler specification from the provided inputs.
3. Use provided scheduler driver to select host and pass volume creation
request further.
"""
create_what = {
'context': context,
'raw_request_spec': request_spec,
'filter_properties': filter_properties,
'volume': volume,
'snapshot_id': snapshot_id,
'image_id': image_id,
}
flow_name = ACTION.replace(":", "_") + "_scheduler"
scheduler_flow = linear_flow.Flow(flow_name)
# This will extract and clean the spec from the starting values.
scheduler_flow.add(ExtractSchedulerSpecTask(
db_api,
rebind={'request_spec': 'raw_request_spec'}))
# This will activate the desired scheduler driver (and handle any
# driver related failures appropriately).
scheduler_flow.add(ScheduleCreateVolumeTask(db_api, driver_api))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(scheduler_flow, store=create_what)
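# --- Illustrative sketch, not part of the original module ---
# A hedged example of exercising the scheduler flow in isolation with mock
# stand-ins (mirroring Cinder's unit-test style); every name below is a
# test double, not a real service object.
def _example_run_flow():
    import mock
    db_api, driver_api = mock.Mock(), mock.Mock()
    request_spec = {'volume_id': 'fake-vol-id', 'volume_properties': {}}
    engine = get_flow(mock.sentinel.context, db_api, driver_api,
                      request_spec=request_spec,
                      filter_properties={},
                      volume=mock.Mock(id='fake-vol-id'))
    engine.run()
    # On success the driver was asked to schedule exactly once.
    assert driver_api.schedule_create_volume.call_count == 1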
| 0 |
import random
import math
class Entity:
def __init__(self, type):
self.type = type
class Creature(Entity):
type = "Creature"
def __init__(self, name, hitpoints, power):
self.name = name
self.maxHP = hitpoints
self.currHP = hitpoints
self.power = power
def attack(self, target):
damage = math.floor(self.power * random.randrange(0, 21) / 10.0)
target.currHP = target.currHP - damage if target.currHP - damage >= 0 else 0
if damage > self.power * 1.25:
status = "Critical hit! "
elif damage < self.power * .25:
status = "Glancing blow! "
else:
status = ""
print "%s%s hit %s for %d damage." % (status, self.name, target.name, damage)
def toString(self):
return "%s: %d/%d" % (self.name, self.currHP, self.maxHP)
class Structure(Entity):
type = "Structure"
def __init__(self, name, hitpoints):
self.name = name
self.maxHP = hitpoints
self.currHP = hitpoints
def toString(self):
return "%s: %d/%d" % (self.name, self.currHP, self.maxHP)
class Spell(Entity):
type = "Spell"
def __init__(self, name, power, cost):
self.name = name
self.power = power
self.cost = cost
def cast(self, target):
damage = self.power
target.currHP = target.currHP - damage if target.currHP - damage >= 0 else 0
print "%s was struck by %s for %d damage." % (target.name, self.name, damage)
def toString(self):
return "'%s': PWR - %d, COST - %d" % (self.name, self.power, self.cost) | 0.030007 |
#!/usr/bin/python
"""
Extract MPEG-2 video from AR Drone 2.0 stream
usage:
./video.py <logged stream> <output directory> [<startIndex>]
"""
import sys
import os
import struct
import socket
VIDEO_SOCKET_TIMEOUT=1.0
def nextCmd( q ):
try:
return q.get(block=False)
except:
return None
def logVideoStream( hostPortPair, filename, queueCmd, flushWholeVideo=False ):
"wait for termination command and otherwise log data"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP
s.connect( hostPortPair )
s.settimeout( VIDEO_SOCKET_TIMEOUT )
f = open( filename, "wb" )
cmd = None
    while cmd is None:
try:
data = s.recv(10240)
f.write(data)
f.flush()
except socket.timeout:
print "Video filename TIMEOUT"
cmd = nextCmd( queueCmd )
if flushWholeVideo:
while True:
try:
data = s.recv(10240)
f.write(data)
f.flush()
except socket.timeout:
print "REC Video Completed (TIMEOUT)"
break
f.close()
s.close()
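# Illustrative sketch (assumed usage, not in the original): run
# logVideoStream() in a worker thread and stop it by pushing any object
# onto the command queue; the host/port below are the usual AR Drone 2.0
# video endpoint, but treat them as an assumption here.
def exampleRecordSeconds( hostPortPair=("192.168.1.1",5555), seconds=5.0 ):
    import threading, time, Queue
    q = Queue.Queue()
    t = threading.Thread( target=logVideoStream, args=(hostPortPair, "video.bin", q) )
    t.start()
    time.sleep( seconds )
    q.put( "stop" ) # nextCmd() returns any queued object, which ends the loop
    t.join()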
def convertVideo( filename, outDir, frameIndex = 0 ):
fin = open( filename, "rb")
save = False
data = fin.read(4)
while len(data) > 0:
assert data == "PaVE" # PaVE (Parrot Video Encapsulation)
print "version", ord(fin.read(1))
print "codec", ord(fin.read(1))
headerSize = struct.unpack_from("H", fin.read(2))[0]
print "header size", headerSize
payloadSize = struct.unpack_from("I", fin.read(4))[0]
print "payload size", payloadSize
buf = fin.read(headerSize - 12)
print "BUF", len(buf)
arr = struct.unpack_from("HHHHIIBBBBIIHBBBBBBI",buf) # resolution, index, chunk, type, (ignored III reserved)
print "ARR", arr
print "Frame number", arr[4]
if arr[8]==1:
save = True
# print "chunk", struct.unpack_from("",buf[16:])
payload = fin.read(payloadSize)
print "Payload Data:", [ord(c) for c in payload[:8]]
if save:
fout = open(outDir+os.sep+"frame%04d.bin" % frameIndex, "wb")
fout.write(payload)
fout.close()
frameIndex += 1
data = fin.read(4)
if __name__ == "__main__":
if len(sys.argv) < 3:
print __doc__
sys.exit(2)
frameIndex = 0
if len(sys.argv) > 3:
frameIndex = int(sys.argv[3])
convertVideo( sys.argv[1], sys.argv[2], frameIndex=frameIndex )
| 0.027237 |
# Copyright 2004-2005 Joe Wreschnig, Michael Urman, Iñigo Serna
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import re
from senf import fsnative, fsn2text
from quodlibet.util import re_escape
class TagsFromPattern:
def __init__(self, pattern):
self.headers = []
self.slashes = len(pattern) - len(pattern.replace(os.path.sep, '')) + 1
self.pattern = None
# patterns look like <tagname> non regexy stuff <tagname> ...
pieces = re.split(r'(<[A-Za-z0-9~_]+>)', pattern)
override = {'<tracknumber>': r'\d\d?', '<discnumber>': r'\d\d??'}
dummies_found = 0
for i, piece in enumerate(pieces):
if not piece:
continue
if piece[0] + piece[-1] == '<>':
piece = piece.lower() # canonicalize to lowercase tag names
if "~" in piece:
dummies_found += 1
piece = "<QUOD_LIBET_DUMMY_%d>" % dummies_found
pieces[i] = '(?P%s%s)' % (piece, override.get(piece, '.+?'))
if "QUOD_LIBET" not in piece:
self.headers.append(piece[1:-1])
else:
pieces[i] = re_escape(piece)
# some slight magic to anchor searches "nicely"
# nicely means if it starts with a <tag>, anchor with a /
# if it ends with a <tag>, anchor with .xxx$
# but if it's a <tagnumber>, don't bother as \d+ is sufficient
# and if it's not a tag, trust the user
if pattern.startswith('<') and not pattern.startswith('<tracknumber>')\
and not pattern.startswith('<discnumber>'):
pieces.insert(0, re_escape(os.path.sep))
if pattern.endswith('>') and not pattern.endswith('<tracknumber>')\
and not pattern.endswith('<discnumber>'):
pieces.append(r'(?:\.[A-Za-z0-9_+]+)$')
self.pattern = re.compile(''.join(pieces))
def match(self, song):
return self.match_path(song["~filename"])
def match_path(self, path):
assert isinstance(path, fsnative)
tail = os.path.splitdrive(path)[-1]
# only match on the last n pieces of a filename, dictated by pattern
# this means no pattern may effectively cross a /, despite .* doing so
sep = os.path.sep
matchon = sep + sep.join(tail.split(sep)[-self.slashes:])
# work on unicode
matchon = fsn2text(matchon)
match = self.pattern.search(matchon)
# dicts for all!
if match is None:
return {}
else:
return match.groupdict()
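# Illustrative sketch (hypothetical pattern and POSIX path, not from the
# original module): pulling tags out of a filename.
def _example():
    pattern = TagsFromPattern('<artist>/<album>/<tracknumber>. <title>')
    path = fsnative(u'/music/Artist/Album/01. Song.flac')
    return pattern.match_path(path)
    # -> {'artist': 'Artist', 'album': 'Album',
    #     'tracknumber': '01', 'title': 'Song'}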
| 0 |
from django.db import models
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page
from wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel
from wagtail.documents.models import Document
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.search import index
from modelcluster.fields import ParentalKey
from modelcluster.tags import ClusterTaggableManager
from taggit.models import TaggedItemBase
class DocumentsIndexPage(Page):
"""
This is the index page for the Documents Gallery. It contains the links to
Gallery pages. Gallery Page displays the gallery documents according to
tags defined.
"""
intro = RichTextField(blank=True)
search_fields = Page.search_fields + [
index.SearchField('intro'),
]
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def children(self):
return self.get_children().live()
def get_context(self, request):
# Get list of live Gallery pages that are descendants of this page
pages = DocumentsPage.objects.live().descendant_of(self)
# Update template context
context = super(DocumentsIndexPage, self).get_context(request)
context['pages'] = pages
return context
class Meta:
verbose_name = "Documents Index Page"
DocumentsIndexPage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('intro', classname="full")
]
DocumentsIndexPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "SEO and metadata fields"),
ImageChooserPanel('feed_image'),
]
class DocumentsPageTag(TaggedItemBase):
content_object = ParentalKey(
'documents_gallery.DocumentsPage', related_name='tagged_items')
class DocumentsPage(Page):
"""
This is the Documents page. It takes tag names which you have assigned to
your documents. It gets the document objects according to tags defined by
you. Your document gallery will be created as per tags.
"""
tags = ClusterTaggableManager(through=DocumentsPageTag, blank=True)
feed_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
@property
def gallery_index(self):
# Find closest ancestor which is a Gallery index
return self.get_ancestors().type(DocumentsIndexPage).last()
def get_context(self, request):
# Get tags and convert them into list so we can iterate over them
tags = self.tags.values_list('name', flat=True)
# Creating empty Queryset from Wagtail Document model
documents = Document.objects.none()
        if tags:
            for tag in tags:
                documents = documents | Document.objects.filter(
                    tags__name=tag)
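        # Note: the per-tag loop above ORs querysets together; an assumed
        # equivalent single query would be
        # Document.objects.filter(tags__name__in=tags).distinct()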
# Pagination
page = request.GET.get('page')
paginator = Paginator(documents, 25) # Show 25 documents per page
try:
documents = paginator.page(page)
except PageNotAnInteger:
documents = paginator.page(1)
except EmptyPage:
documents = paginator.page(paginator.num_pages)
# Update template context
context = super(DocumentsPage, self).get_context(request)
context['documents'] = documents
return context
class Meta:
verbose_name = "Documents Page"
DocumentsPage.content_panels = [
FieldPanel('title', classname="full title"),
FieldPanel('tags'),
]
DocumentsPage.promote_panels = [
MultiFieldPanel(Page.promote_panels, "SEO and metadata fields"),
ImageChooserPanel('feed_image'),
]
| 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_messaging.rpc import dispatcher as rpc
from senlin.common import consts
from senlin.common import exception as exc
from senlin.conductor import service
from senlin.engine.actions import base as am
from senlin.engine import cluster as cm
from senlin.engine import dispatcher
from senlin.objects import cluster as co
from senlin.objects import node as no
from senlin.objects.requests import clusters as orco
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
class ClusterOpTest(base.SenlinTestCase):
def setUp(self):
super(ClusterOpTest, self).setUp()
self.ctx = utils.dummy_context(project='cluster_op_test_project')
self.svc = service.ConductorService('host-a', 'topic-a')
@mock.patch.object(dispatcher, 'start_action')
@mock.patch.object(am.Action, 'create')
@mock.patch.object(no.Node, 'ids_by_cluster')
@mock.patch.object(cm.Cluster, 'load')
@mock.patch.object(co.Cluster, 'find')
def test_cluster_op(self, mock_find, mock_cluster, mock_nodes, mock_action,
mock_start):
x_db_cluster = mock.Mock(id='12345678AB')
mock_find.return_value = x_db_cluster
x_schema = mock.Mock()
x_profile = mock.Mock(OPERATIONS={'dance': x_schema})
x_cluster = mock.Mock(id='12345678AB')
x_cluster.rt = {'profile': x_profile}
mock_cluster.return_value = x_cluster
mock_action.return_value = 'ACTION_ID'
params = {'style': 'tango'}
filters = {'role': 'slave'}
mock_nodes.return_value = ['NODE1', 'NODE2']
req = orco.ClusterOperationRequest(identity='FAKE_CLUSTER',
operation='dance',
params=params,
filters=filters)
result = self.svc.cluster_op(self.ctx, req.obj_to_primitive())
self.assertEqual({'action': 'ACTION_ID'}, result)
mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER')
mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster)
x_schema.validate.assert_called_once_with({'style': 'tango'})
mock_nodes.assert_called_once_with(self.ctx, '12345678AB',
filters={'role': 'slave'})
mock_action.assert_called_once_with(
self.ctx, '12345678AB', consts.CLUSTER_OPERATION,
name='cluster_dance_12345678',
cluster_id='12345678AB',
cause=consts.CAUSE_RPC,
status=am.Action.READY,
inputs={
'operation': 'dance',
'params': {'style': 'tango'},
'nodes': ['NODE1', 'NODE2']
}
)
mock_start.assert_called_once_with()
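    # Note: the action name embeds only the first eight characters of the
    # cluster id ('12345678' from '12345678AB'), matching the name-shortening
    # behaviour asserted above.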
@mock.patch.object(co.Cluster, 'find')
def test_cluster_op_cluster_not_found(self, mock_find):
mock_find.side_effect = exc.ResourceNotFound(
type='cluster', id='Bogus')
req = orco.ClusterOperationRequest(identity='Bogus', operation='dance')
ex = self.assertRaises(rpc.ExpectedException,
self.svc.cluster_op,
self.ctx, req.obj_to_primitive())
self.assertEqual(exc.ResourceNotFound, ex.exc_info[0])
self.assertEqual("The cluster 'Bogus' could not be found.",
str(ex.exc_info[1]))
mock_find.assert_called_once_with(self.ctx, 'Bogus')
@mock.patch.object(cm.Cluster, 'load')
@mock.patch.object(co.Cluster, 'find')
def test_cluster_op_unsupported_operation(self, mock_find, mock_cluster):
x_db_cluster = mock.Mock(id='12345678AB')
mock_find.return_value = x_db_cluster
x_schema = mock.Mock()
x_profile = mock.Mock(OPERATIONS={'dance': x_schema}, type='cow')
x_cluster = mock.Mock()
x_cluster.rt = {'profile': x_profile}
mock_cluster.return_value = x_cluster
req = orco.ClusterOperationRequest(identity='node1', operation='swim')
ex = self.assertRaises(rpc.ExpectedException,
self.svc.cluster_op,
self.ctx, req.obj_to_primitive())
self.assertEqual(exc.BadRequest, ex.exc_info[0])
self.assertEqual("The requested operation 'swim' is not supported "
"by the profile type 'cow'.",
str(ex.exc_info[1]))
mock_find.assert_called_once_with(self.ctx, 'node1')
mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster)
@mock.patch.object(cm.Cluster, 'load')
@mock.patch.object(co.Cluster, 'find')
def test_cluster_op_bad_parameters(self, mock_find, mock_cluster):
x_db_cluster = mock.Mock(id='12345678AB')
mock_find.return_value = x_db_cluster
x_schema = mock.Mock()
x_schema.validate.side_effect = exc.ESchema(message='Boom')
x_profile = mock.Mock(OPERATIONS={'dance': x_schema})
x_cluster = mock.Mock()
x_cluster.rt = {'profile': x_profile}
mock_cluster.return_value = x_cluster
req = orco.ClusterOperationRequest(identity='node1', operation='dance',
params={'style': 'tango'})
ex = self.assertRaises(rpc.ExpectedException,
self.svc.cluster_op,
self.ctx, req.obj_to_primitive())
self.assertEqual(exc.BadRequest, ex.exc_info[0])
self.assertEqual("Boom.", str(ex.exc_info[1]))
mock_find.assert_called_once_with(self.ctx, 'node1')
mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster)
x_schema.validate.assert_called_once_with({'style': 'tango'})
@mock.patch.object(dispatcher, 'start_action')
@mock.patch.object(am.Action, 'create')
@mock.patch.object(no.Node, 'ids_by_cluster')
@mock.patch.object(cm.Cluster, 'load')
@mock.patch.object(co.Cluster, 'find')
def test_cluster_op_no_parameters(self, mock_find, mock_cluster,
mock_nodes, mock_action, mock_start):
x_db_cluster = mock.Mock(id='12345678AB')
mock_find.return_value = x_db_cluster
x_schema = mock.Mock()
x_profile = mock.Mock(OPERATIONS={'dance': x_schema})
x_cluster = mock.Mock(id='12345678AB')
x_cluster.rt = {'profile': x_profile}
mock_cluster.return_value = x_cluster
mock_action.return_value = 'ACTION_ID'
filters = {'role': 'slave'}
mock_nodes.return_value = ['NODE1', 'NODE2']
req = orco.ClusterOperationRequest(identity='FAKE_CLUSTER',
operation='dance',
filters=filters)
result = self.svc.cluster_op(self.ctx, req.obj_to_primitive())
self.assertEqual({'action': 'ACTION_ID'}, result)
mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER')
mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster)
self.assertEqual(0, x_schema.validate.call_count)
mock_nodes.assert_called_once_with(self.ctx, '12345678AB',
filters={'role': 'slave'})
mock_action.assert_called_once_with(
self.ctx, '12345678AB', consts.CLUSTER_OPERATION,
name='cluster_dance_12345678',
cluster_id='12345678AB',
cause=consts.CAUSE_RPC,
status=am.Action.READY,
inputs={
'operation': 'dance',
'params': {},
'nodes': ['NODE1', 'NODE2']
}
)
mock_start.assert_called_once_with()
@mock.patch.object(dispatcher, 'start_action')
@mock.patch.object(am.Action, 'create')
@mock.patch.object(no.Node, 'ids_by_cluster')
@mock.patch.object(cm.Cluster, 'load')
@mock.patch.object(co.Cluster, 'find')
def test_cluster_op_no_filters(self, mock_find, mock_cluster,
mock_nodes, mock_action, mock_start):
x_db_cluster = mock.Mock(id='12345678AB')
mock_find.return_value = x_db_cluster
x_schema = mock.Mock()
x_profile = mock.Mock(OPERATIONS={'dance': x_schema})
x_cluster = mock.Mock(id='12345678AB')
x_cluster.rt = {'profile': x_profile}
mock_cluster.return_value = x_cluster
mock_action.return_value = 'ACTION_ID'
mock_nodes.return_value = ['NODE1', 'NODE2']
req = orco.ClusterOperationRequest(identity='FAKE_CLUSTER',
operation='dance')
result = self.svc.cluster_op(self.ctx, req.obj_to_primitive())
self.assertEqual({'action': 'ACTION_ID'}, result)
mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER')
mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster)
self.assertEqual(0, x_schema.validate.call_count)
mock_nodes.assert_called_once_with(self.ctx, '12345678AB')
mock_action.assert_called_once_with(
self.ctx, '12345678AB', consts.CLUSTER_OPERATION,
name='cluster_dance_12345678',
cluster_id='12345678AB',
cause=consts.CAUSE_RPC,
status=am.Action.READY,
inputs={
'operation': 'dance',
'params': {},
'nodes': ['NODE1', 'NODE2']
}
)
mock_start.assert_called_once_with()
@mock.patch.object(am.Action, 'create')
@mock.patch.object(no.Node, 'ids_by_cluster')
@mock.patch.object(cm.Cluster, 'load')
@mock.patch.object(co.Cluster, 'find')
def test_cluster_op_bad_filters(self, mock_find, mock_cluster,
mock_nodes, mock_action):
x_db_cluster = mock.Mock()
mock_find.return_value = x_db_cluster
x_schema = mock.Mock()
x_profile = mock.Mock(OPERATIONS={'dance': x_schema})
x_cluster = mock.Mock(id='12345678AB')
x_cluster.rt = {'profile': x_profile}
mock_cluster.return_value = x_cluster
mock_action.return_value = 'ACTION_ID'
mock_nodes.return_value = ['NODE1', 'NODE2']
filters = {'shape': 'round'}
req = orco.ClusterOperationRequest(identity='FAKE_CLUSTER',
operation='dance',
filters=filters)
ex = self.assertRaises(rpc.ExpectedException,
self.svc.cluster_op,
self.ctx, req.obj_to_primitive())
self.assertEqual(exc.BadRequest, ex.exc_info[0])
self.assertEqual("Filter key 'shape' is unsupported.",
str(ex.exc_info[1]))
mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER')
mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster)
self.assertEqual(0, x_schema.validate.call_count)
self.assertEqual(0, mock_nodes.call_count)
self.assertEqual(0, mock_action.call_count)
@mock.patch.object(am.Action, 'create')
@mock.patch.object(no.Node, 'ids_by_cluster')
@mock.patch.object(cm.Cluster, 'load')
@mock.patch.object(co.Cluster, 'find')
def test_cluster_op_no_nodes_found(self, mock_find, mock_cluster,
mock_nodes, mock_action):
x_db_cluster = mock.Mock()
mock_find.return_value = x_db_cluster
x_schema = mock.Mock()
x_profile = mock.Mock(OPERATIONS={'dance': x_schema})
x_cluster = mock.Mock(id='12345678AB')
x_cluster.rt = {'profile': x_profile}
mock_cluster.return_value = x_cluster
mock_nodes.return_value = []
mock_action.return_value = 'ACTION_ID'
filters = {'role': 'slave'}
req = orco.ClusterOperationRequest(identity='FAKE_CLUSTER',
operation='dance', filters=filters)
ex = self.assertRaises(rpc.ExpectedException,
self.svc.cluster_op,
self.ctx, req.obj_to_primitive())
self.assertEqual(exc.BadRequest, ex.exc_info[0])
self.assertEqual("No node (matching the filter) could be found.",
str(ex.exc_info[1]))
mock_find.assert_called_once_with(self.ctx, 'FAKE_CLUSTER')
mock_cluster.assert_called_once_with(self.ctx, dbcluster=x_db_cluster)
mock_nodes.assert_called_once_with(self.ctx, '12345678AB',
filters={'role': 'slave'})
self.assertEqual(0, mock_action.call_count)
| 0 |
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Commander gathers Variables in order to set them with an UpdateList.
The command process can be AllSetsForEach (i.e. the updates are applied
successively for each gathered Variable) or EachSetForAll (i.e. each set
is mapped over all of them).
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Itemizers.Pather"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
from ShareYourSystem.Standards.Itemizers import Getter,Setter
#</ImportSpecificModules>
#<DefineLocals>
CommandPrefixStr="--"
CommandWalkStr="..."
CommandSelfStr="/"
CommandAddStr="+"
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class CommanderClass(BaseClass):
def default_init(
self,
_CommandingGetVariable=None,
_CommandingSetVariable=None,
_CommandingOrderStr="AllSetsForEachGet",
_CommandingBeforeWalkRigidBool=False,
_CommandingAfterWalkRigidBool=False,
_CommandingBeforeSelfRigidBool=False,
_CommandingAfterSelfRigidBool=False,
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_command(self):
""" """
#/####################/#
# Adapt the type for getting things to command
#
#debug
'''
self.debug(
[
'Adapt the type for getting things to command',
("self.",self,[
'CommandingGetVariable',
'CommandingSetVariable'
])
]
)
'''
#Check
if type(self.CommandingGetVariable)!=list:
#debug
'''
self.debug(
[
'We get nicely',
('self.',self,['CommandingGetVariable'])
]
)
'''
#get
CommandedValueVariablesList=self[
self.CommandingGetVariable
]
#Check
if type(CommandedValueVariablesList)!=list:
CommandedValueVariablesList=[CommandedValueVariablesList]
else:
#map a get
CommandedValueVariablesList=map(
lambda __CommandingGetVariable:
self[__CommandingGetVariable],
self.CommandingGetVariable
)
#flat maybe
CommandedValueVariablesList=SYS.flat(CommandedValueVariablesList)
#filter
CommandedValueVariablesList=SYS.filterNone(CommandedValueVariablesList)
#debug
'''
self.debug(
[
'in the end, CommandedValueVariablesList is ',
SYS._str(CommandedValueVariablesList)
]
)
'''
#/###################/#
# Check if we have to walk before
#
#Check
if self.CommandingBeforeWalkRigidBool:
#debug
'''
self.debug(
[
'we are going to walk before the command',
'CommandedValueVariablesList is '+SYS._str(CommandedValueVariablesList),
'self.getDoing(SYS.CommanderClass).values() is '+SYS._str
(self.getDoing(
SYS.CommanderClass).values())
]
)
'''
#Debug
'''
for __CommandedValueVariable in CommandedValueVariablesList:
#debug
self.debug(
'__CommandedValueVariable is '+SYS._str( __CommandedValueVariable)
)
#set
__CommandedValueVariable.set(
'GettingNewBool',False
).command(
*self.getDoing().values()
).set(
'GettingNewBool',True
)
'''
#set
CommandedOrderedDict=self.getDoing(
SYS.CommanderClass
)
CommandedOrderedDict['CommandingBeforeSelfRigidBool']=False
CommandedLiargVariablesList=CommandedOrderedDict.values()
		#map the recursion, but be careful not to set new things to walk into... otherwise it is an infinite walk !
map(
lambda __CommandedValueVariable:
__CommandedValueVariable.set(
'GettingNewBool',False
).command(
*CommandedLiargVariablesList
).set(
'GettingNewBool',True
),
CommandedValueVariablesList
)
#/####################/#
# Adapt the type for setting things in the commanded variables
#
#debug
'''
self.debug(
[
'Adapt the type for setting things in the commanded variables',
("self.",self,['CommandingSetVariable'])
]
)
'''
#Check
if type(self.CommandingSetVariable)!=list:
#Check
if hasattr(self.CommandingSetVariable,'items'):
#items
CommandedSetVariablesList=self.CommandingSetVariable.items()
elif type(self.CommandingSetVariable
)==str and self.CommandingSetVariable.startswith(
Getter.GetCallPrefixStr
):
#list
CommandedSetVariablesList=[
('get',self.CommandingSetVariable)
]
else:
#list
CommandedSetVariablesList=[
self.CommandingSetVariable
]
else:
#alias
CommandedSetVariablesList=self.CommandingSetVariable
#debug
'''
self.debug(
[
'in the end, CommandedSetVariablesList is ',
SYS._str(CommandedSetVariablesList)
]
)
'''
#/###################/#
# Ok now we command locally
#
#Check
if self.CommandingBeforeSelfRigidBool:
#debug
'''
self.debug(
'We command before self here'
)
'''
#add
self[Setter.SetMapStr](CommandedSetVariablesList)
#Check for the order
if self.CommandingOrderStr=="AllSetsForEachGet":
#map
map(
lambda __CommandedValueVariable:
map(
lambda __CommandedSetVariable:
__CommandedValueVariable.set(
*__CommandedSetVariable
),
CommandedSetVariablesList
),
CommandedValueVariablesList
)
elif self.CommandingOrderStr=="EachSetForAllGets":
#map
map(
lambda __CommandedSetVariable:
map(
lambda __CommandedValueVariables:
__CommandedValueVariables.set(
*__CommandedSetVariable
),
CommandedValueVariablesList
),
CommandedSetVariablesList
)
#Check
if self.CommandingAfterSelfRigidBool:
#debug
'''
self.debug(
'We command after self here'
)
'''
#add
self[Setter.SetMapStr](CommandedSetVariablesList)
#/###################/#
# And we check for a walk after
#
#Check
if self.CommandingAfterWalkRigidBool:
#debug
'''
self.debug(
[
'we are going to walk the command',
'CommandedValueVariablesList is '+SYS._str(CommandedValueVariablesList)
]
)
'''
#Debug
'''
for __CommandedValueVariable in CommandedValueVariablesList:
#debug
self.debug(
'__CommandedValueVariable is '+SYS._str( __CommandedValueVariable)
)
#set
__CommandedValueVariable.set(
'GettingNewBool',False
).command(
*self.getDoing().values()
).set(
'GettingNewBool',True
)
'''
#set
CommandedOrderedDict=self.getDoing(
SYS.CommanderClass
)
CommandedOrderedDict['CommandingBeforeSelfRigidBool']=False
CommandedLiargVariablesList=CommandedOrderedDict.values()
		#map the recursion, but be careful not to set new things to walk into... otherwise it is an infinite walk !
map(
lambda __CommandedValueVariable:
__CommandedValueVariable.set(
'GettingNewBool',False
).command(
*CommandedLiargVariablesList
).set(
'GettingNewBool',True
),
CommandedValueVariablesList
)
def mimic_get(self):
#debug
'''
self.debug(
('self.',self,[
'GettingKeyVariable',
])
)
'''
#Check
if type(self.GettingKeyVariable)==str:
#Check
if self.GettingKeyVariable.startswith(CommandAddStr):
#split
AddGetKeyStrsList=self.GettingKeyVariable.split(CommandAddStr)[1:]
#debug
'''
self.debug(
[
'We map get',
'AddGetKeyStrsList is '+str(AddGetKeyStrsList)
]
)
'''
#map get
AddVariablesList=self[
Getter.GetMapStr
](*AddGetKeyStrsList).ItemizedMapValueVariablesList
#debug
'''
self.debug(
[
'We sum now',
'AddVariablesList is '+SYS._str(AddVariablesList)
]
)
'''
#map get
self.GettedValueVariable=SYS.sum(AddVariablesList)
#return
return {'HookingIsBool':False}
#return
return BaseClass.get(self)
def mimic_set(self):
#debug
'''
self.debug(
('self.',self,[
'SettingKeyVariable',
'SettingValueVariable'
])
)
'''
#Check
if type(self.SettingKeyVariable)==str:
#Check
if self.SettingKeyVariable.startswith(
CommandPrefixStr
):
#debug
'''
self.debug(
'We command here'
)
'''
#deprefix
CommandGetKeyStr=SYS.deprefix(
self.SettingKeyVariable,
CommandPrefixStr
)
#Check
if CommandGetKeyStr.startswith(CommandWalkStr):
#debug
'''
self.debug(
'We command-walk here'
)
'''
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandWalkStr
),
self.SettingValueVariable,
_AfterWalkRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
elif CommandGetKeyStr.startswith(CommandSelfStr+CommandWalkStr):
#debug
'''
self.debug(
'We command-self-walk here'
)
'''
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandSelfStr+CommandWalkStr
),
self.SettingValueVariable,
_AfterWalkRigidBool=True,
_SelfBool=True
)
#stop the setting
return {'HookingIsBool':False}
else:
#command
self.command(
CommandGetKeyStr,
self.SettingValueVariable
)
#stop the setting
return {'HookingIsBool':False}
#Check
elif self.SettingKeyVariable.startswith(
CommandWalkStr
):
#debug
'''
self.debug(
'We walk-command here'
)
'''
CommandGetKeyStr=SYS.deprefix(
self.SettingKeyVariable,
CommandWalkStr
)
#Check
if CommandGetKeyStr.startswith(CommandPrefixStr):
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandPrefixStr
),
self.SettingValueVariable,
_BeforeWalkRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
elif CommandGetKeyStr.startswith(CommandSelfStr):
#command
self.command(
SYS.deprefix(
CommandGetKeyStr,
CommandSelfStr+CommandPrefixStr
),
self.SettingValueVariable,
_BeforeWalkRigidBool=True,
_AfterSelfRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
#Check
elif self.SettingKeyVariable.startswith(
CommandSelfStr+CommandWalkStr+CommandPrefixStr
):
#command
self.command(
SYS.deprefix(
self.SettingKeyVariable,
CommandSelfStr+CommandWalkStr+CommandPrefixStr
),
self.SettingValueVariable,
_BeforeWalkRigidBool=True,
_BeforeSelfRigidBool=True
)
#stop the setting
return {'HookingIsBool':False}
#debug
'''
self.debug(
[
'Call the base set method',
'BaseClass is '+str(BaseClass),
('self.',self,['SettingKeyVariable'])
]
)
'''
#Call the base method
BaseClass.set(self)
#</DefineClass>
#<DefinePrint>
CommanderClass.PrintingClassSkipKeyStrsList.extend(
[
'CommandingGetVariable',
'CommandingSetVariable',
'CommandingOrderStr',
'CommandingBeforeWalkRigidBool',
'CommandingAfterWalkRigidBool',
'CommandingBeforeSelfRigidBool',
'CommandingAfterSelfRigidBool'
]
)
#</DefinePrint>
#
# Copyright (C) 2014 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Forms for PortAdmin"""
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms_foundation.layout import Layout, Row, Column, Submit
class SearchForm(forms.Form):
"""Form for searching for ip-devices and interfaces"""
query = forms.CharField(
label='',
widget=forms.TextInput(
attrs={'placeholder': 'Search for ip device or interface'}))
def __init__(self, *args, **kwargs):
super(SearchForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = 'portadmin-index'
self.helper.form_method = 'GET'
self.helper.layout = Layout(
Row(
Column('query', css_class='medium-9'),
Column(Submit('submit', 'Search', css_class='postfix'),
css_class='medium-3'),
css_class='collapse'
)
)
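
# Usage sketch (illustrative, not part of the original module): with
# django-crispy-forms installed, a template renders this form and its
# FormHelper layout via the crispy tag:
#
#     {% load crispy_forms_tags %}
#     {% crispy form %}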
| 0 |
"""
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'travelize.com',
'name': 'travelize'
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'example.com',
'name': 'example.com'
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
| 0 |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Neutron Networks.
"""
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks \
import forms as project_forms
from openstack_dashboard.dashboards.project.networks.ports \
import tables as port_tables
from openstack_dashboard.dashboards.project.networks.subnets \
import tables as subnet_tables
from openstack_dashboard.dashboards.project.networks \
import tables as project_tables
from openstack_dashboard.dashboards.project.networks \
import workflows as project_workflows
class IndexView(tables.DataTableView):
table_class = project_tables.NetworksTable
template_name = 'project/networks/index.html'
def get_data(self):
try:
tenant_id = self.request.user.tenant_id
networks = api.neutron.network_list_for_tenant(self.request,
tenant_id)
except Exception:
networks = []
msg = _('Network list can not be retrieved.')
exceptions.handle(self.request, msg)
for n in networks:
n.set_id_as_name_if_empty()
return networks
class CreateView(workflows.WorkflowView):
workflow_class = project_workflows.CreateNetwork
def get_initial(self):
pass
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdateNetwork
template_name = 'project/networks/update.html'
context_object_name = 'network'
success_url = reverse_lazy("horizon:project:networks:index")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context["network_id"] = self.kwargs['network_id']
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
network_id = self.kwargs['network_id']
try:
return api.neutron.network_get(self.request, network_id)
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve network details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
network = self._get_object()
return {'network_id': network['id'],
'tenant_id': network['tenant_id'],
'name': network['name'],
'admin_state': network['admin_state_up']}
class DetailView(tables.MultiTableView):
table_classes = (subnet_tables.SubnetsTable, port_tables.PortsTable)
template_name = 'project/networks/detail.html'
def get_subnets_data(self):
try:
network = self._get_data()
subnets = api.neutron.subnet_list(self.request,
network_id=network.id)
except Exception:
subnets = []
msg = _('Subnet list can not be retrieved.')
exceptions.handle(self.request, msg)
for s in subnets:
s.set_id_as_name_if_empty()
return subnets
def get_ports_data(self):
try:
network_id = self.kwargs['network_id']
ports = api.neutron.port_list(self.request, network_id=network_id)
except Exception:
ports = []
msg = _('Port list can not be retrieved.')
exceptions.handle(self.request, msg)
for p in ports:
p.set_id_as_name_if_empty()
return ports
@memoized.memoized_method
def _get_data(self):
try:
network_id = self.kwargs['network_id']
network = api.neutron.network_get(self.request, network_id)
network.set_id_as_name_if_empty(length=0)
except Exception:
msg = _('Unable to retrieve details for network "%s".') \
% (network_id)
exceptions.handle(self.request, msg,
redirect=self.get_redirect_url())
return network
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
network = self._get_data()
context["network"] = network
table = project_tables.NetworksTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(network)
return context
@staticmethod
def get_redirect_url():
return reverse_lazy('horizon:project:networks:index')
| 0 |
# encoding: cinje
: def searchwindow ctx
<div id="searchModal" class="modal fade" role="dialog">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal">×</button>
<h4 class="modal-title"> Advanced Search</h4>
</div>
<div class="modal-body">
<p>You may use SQL % and _ wildcards</p>
<img id='ricon' style='display:none' src='/public/icons/catgif8.gif'>
<form id='searchform' action='/search' class='ajax' method='post' data-append='#main-content' role='search'>
<div class='form-group'>
<label for='advsearchtype'>Search For</label>
<select class="form-control" id="advsearchtype" name="advsearchtype">
<option>Artist</option>
<option>Album</option>
<option>Title</option>
</select>
</div>
<input type="search" class="form-control" id="advsearchtext" name="advsearchtext" placeholder="Search">
<button type="submit" class="btn btn-primary">Search</button>
<button type="button" class="btn btn-danger" data-dismiss="modal">Close</button>
</form>
</div>
</div>
</div>
</div>
: end
| 0.095833 |
import king_phisher.client.plugins as plugins
import king_phisher.client.gui_utilities as gui_utilities
try:
from bs4 import BeautifulSoup
except ImportError:
has_bs4 = False
else:
has_bs4 = True
class Plugin(plugins.ClientPlugin):
authors = ['Mike Stringer']
classifiers = ['Plugin :: Client :: Email :: Spam Evasion']
title = 'Message Plaintext'
description = """
Parse and include a plaintext version of an email based on the HTML version.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
options = []
req_min_version = '1.10.0'
version = '1.0'
req_packages = {
'bs4' : has_bs4
}
def initialize(self):
mailer_tab = self.application.main_tabs['mailer']
self.signal_connect('message-create', self.signal_message_create, gobject=mailer_tab)
self.signal_connect('send-precheck', self.signal_send_precheck, gobject=mailer_tab)
return True
def signal_message_create(self, mailer_tab, target, message):
html_part = next((part for part in message.walk() if part.get_content_type().startswith('text/html')), None)
if html_part is None:
self.logger.error('unable to generate plaintext message from HTML (failed to find text/html part)')
return False
text_part = next((part for part in message.walk() if part.get_content_type().startswith('text/plain')), None)
if text_part is None:
self.logger.error('unable to generate plaintext message from HTML (failed to find text/plain part)')
return False
soup = BeautifulSoup(html_part.payload_string, 'html.parser')
plaintext_payload_string = soup.get_text()
for a in soup.find_all('a', href=True):
			#a.string is None for anchors containing nested markup; skip those
			if a.string and 'mailto:' not in a.string:
plaintext_payload_string = plaintext_payload_string.replace(a.string, a['href'])
text_part.payload_string = plaintext_payload_string
self.logger.debug('plaintext modified from html successfully')
def signal_send_precheck(self, mailer_tab):
if 'message_padding' not in self.application.plugin_manager.enabled_plugins:
return True
proceed = gui_utilities.show_dialog_yes_no(
'Warning: You are running a conflicting plugin!',
self.application.get_active_window(),
'The "message_padding" plugin conflicts with "message_plaintext" in such a way '\
+ 'that will cause the message padding to be revealed in the plaintext version '\
+ 'of the email. It is recommended you disable one of these plugins, or append '\
+ 'additional line breaks in the HTML to conceal it.\n\n' \
+ 'Do you wish to continue?'
)
return proceed
| 0.026811 |
import sensor, image, time
from pyb import UART
#define
#Lab(Lmin,Lmax,Amin,Amax,Bmin,Bmax)
A4Black = [(0, 20, -10, 10, -5, 10)]
A4White = [(90,100,-10,10,-5,5)]
A4Red = [(40,65,40,65,0,25)]
Black = [(15,35,-10,20,-30,0)]
White = [(90,100,-5,5,-5,10)]
#thresholds = [A4Black,A4White,A4Red]
MID_POINT = [160,120]
def InitColorTrace():
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
def BlobX(x):
return x[0]
def BlobY(x):
return x[1]
def BlobL(x):
return x[2]
def BlobW(x):
return x[3]
def BlobMid(x):
return x[4]
def BlobPix(x):
return x[5]
def BlobPos(x):
Dict = {}
Dict['LU']=[x[0],x[1]]
Dict['LB']=[x[0],x[1]-x[2]]
Dict['RU']=[x[0]+x[3],x[1]]
Dict['RB']=[x[0]+x[3],x[1]-x[2]]
Dict['MID']=[x[0]+int(x[3]/2),x[1]-int(x[2]/2)]
Dict['Flag']=1
return Dict
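
#Illustrative sketch (blob values assumed): for a blob tuple whose first four
#fields are x=10, y=50, L=20, W=30, BlobPos returns
#{'LU': [10, 50], 'LB': [10, 30], 'RU': [40, 50], 'RB': [40, 30],
# 'MID': [25, 40], 'Flag': 1}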
def ReturnStr(x):
if len(x)<2:
x = '000'+x
return x
elif len(x)<3:
x = '00'+x
return x
elif len(x)<4:
x = '0'+x
return x
else:
return x[-4:]
def ANO_Send_Speed_S(param):
send_str = 'aaaa0b02'
x = ReturnStr(str(hex(param[0]).replace('x','')))
send_str = send_str + x
y = ReturnStr(str(hex(param[1]).replace('x','')))
send_str = send_str + y
_sum = ReturnStr(str(hex(353+param[0]+param[1]).replace('x','')))
send_str = send_str + _sum
print(send_str)
return send_str
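
#Worked example (illustrative, values assumed): ANO_Send_Speed_S([160, 120])
#builds 'aaaa0b02' + '00a0' (x=160) + '0078' (y=120) + '0279' (checksum
#353 + 160 + 120 = 633 = 0x279), printing and returning 'aaaa0b0200a000780279'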
def FindBall(img,param):
for blob in img.find_blobs(param,pixels_threshold=200,area_threshold=200):
if BlobW(blob)*BlobL(blob)<=1200 and BlobW(blob)*BlobL(blob)>=500:
img.draw_rectangle(blob.rect())
img.draw_cross(blob.cx(),blob.cy())
_dict = BlobPos(blob)
a=_dict['MID']
return ANO_Send_Speed_S(a)
def FindBlackArea(img):
for blob in img.find_blobs(A4Black,pixels_threshold=10000,area_threshold=10000):
if BlobW(blob)*BlobL(blob)<=50000:
img.draw_rectangle(blob.rect())
img.draw_cross(blob.cx(),blob.cy())
#print('BlackArea: ',BlobPos(blob))
def FindBlackAreaForCraft(img):
for blob in img.find_blobs(Black,pixels_threshold=200,area_threshold=200):
if BlobW(blob)*BlobL(blob)<=5000:
img.draw_rectangle(blob.rect())
img.draw_cross(blob.cx(),blob.cy())
return BlobPos(blob)
_dict = {}
_dict['Flag']=0
return _dict
def FindWhiteAreaForCraft(img):
for blob in img.find_blobs(White,pixels_threshold=200,area_threshold=200):
if BlobW(blob)*BlobL(blob)<=5000:
img.draw_rectangle(blob.rect())
img.draw_cross(blob.cx(),blob.cy())
return BlobPos(blob)
_dict = {}
_dict['Flag']=0
return _dict
InitColorTrace()
clock = time.clock()
uart = UART(3, 115200)
while(True):
clock.tick()
img = sensor.snapshot()
white = FindWhiteAreaForCraft(img)
black = FindBlackAreaForCraft(img)
if white['Flag']==1 and black['Flag']==1:
w_p = white['MID']
b_p = black['MID']
if abs(w_p[0]-b_p[0])<=100 and abs(w_p[1]-b_p[1])<=100:
point_x = int((w_p[0]+b_p[0])/2)
point_y = int((w_p[1]+b_p[1])/2)
print('MID:(%d,%d)\n'%(point_x,point_y))
| 0.027316 |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.utils import timezone
from datetime import date, datetime
from pytz import timezone as tz
def datestring_to_datetime(date_str, tz_name="UTC"):
"""
Converts an iso8601 date string to a datetime.datetime object
:param: date_str
:type: str
    :returns: datetime equivalent to string
    :rtype: datetime
"""
if isinstance(date_str, (str)):
fmts = ("%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%S.%f",
"%Y-%m-%dT%H:%M:%SZ", "%Y-%m-%dT%H:%M:%S")
for fmt in fmts:
try:
dt = timezone.make_aware(
datetime.strptime(date_str, fmt), timezone=tz(tz_name))
if dt.year < 1900:
err_msg = (f"Date {date_str} is out of range. "
f"Year must be year >= 1900.")
raise ValueError(err_msg)
return dt
except ValueError:
pass
err_msg = f"Unsupported date format. {date_str}"
raise ValueError(err_msg)
elif isinstance(date_str, datetime):
return date_str # already a date
else:
raise ValueError(f"Got {date_str} expected str.")
def get_relative_week(relative_date, cmp_dt=None, tz_name="UTC"):
"""
Returns week number relative to supplied relative_date. If cmp_dt is
supplied, then returns number of weeks between supplied relative_date and
cmp_dt. If cmp_dt is not supplied, then returns number of weeks between
supplied relative_date and the current utc date.
"""
if cmp_dt is None:
cmp_dt = timezone.make_aware(datetime.now(),
timezone=tz(tz_name))
if isinstance(relative_date, date):
relative_date = timezone.make_aware(
datetime.combine(relative_date, datetime.min.time()),
timezone=tz(tz_name))
days = (cmp_dt - relative_date).days
if days >= 0:
return (days // 7) + 1
return (days // 7)
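
# Illustrative sketch (dates assumed): with relative_date = date(2021, 1, 4)
# and a cmp_dt falling on 2021-01-12 (8 days later), get_relative_week
# returns (8 // 7) + 1 == 2, i.e. the comparison date falls in week 2.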
def get_term_number(quarter_name):
"""
    Returns the term number for the specified quarter name.
    :param quarter_name: name of the quarter
    :type quarter_name: str
"""
quarter_definitions = {
"WINTER": 1,
"SPRING": 2,
"SUMMER": 3,
"AUTUMN": 4,
}
try:
return quarter_definitions[quarter_name.upper()]
except KeyError:
raise ValueError(f"Quarter name {quarter_name} not found. Options are "
f"WINTER, SPRING, SUMMER, and AUTUMN.")
def get_view_name(sis_term_id, week, label):
sis_term_id = sis_term_id.replace("-", "_")
view_name = f"{sis_term_id}_week_{week}_{label}"
return view_name
def chunk_list(seq, size):
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
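
# Illustrative sketch: chunk_list returns a generator of fixed-size slices,
# e.g. list(chunk_list([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]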
| 0 |
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, MultiIndex, date_range
def test_partial_string_timestamp_multiindex():
# GH10331
dr = pd.date_range('2016-01-01', '2016-01-03', freq='12H')
abc = ['a', 'b', 'c']
ix = pd.MultiIndex.from_product([dr, abc])
df = pd.DataFrame({'c1': range(0, 15)}, index=ix)
idx = pd.IndexSlice
# c1
# 2016-01-01 00:00:00 a 0
# b 1
# c 2
# 2016-01-01 12:00:00 a 3
# b 4
# c 5
# 2016-01-02 00:00:00 a 6
# b 7
# c 8
# 2016-01-02 12:00:00 a 9
# b 10
# c 11
# 2016-01-03 00:00:00 a 12
# b 13
# c 14
# partial string matching on a single index
for df_swap in (df.swaplevel(),
df.swaplevel(0),
df.swaplevel(0, 1)):
df_swap = df_swap.sort_index()
just_a = df_swap.loc['a']
result = just_a.loc['2016-01-01']
expected = df.loc[idx[:, 'a'], :].iloc[0:2]
expected.index = expected.index.droplevel(1)
tm.assert_frame_equal(result, expected)
# indexing with IndexSlice
result = df.loc[idx['2016-01-01':'2016-02-01', :], :]
expected = df
tm.assert_frame_equal(result, expected)
# match on secondary index
result = df_swap.loc[idx[:, '2016-01-01':'2016-01-01'], :]
expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]]
tm.assert_frame_equal(result, expected)
# Even though this syntax works on a single index, this is somewhat
# ambiguous and we don't want to extend this behavior forward to work
# in multi-indexes. This would amount to selecting a scalar from a
# column.
with pytest.raises(KeyError):
df['2016-01-01']
# partial string match on year only
result = df.loc['2016']
expected = df
tm.assert_frame_equal(result, expected)
# partial string match on date
result = df.loc['2016-01-01']
expected = df.iloc[0:6]
tm.assert_frame_equal(result, expected)
# partial string match on date and hour, from middle
result = df.loc['2016-01-02 12']
expected = df.iloc[9:12]
tm.assert_frame_equal(result, expected)
# partial string match on secondary index
result = df_swap.loc[idx[:, '2016-01-02'], :]
expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]]
tm.assert_frame_equal(result, expected)
# tuple selector with partial string match on date
result = df.loc[('2016-01-01', 'a'), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# Slicing date on first level should break (of course)
with pytest.raises(KeyError):
df_swap.loc['2016-01-01']
# GH12685 (partial string with daily resolution or below)
dr = date_range('2013-01-01', periods=100, freq='D')
ix = MultiIndex.from_product([dr, ['a', 'b']])
df = DataFrame(np.random.randn(200, 1), columns=['A'], index=ix)
result = df.loc[idx['2013-03':'2013-03', :], :]
expected = df.iloc[118:180]
tm.assert_frame_equal(result, expected)
| 0 |
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Domain Classes for generating repository metadata from RHN info.
#
class Channel:
""" A pure data object representing an RHN Channel. """
def __init__(self, channel_id):
self.id = channel_id
self.label = None
self.name = None
self.checksum_type = None
self.num_packages = 0
self.packages = []
self.errata = []
self.updateinfo = None
self.comps = None
class Package:
""" A pure data object representing an RHN Package. """
def __init__(self, package_id):
self.id = package_id
self.name = None
self.version = None
self.release = None
self.epoch = 0
self.arch = None
self.checksum = None
self.checksum_type = None
self.summary = None
self.description = None
self.vendor = None
self.build_time = None
self.package_size = None
self.payload_size = None
self.installed_size = None
self.header_start = None
self.header_end = None
self.package_group = None
self.build_host = None
self.copyright = None
self.filename = None
self.source_rpm = None
self.files = []
self.provides = []
self.requires = []
self.conflicts = []
self.obsoletes = []
self.supplements = []
self.enhances = []
self.suggests = []
self.recommends = []
self.changelog = []
class Erratum:
""" An object representing a single update to a channel. """
def __init__(self, erratum_id):
self.id = erratum_id
self.readable_id = None
self.title = None
self.advisory_type = None
self.version = None
self.issued = None
self.updated = None
self.synopsis = None
self.description = None
self.bz_references = []
self.cve_references = []
# We don't want to pickle a single package multiple times,
# So here's a list to store the ids and we can swap out the
# Actual objects when its time to pickle. This should be replaced
        # With something that keeps the concepts separate.
self.package_ids = []
self.packages = []
class Comps:
def __init__(self, comps_id, filename):
self.id = comps_id
self.filename = filename
| 0 |
#!/usr/bin/python
import argparse, csv, os, sys, re #std python imports
import numpy as np
from sklearn.ensemble import RandomForestClassifier #RF classifier from SKlearn
from sklearn.cross_validation import cross_val_score #validation stats from SKlearn
import itertools
import multiprocessing as mp #allows for parallelization of the classification to speed up script.
#########################
#Args
#########################
parser=argparse.ArgumentParser(description="Runs RandomForest classifier on WHAM output VCF files to classify structural variant type. Appends WC and WP flags for user to explore structural variant calls. The output is a VCF file written to standard out.")
parser.add_argument("VCF", type=str, help="User supplied VCF with WHAM variants; VCF needs AT field data")
parser.add_argument("training_matrix", type=str, help="training dataset for classifier derived from simulated read dataset")
parser.add_argument("--filter", type=str, help="optional arg for filtering type one of : ['sensitive', 'specific']; defaults to output all data if filtering if argument is not supplied.")
parser.add_argument("--proc", type=str, help="optional arg for number of proceses to run with classifier; higher thread number will increase speed of classifier; defaults to 1")
parser.add_argument("--minclassfreq", default=0, type=float, help="optional arg for minimum frequency required for classification, otherwise variant set as unknown (UNK). Default is to classify everything.")
arg=parser.parse_args()
#########################
#Functions
#########################
#class object for processing VCF files.
class vcf:
"""
class vcf generates an iterator for looping through a vcf
file. Can add various functionalities here to further process
the vcf in a number of ways
chunksize = number of lines to process at once for parallel applications.
"""
def __init__(self,file):
self.f = open(file,'r')
#proces all of the header lines of the vcf.
        header = True #boolean to continue looping through the header
info_boolean = False #need a boolean to trigger and append new INFO fields
while header:
self.line = self.f.readline()
line = self.line.strip()
line = line.split("\t") #split line on tabs
if line[0][0] == '#': #process header lines
if re.search("##FORMAT", line[0]) and info_boolean == False: #first instance of ##FORMAT..
#need to append new INFO fields for the corresponding data
print '##INFO=<ID=WC,Number=1,Type=String,Description="WHAM classifier variant type">'
print '##INFO=<ID=WP,Number=4,Type=Float,Description="WHAM probability estimate for each structural variant classification from RandomForest model">'
                    info_boolean = True #flip boolean so the new INFO headers are only printed once
print "\t".join( line ) #print results to stdout
else:
header = False #break out of the loop
def __iter__(self):
return self
def next(self, chunksize=5000):
cnt = 0 #boolean for chunking.
return_array = [] #initialize empty array to store line data.
#check here if we are currently on last line, and raise StopIteration to exit next()
        if len(self.line) == 0:
raise StopIteration
while cnt < chunksize:
line = self.line
if len( line ) == 0:
return( return_array )
break #break out of loop because we are at last line in file.
else:
return_array.append( line )
self.line = self.f.readline()
cnt += 1
return( return_array )
#parse the targets for ML. converts text list of classified data
#into a numerical dataset with links to the classified names
def parse_targets( target ):
"""
target = list of factors to be turned into numerical classifiers.
for machine learning classifiction. ie. converts INR, DEL, INV,
DUP into integer factors
"""
target = np.array(target) #convert to np array for ease in processing
names = np.unique( target ) #unique names of SV types (factors)
#now iterate through and classify to an integer for SKlearn
cnt = 0
target_numerical = np.zeros( target.shape[0] ) #generate empty dataset
for name in names:
idx = np.where( name == target )
target_numerical[ idx ] = cnt
cnt += 1
#setup return data structure
RV = {'names': names, 'target': target_numerical}
#can use index of SV type in 'names' to get text based variant
#call from 'target', where they've been converted to integers.
return( RV )
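
#Illustrative sketch (labels assumed): parse_targets(['DEL','DUP','DEL','INV'])
#returns {'names': array(['DEL','DUP','INV']), 'target': array([0., 1., 0., 2.])},
#so dataset['target_names'][int(code)] maps an integer class back to its SV type.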
#method to run observed data through the trained model to output
#a vcf friendly return of classified variant call and the prediction
#probabilities for each call
def classify_data( _x, clf, names, minclassfreq=None ):
"""
_x = pass the col 8 from vcf
clf = machine learning object
names = string names, zero indexed of variant calls.
"""
_x = np.array(_x)
#pdb.set_trace()
# index = [16,17,18]
# _x = map(lambda(x):_x[x], index)
class_idx = int( clf.predict(_x) )#predict classifier. can link back to dataset['target_names']
prediction = names[ class_idx ] #lookup text based name for classification
class_probs = clf.predict_proba(_x)[0] #gives weights for your predictions 1:target_names
#if minclass is set and is below threshold, we change prediction to UKN - unknown
if minclassfreq and class_probs[class_idx] < minclassfreq:
prediction = "UKN" # set as unknown, not enough evidence for classification
#convert back to text comma separated list
class_str = ",".join( [ str(i) for i in class_probs ] )
#this is a terrible hack that make my soul hurt, but gets the job done for
# dels called dups.
#parse the two data fields into a string so they can be appended to the vcf file.
return_str = "WC=" + prediction + ";WP=" + class_str
return( return_str )
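
#Illustrative sketch (names and probabilities assumed): with
#names == ['DEL','DUP','INR','INV'] and predicted class probabilities
#[0.1, 0.7, 0.1, 0.1], classify_data returns 'WC=DUP;WP=0.1,0.7,0.1,0.1'.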
#A general parser that takes the data in VCF flag field and parses it into a
#dictionary data structure. Can then obtain whatever data needed by using
# RV['key']; ie. RV['GT'] ...
def parse_vcf_data( vdat ):
"""
vdat = string; column 8 from VCF file with INFO fields.
"""
#start by parsing the vcf data into a dictionary structure
#will be keyed by ["XX="] = data
dict = {}
vdat = vdat.split(";")
for v in vdat:
        try:
            v = v.split("=") #split key and data into list
            dict[ v[0] ] = v[1] #setup dict struct
        except IndexError:
            #INFO flags without '=' would land here; WHAM output is key=value
            raise ValueError( "not a valid VCF INFO field: " + str(v) )
#return the dictionary structure data with info fields as keys.
return( dict )
#takes vcf field data and runs various filtering specs.
def run_filters( vdat, filtering = None ):
"""
vdat - dictionary of INFO field from VCF line
filtering - dictionary of fields to be filtered; defaults to None
Currently implemented for sensitive and specific. Can modify the
filters to return False anytime you want to not report results based
on filteirng criterion from the INFO field.
"""
pass_filt = True #will remain true until we do not satisfy some criterion
if filtering == None:
return( pass_filt ) #break out early
    #sensitive is very permissive
elif filtering == "sensitive":
if int( vdat['NC'] ) < 2:
pass_filt = False
return( pass_filt )
if pass_filt:
return( pass_filt )
#specific mapping is more restrictive on the filtering.
elif filtering == "specific":
if vdat['ED'] == 'nan':
pass_filt = False
return( pass_filt )
BE = vdat['BE'].split(',')
if int(BE[-1]) < 2:
pass_filt = False
return( pass_filt )
if int( vdat['NC'] ) < 3:
pass_filt = False
return( pass_filt )
if pass_filt:
return( pass_filt )
#elif filtering == "user_defined":
# ....
else:
        raise ValueError('Not a valid --filter argument\n please try running with the --help arg for instructions')
#fuction will process line information and classify variant for a line in VCF file.
def process_vcf( info ):
"""
pass izip object of line object and other needed vars
info[0] = list of vcf lines from VCF object iterator.
info[1] = clf object
info[2] = dataset dictionary
info[3] = filter arg supplied by user
info[4] = min classification frequency supplied by user (defaults to None)
"""
#sys.stderr.write("... running process VCF with job id %d \n" %(os.getpid() ) )
#parse the args to function
line_list = info[0] #list of lines from VCF obj
clf = info[1] #randomForest object
dataset = info[2] #dataset with class names
filter = info[3] #filter arg supplied by user
minclassfreq = info[4]
#iterate over lines in the chunked data
return_list = []
for line in line_list:
line = line.strip().split("\t")
vdat = parse_vcf_data( line[7] ) #parse all of vcf appended data
filter_bool = run_filters( vdat, filtering=filter ) #boolean of whether line info passes filters
if filter_bool:
_x = vdat[ 'AT' ].split(",") #create list from data in 'AT' field
_x = _x[1:]
#results = classify_data( _x, clf, dataset['target_names'] )
results = classify_data( _x, clf, dataset['target_names'], minclassfreq )
line[7] = line[7] + ";" + results #append data to correct vcf column
#print "\t".join( line ) #print results to stdout
print_line = "\t".join( line )
return_list.append( print_line )
else:
return_list.append( None )
#return the full list of updated line data
return( return_list )
#########################
#MAIN
#########################
###########
#import and assign training data
###########
#all sklearn data will be in 2D array [ nsamples X nfeatures]
sys.stderr.write("processing training file... \n" )
#iterate over training file. select out the numerical and classifier data
data = []
target = []
with open(arg.training_matrix) as t:
for line in csv.reader(t,delimiter='\t'):
if line[0][0] == "#": #add in this statemnt to print error if user supplies files in wrong order.
raise ValueError('not a valid WHAM training file. perhaps you supplied arguments in the wrong order? \n please try running with --help arg for instructions')
target.append( line[-1] ) #always have targets [classified SV] as last column
#exclude first attribute
d = [ float(i) for i in line[1:-1] ]
data.append( d )
#populate the training dataset in sciKitLearn friendly structure.
dataset = {} #empty data
dataset[ 'data' ] = np.array( data ) #all training data into 2-D array
#turn our target list into integers and return target names
target_parse = parse_targets( target )
dataset[ 'target' ] = np.array( target_parse['target'] )
dataset[ 'target_names' ] = np.array( target_parse['names'] )
###########
#random forest classification
###########
#setup inital params
clf = RandomForestClassifier( n_estimators=500 )
#run RFC on dataset with target classifiers; runs the model fit
clf = clf.fit( dataset['data'], dataset['target'] )
######
#run some sanity checks here.
######
training_stats = clf.feature_importances_ #array of variable importances for model.
#print training stats to user
train_list = [ str(i) for i in training_stats ] #convert to str for printing to user.
sys.stderr.write("\t Training weights for RandomForest classifier \n\t N = %d training variables\n" %( len(train_list) ) )
sys.stderr.write("\t %s\n" %( "\t".join( train_list ) ) )
#need cross validation here. uses sklearn.cross_validation
scores = cross_val_score( clf, dataset['data'], dataset['target'] )
avg_val = scores.mean() * 100 #average cross validation levels
sys.stderr.write("\t results from cross validation:\n\t %f%s \n" %( avg_val, '%' ) )
######
#prediction and output
######
sys.stderr.write("processing VCF file through classifier... \n" )
sys.stderr.write("...running parent process with job id %d \n can use this ID to exit \n" %(os.getpid() ) )
sys.stderr.write("minclassfreq var is set to = %f \n" %( arg.minclassfreq ) )
#load VCF file into class obj
vcf_file = vcf(arg.VCF)
#parse the number of processes to enact
if arg.proc == None:
proc_num = 1
else:
proc_num = int( arg.proc )
###
#setup multiprocessing for the classification of SVs
###
p = mp.Pool( processes = proc_num )
results = p.imap(process_vcf, itertools.izip( vcf_file, itertools.repeat(clf), itertools.repeat(dataset), itertools.repeat(arg.filter), itertools.repeat(arg.minclassfreq) ) )
#iterate over the results and feed to stdout
for r in results:
for rv in r: #iterate over the list of returned results
if rv != None: #only print results that pass filtering specs.
print rv #write output to std out
#final output to std err that the run has finished.
sys.stderr.write("...classifier finished \n" )
| 0.046042 |
"""Test cases for Zinnia's comparison"""
from django.test import TestCase
from mots_vides import stop_words
from zinnia import comparison
from zinnia.models.entry import Entry
from zinnia.comparison import pearson_score
from zinnia.comparison import ModelVectorBuilder
from zinnia.comparison import CachedModelVectorBuilder
from zinnia.signals import disconnect_entry_signals
class ComparisonTestCase(TestCase):
"""Test cases for comparison tools"""
def setUp(self):
english_stop_words = stop_words('english')
self.original_stop_words = comparison.STOP_WORDS
comparison.STOP_WORDS = english_stop_words
disconnect_entry_signals()
def tearDown(self):
comparison.STOP_WORDS = self.original_stop_words
def test_raw_dataset(self):
params = {'title': 'My entry 1', 'content': 'My content 1.',
'tags': 'zinnia, test', 'slug': 'my-entry-1'}
Entry.objects.create(**params)
params = {'title': 'My entry 2', 'content': 'My content 2.',
'tags': 'zinnia, test', 'slug': 'my-entry-2'}
Entry.objects.create(**params)
v = ModelVectorBuilder(queryset=Entry.objects.all(), fields=['title'])
with self.assertNumQueries(1):
self.assertEqual(len(v.raw_dataset), 2)
self.assertEqual(sorted(v.raw_dataset.values()),
[['entry'], ['entry']])
v = ModelVectorBuilder(queryset=Entry.objects.all(),
fields=['title', 'content', 'tags'])
self.assertEqual(sorted(v.raw_dataset.values()),
[['entry', 'content', 'zinnia', 'test'],
['entry', 'content', 'zinnia', 'test']])
v = ModelVectorBuilder(queryset=Entry.objects.all().order_by('-pk'),
fields=['title'], limit=1)
self.assertEqual(list(v.raw_dataset.values()), [['entry']])
def test_column_dataset(self):
vectors = ModelVectorBuilder(queryset=Entry.objects.all(),
fields=['title', 'excerpt', 'content'])
with self.assertNumQueries(1):
self.assertEqual(vectors.dataset, {})
self.assertEqual(vectors.columns, [])
params = {'title': 'My entry 1 (01)', 'content':
'This is my first content 1 (01)',
'slug': 'my-entry-1'}
e1 = Entry.objects.create(**params)
params = {'title': 'My entry 2 (02)', 'content':
'My second content entry 2 (02)',
'slug': 'my-entry-2'}
e2 = Entry.objects.create(**params)
vectors = ModelVectorBuilder(queryset=Entry.objects.all(),
fields=['title', 'excerpt', 'content'])
self.assertEqual(vectors.columns, ['01', '02', 'content', 'entry'])
self.assertEqual(vectors.dataset[e1.pk], [2, 0, 1, 1])
self.assertEqual(vectors.dataset[e2.pk], [0, 2, 1, 2])
def test_pearson_score(self):
self.assertRaises(ZeroDivisionError, pearson_score,
[42], [42])
self.assertRaises(ZeroDivisionError, pearson_score,
[2, 2, 2], [1, 1, 1])
self.assertEqual(pearson_score([0, 1, 2], [0, 1, 2]), 1.0)
self.assertEqual(pearson_score([0, 1, 3], [0, 1, 2]),
0.9819805060619656)
self.assertEqual(pearson_score([0, 1, 2], [0, 1, 3]),
0.9819805060619656)
self.assertEqual(pearson_score([2, 0, 0, 0], [0, 1, 1, 1]),
-1)
def test_compute_related(self):
class VirtualVectorBuilder(ModelVectorBuilder):
dataset = {1: [1, 2, 3],
2: [1, 5, 7],
3: [2, 8, 3],
4: [1, 8, 3],
5: [7, 3, 5]}
v = VirtualVectorBuilder()
self.assertEqual(v.compute_related('error'), [])
self.assertEqual(v.compute_related(1),
[(2, 0.9819805060619659),
(4, 0.2773500981126146),
(3, 0.15554275420956382),
(5, -0.5)])
self.assertEqual(v.compute_related(2),
[(1, 0.9819805060619659),
(4, 0.4539206495016019),
(3, 0.33942211665106525),
(5, -0.6546536707079772)])
self.assertEqual(v.compute_related(3),
[(4, 0.9922153572367627),
(2, 0.33942211665106525),
(1, 0.15554275420956382),
(5, -0.9332565252573828)])
self.assertEqual(v.compute_related(4),
[(3, 0.9922153572367627),
(2, 0.4539206495016019),
(1, 0.2773500981126146),
(5, -0.9707253433941511)])
v.dataset[2] = [0, 0, 0]
self.assertEqual(v.compute_related(1),
[(4, 0.2773500981126146),
(3, 0.15554275420956382),
(5, -0.5)])
def test_get_related(self):
params = {'title': 'My entry 01', 'content':
'This is my first content 01',
'slug': 'my-entry-1'}
e1 = Entry.objects.create(**params)
vectors = ModelVectorBuilder(queryset=Entry.objects.all(),
fields=['title', 'content'])
with self.assertNumQueries(1):
self.assertEquals(vectors.get_related(e1, 10), [])
params = {'title': 'My entry 02', 'content':
'My second content entry 02',
'slug': 'my-entry-2'}
e2 = Entry.objects.create(**params)
with self.assertNumQueries(0):
self.assertEquals(vectors.get_related(e1, 10), [])
vectors = ModelVectorBuilder(queryset=Entry.objects.all(),
fields=['title', 'content'])
with self.assertNumQueries(2):
self.assertEquals(vectors.get_related(e1, 10), [e2])
with self.assertNumQueries(1):
self.assertEquals(vectors.get_related(e1, 10), [e2])
def test_cached_vector_builder(self):
params = {'title': 'My entry number 1',
'content': 'My content number 1',
'slug': 'my-entry-1'}
e1 = Entry.objects.create(**params)
v = CachedModelVectorBuilder(
queryset=Entry.objects.all(), fields=['title', 'content'])
with self.assertNumQueries(1):
self.assertEquals(len(v.columns), 3)
with self.assertNumQueries(0):
self.assertEquals(len(v.columns), 3)
with self.assertNumQueries(0):
self.assertEquals(v.get_related(e1, 5), [])
for i in range(1, 3):
params = {'title': 'My entry %s' % i,
'content': 'My content %s' % i,
'slug': 'my-entry-%s' % i}
Entry.objects.create(**params)
v = CachedModelVectorBuilder(
queryset=Entry.objects.all(), fields=['title', 'content'])
with self.assertNumQueries(0):
self.assertEquals(len(v.columns), 3)
with self.assertNumQueries(0):
self.assertEquals(v.get_related(e1, 5), [])
v.cache_flush()
with self.assertNumQueries(2):
self.assertEquals(len(v.get_related(e1, 5)), 2)
with self.assertNumQueries(0):
self.assertEquals(len(v.get_related(e1, 5)), 2)
with self.assertNumQueries(0):
self.assertEquals(len(v.columns), 3)
v = CachedModelVectorBuilder(
queryset=Entry.objects.all(), fields=['title', 'content'])
with self.assertNumQueries(0):
self.assertEquals(len(v.columns), 3)
with self.assertNumQueries(0):
self.assertEquals(len(v.get_related(e1, 5)), 2)
| 0 |
# Diff Tool - theme.py
# defines the theme object
import globalFunc as glb
class Theme:
def __init__(self, config):
# only parameter is the full path of the config.json file
self.themeFile = glb.themesDirectory() + config.theme
self.jsonString = ''
if len(self.themeFile) > 0:
self.clearObject()
self.getJsonString()
if len(self.jsonString) > 0:
self.loadTheme(True)
else:
print 'WARNING: No Theme file has been loaded. Please load a theme.'
pass
else:
print 'WARNING: No Theme file has been loaded. Please load a theme.'
pass
def getJsonString(self):
# returns a json string from self.themeFile
self.jsonString = glb.getJsonString(self.themeFile)
def clearObject(self):
self.name = ''
self.date = ''
self.author = ''
self.description = ''
self.body = []
self.main = []
self.diffHeader = []
self.diffBody = []
self.css = []
def loadTheme(self, firstLoad=False):
if not firstLoad:
# refresh json string, in case config has changed
self.getJsonString()
# populate object
self.name = self.jsonString["name"]
self.author = self.jsonString["author"]
self.date = self.jsonString["date"]
self.description = self.jsonString["description"]
self.body = self.jsonString["body"]
self.main = self.jsonString["main"]
self.diffHeader = self.jsonString["diffHeader"]
self.diffBody = self.jsonString["diffBody"]
def createTheme(self):
self.css.append('<style type="text/css">')
# body css
for d in self.body:
self.css.append('\t' + d)
# main content area
for m in self.main:
self.css.append('\t' + m)
# diff header area
for h in self.diffHeader:
self.css.append('\t' + h)
# diff body area
for b in self.diffBody:
self.css.append('\t' + b)
self.css.append('</style>')
def about(self):
print '=' * 80
print 'About: ' + self.name
print 'Created: ' + self.date
print 'Author: ' + self.author
print self.description
print '=' * 80
return ''
| 0.046 |
##########################################################
## ### ######## ######## ######## #######
## ## ## ## ## ## ## ## ##
## ## ## ## ## ## ## ## ##
## ## ## ######## ###### ## ## ##
## ## ######### ## ## ## ## ##
## ## ## ## ## ## ## ## ##
###### ## ## ## ######## ## #######
##########################################################
from flask import Flask
#~ from application import app
from db_class import Connection
from datetime import date
import sys
import json
import time
import datetime
"""
"""
__author__ = "JAPeTo <jeffersonamado@gmail.com>"
__date__ = "2 Octubre 2015"
__version__ = "$Revision: 1 $"
__credits__ = """ """
# Known bugs that can't be fixed here:
#~
#~
class Part:
def __init__(self,debug=False):
"""
"""
self.debug = debug
if self.debug: print "Creando objeto Part!\n"
self.codigo_repuesto = None
self.marca = None
self.proveedor = None
self.codigo_referencia = None
self.codigo_de_reemplazo = None
self.descripcion = None
self.numero_importacion = None
self.precio = None
def all_parts(self,codigo_sucursal=None):
"""
"""
if self.debug: print "Connect to database"
_cnx= Connection()
if self.debug: print "query all Parts"
self.response= _cnx.single_query("SELECT codigo_repuesto, marca, proveedor, codigo_referencia, codigo_de_reemplazo,descripcion, numero_importacion, precio, imagenes FROM repuesto,inventario where inventario.codigorepuesto = repuesto.codigo_repuesto and codigosucursal "+codigo_sucursal+" order by codigo_repuesto desc")
if (self.response is not None and len(self.response) > 0):
if self.debug: print len(self.response),"Part found"
            #If it is not null and the length of the query result is greater than 1
return True,self.response
else:
if self.debug: print "NOT found Parts"
            #return False, None
return False,None
def exist_part(self,marca=None,codigo_referencia=None):
"""
"""
if self.debug: print "Connect to database"
_cnx= Connection()
if self.debug: print "buscando el repuesto marca: "+marca+" y ref: "+codigo_referencia
self.response= _cnx.single_query("SELECT * FROM repuesto where marca = '"+marca+"' and codigo_referencia = '"+codigo_referencia+"'")
if (self.response is not None and len(self.response) > 0):
if self.debug: print len(self.response),"Part found"
            #If it is not null and the length of the query result is greater than 1
return True
else:
if self.debug: print "NOT found Parts"
            #return False, None
return False
def seek_part_byBrand(self,brand=None):
"""
"""
if self.debug: print "Connect to database"
_cnx= Connection()
if self.debug: print "query "+brand+" Part"
self.response= _cnx.single_query("SELECT * FROM repuesto where marca = '"+brand+"' ")
if (self.response is not None and len(self.response) > 0):
if self.debug: print len(self.response),"Part found"
            #If it is not null and the length of the query result is greater than 1
return True,self.response
else:
if self.debug: print "NOT found Parts"
            #return False, None
return False,None
def seek_part_byId(self,identifier=None):
"""
"""
if self.debug: print "Connect to database"
_cnx= Connection()
if self.debug: print "query Part with id = "+str(identifier)+" "
self.response= _cnx.single_query("SELECT * FROM repuesto where codigo_repuesto = '"+str(identifier)+"' ")
if (self.response is not None and len(self.response) > 0):
if self.debug: print len(self.response),"Part found"
            #If it is not null and the length of the query result is greater than 1
return True,self.response
else:
if self.debug: print "NOT found subsidiaries"
            #return False, None
return False,None
def seek_part_byImagen(self,identifier=None):
"""
"""
if self.debug: print "Connect to database"
_cnx= Connection()
if self.debug: print "query Part with id = "+str(identifier)+" "
self.response= _cnx.single_query("SELECT * FROM repuesto where imagenes = '"+str(identifier)+"' ")
if (self.response is not None and len(self.response) > 0):
if self.debug: print len(self.response),"Part found"
            #If it is not null and the length of the query result is greater than 1
return True,self.response
else:
if self.debug: print "NOT found subsidiaries"
            #return False, None
return False,None
def save_part(self,marca = None,proveedor = None,
codigo_referencia = None,codigo_de_reemplazo = None,descripcion = None,
numero_importacion = None,precio = None,imagepath=None):
"""
"""
if self.debug: print "Connect to database"
_cnx= Connection()
try:
if self.exist_part(marca,codigo_referencia):
raise Exception("El repuesto de marca <b>"+marca+"</b> y referencia <b>"+codigo_referencia+"</b> ya existe")
self.response= _cnx.no_single_query("INSERT INTO repuesto (marca,proveedor,"+
"codigo_referencia,codigo_de_reemplazo,descripcion,"+
"numero_importacion,precio,imagenes)"+
" VALUES ('"+marca+"','"+proveedor+"','"+codigo_referencia+
"','"+codigo_de_reemplazo+"','"+descripcion+"','"+
numero_importacion+"','"+precio+"','"+imagepath+"')")
_cnx.commit()
if self.debug: print "Insert (OK!)"
return True,"El repuesto de marca <b>"+marca+"</b> y referencia <b>"+codigo_referencia+"</b> ha sido creado <br/> <a href='/editpart'>Ver lista</a>"
except:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
if self.debug: print "NO Insert Error %s" % (exceptionValue)
return False, "%s" % (exceptionValue)
def update_part(self,codigo_repuesto = None,marca = None,proveedor = None,
codigo_referencia = None,codigo_de_reemplazo = None,descripcion = None,
numero_importacion = None,precio = None):
"""
"""
if self.debug: print "Connect to database"
_cnx= Connection()
try:
if self.debug: print "try update Part "+codigo_repuesto+" with state"
self.response= _cnx.no_single_query("UPDATE repuesto set marca = '"+marca+"',proveedor = '"+proveedor+
"',codigo_referencia = '"+codigo_referencia+"',codigo_de_reemplazo = '"+codigo_de_reemplazo+"',descripcion = '"+descripcion+
"',numero_importacion = '"+numero_importacion+"',precio = '"+precio+
"' where codigo_repuesto = '"+str(codigo_repuesto)+"'")
_cnx.commit()
if self.debug: print "Update (OK!)"
return True,"El repuesto de marca <b>"+marca+"</b> y referencia <b>"+codigo_referencia+"</b> ha sido actualizado. <br/> <a href='/editpart'>Ver lista</a>"
except:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
if self.debug: print "No update %s" % (exceptionValue)
return False, "%s" % (exceptionValue)
def delete_part(self,codigo_repuesto =None,):
"""
"""
if self.debug: print "Connect to database"
_cnx= Connection()
try:
if self.debug: print "try delete Part "+str(codigo_repuesto)+" "
self.response= _cnx.no_single_query("DELETE from repuesto where codigo_repuesto='"+str(codigo_repuesto)+"'")
_cnx.commit()
if self.debug: print "DELETED (OK!)"
return True,"El repuesto de codigo <b>"+codigo_repuesto+"</b> ha sido eliminado"
except:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
if self.debug: print "No update %s" % (exceptionValue)
return False, "ERROR EN LA BASE DE DATOS %s" % (exceptionValue)
###############################################################################################################
# r =Part(True)
# r.all_parts();
# r.save_part("1","ZNP","ZNP","ZNP-28752","F-00M-991-061, 30737517, 9459747, 11081, 11091","Alternator Clutch Pulley Fits Chrysler 200, Sebring 2.4L, Jeep Compass 2.0L, 2.4L, Patriot 2.0L, 2.4L, Dodge Avenger 1.8L, 2.0L, 2.4L, Journey 2.4L 04891737AB, 4891737AB, 04801323AB, 04801323AC, A2TJ0481, A2TJ0481ZC, 11228, 11231, 11440","81319fe1c21bc2ab3d79e244d5cc0b2b","$32.95");
# r.seek_part_byBrand("ZNP");
# r.seek_part_byId(1)
# r.update_part("3","ZNP","ZNP","ZNP-28752"," 11081, 11091","Jeep Compass 2.0L, 2.4L, Patriot 2.0L, 2.4L, Dodge Avenger 1.8L, 2.0L, 2.4L, Journey 2.4L 04891737AB","81319fe1c21bc2ab3d79e244d5cc0b2b","$32.95");
# r.delete_part("3");
| 0.049411 |
import os
import http
class IndexDoc:
expression = '^$'
@classmethod
def handle_request(self, handler, request, tokens):
request._data['path_path'] = 'static/index.html'
StaticDoc.handle_request(handler, request, {'filename': 'index.html'})
class ImageDoc:
expression = '^image/(?P<filename>.*)$'
@classmethod
def handle_request(self, handler, request, tokens):
path = os.path.join(handler.settings['screenshotspath'], tokens['filename'])
if 'header_If-Modified-Since' in request._data:
response = http.FileResponse(path, request._data['header_If-Modified-Since'])
else:
response = http.FileResponse(path)
handler.do_write(response.encode())
class StaticDoc:
expression = '^static/(?P<filename>.*)$'
@classmethod
def handle_request(self, handler, request, tokens):
path = os.path.join(handler.settings['basedir'], 'static', tokens['filename'])
if 'header_If-Modified-Since' in request._data:
response = http.FileResponse(path, request._data['header_If-Modified-Since'])
else:
response = http.FileResponse(path)
handler.do_write(response.encode())
class AJAXDoc:
expression = '^ajax$'
class UpdatesDoc:
expression = '^updates'
| 0.033956 |
import numpy as np
def unique_rows(ar):
"""Remove repeated rows from a 2D array.
In particular, if given an array of coordinates of shape
(Npoints, Ndim), it will remove repeated points.
Parameters
----------
ar : 2-D ndarray
The input array.
Returns
-------
ar_out : 2-D ndarray
A copy of the input array with repeated rows removed.
Raises
------
ValueError : if `ar` is not two-dimensional.
Notes
-----
The function will generate a copy of `ar` if it is not
C-contiguous, which will negatively affect performance for large
input arrays.
Examples
--------
>>> ar = np.array([[1, 0, 1],
... [0, 1, 0],
... [1, 0, 1]], np.uint8)
>>> unique_rows(ar)
array([[0, 1, 0],
[1, 0, 1]], dtype=uint8)
"""
if ar.ndim != 2:
raise ValueError("unique_rows() only makes sense for 2D arrays, "
"got %dd" % ar.ndim)
# the view in the next line only works if the array is C-contiguous
ar = np.ascontiguousarray(ar)
# np.unique() finds identical items in a raveled array. To make it
# see each row as a single item, we create a view of each row as a
# byte string of length itemsize times number of columns in `ar`
ar_row_view = ar.view('|S%d' % (ar.itemsize * ar.shape[1]))
_, unique_row_indices = np.unique(ar_row_view, return_index=True)
ar_out = ar[unique_row_indices]
return ar_out
| 0 |
# Copyright 2008-2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''
Module that contains the logic for evaluating expressions in the
template.
Expression evaluation is done without using a real python eval to
keep it safe from arbitrary code execution
Both parameter values and functions are stored in a dict that is
passed to the Expression object when it is executed. The expressions
should not allow access to anything outside this dict, and only
sane access to objects reachable from this dict.
Parameter lookup gets list and dict items as well as object attributes.
It does not allow accessing private attributes (starting with "_")
or code objects (object methods or functions) - callable objects on
the other hand can be accessed.
We control execution by only executing functions that are specifically
whitelisted as being an ExpressionFunction (can be used as decorator).
The expression classes have builtin support for some builtin methods
on strings, lists and dicts (see L{ExpressionString},
L{ExpressionDict} and L{ExpressionList} respectively), other functions
can be supplied in the context dict or as object attributes.
The biggest risks would be to put objects in the dict that allow
access to dangerous methods or private data. Or to have functions
that e.g. eval one of their arguments or take a callable argument
The idea is that objects in the expression dict are proxies that
expose a subset of the full object API and template friendly methods.
These restrictions should help to minimize the risk of arbitrary code
execution in expressions.
'''
try:
import collections.abc as abc
except ImportError:
# python < version 3.3
import collections as abc
import inspect
import logging
logger = logging.getLogger('zim.templates')
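
# A minimal usage sketch (illustrative, not part of the original module):
# the template parser normally builds these trees, but they can also be
# assembled by hand and evaluated against a context dict:
#
#     import operator
#     expr = ExpressionOperator(
#         operator.add,
#         ExpressionLiteral(1),
#         ExpressionParameter('a'),
#     )
#     assert expr({'a': 2}) == 3    # evaluates "1 + a" against the context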
class Expression(object):
'''Base class for all expressions'''
__slots__ = ()
def __call__(self, dict):
'''Evaluate the expression
@param dict: the context with parameter values
'''
raise NotImplementedError
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.pprint())
def __str__(self):
return self.pprint()
def pprint(self):
'''Print the expression hierarchy'''
		raise NotImplementedError
class ExpressionLiteral(Expression):
'''Expression with a literal value'''
__slots__ = ('value',)
def __init__(self, value):
'''Constructor
@param value: the expression value (string, int, float, ...)
'''
self.value = value
def __eq__(self, other):
return self.value == other.value
def __call__(self, dict):
return self.value
def pprint(self):
return repr(self.value)
class ExpressionParameter(Expression):
'''Expression with a parameter name, evaluates the parameter value'''
__slots__ = ('name', 'parts', 'key')
def __init__(self, name):
'''Constructor
@param name: the parameter name
'''
self.name = name
self.parts = name.split('.')
if any(n.startswith('_') for n in self.parts):
raise ValueError('Invalid parameter name: %s' % name)
for i in range(len(self.parts)):
try:
self.parts[i] = int(self.parts[i])
except ValueError:
pass
self.key = self.parts[-1]
def __eq__(self, other):
return isinstance(other, ExpressionParameter) \
and self.name == other.name
def __call__(self, context):
value = context
for i, p in enumerate(self.parts):
try:
try:
value = value[p]
except TypeError:
# not indexable, or wrong key type - try getattr
value = getattr(value, p)
except (IndexError, KeyError, AttributeError):
# We got right type, but data is not there
logger.warning('No such parameter: %s', '.'.join(map(str, self.parts[:i + 1])))
return None
if inspect.ismethod(value) \
or inspect.isfunction(value) \
or inspect.isbuiltin(value):
raise AssertionError('Can not access parameter: %s' % self.name)
return value
@property
def parent(self):
'''Returns a C{ExpressionParameter} for the parent object.
Used e.g. to find the parent scope for assignment.
'''
if len(self.parts) > 1:
return ExpressionParameter('.'.join(map(str, self.parts[:-1])))
else:
return lambda d: d # HACK - define proper class for root namespace
def pprint(self):
return 'PARAM(%s)' % self.name
class ExpressionList(Expression):
'''Expression for a list of expressions, recurses on all items
when evaluated
'''
__slots__ = ('items',)
def __init__(self, items=None):
'''Constructor
@param items: iterable with L{Expression} objects
'''
if items:
self.items = list(items)
assert all(isinstance(item, Expression) for item in self.items)
else:
self.items = []
def __eq__(self, other):
return self.items == other.items
def __call__(self, dict):
return [item(dict) for item in self.items]
def append(self, item):
assert isinstance(item, Expression)
self.items.append(item)
def pprint(self):
return '[' + ', '.join(map(str, self.items)) + ']'
class ExpressionOperator(Expression):
'''Expression for an operator statement (e.g. "AND", "OR", "<"),
recurses for left and right side of the expression.
'''
__slots__ = ('operator', 'lexpr', 'rexpr')
def __init__(self, operator, lexpr, rexpr):
'''Constructor
@param operator: an operator function
@param lexpr: left hand L{Expression} object
@param rexpr: right hand L{Expression} object
'''
assert isinstance(lexpr, Expression)
assert isinstance(rexpr, Expression)
self.operator = operator
self.lexpr = lexpr
self.rexpr = rexpr
def __eq__(self, other):
return (self.operator, self.lexpr, self.rexpr) == (other.operator, other.lexpr, other.rexpr)
def __call__(self, dict):
lvalue = self.lexpr(dict)
rvalue = self.rexpr(dict)
return self.operator(lvalue, rvalue)
def pprint(self):
return 'OP(%s, %s, %s)' % (self.operator.__name__, self.lexpr, self.rexpr)
class ExpressionUnaryOperator(Expression):
'''Expression with a unary operator (e.g. "NOT") that recurses
for the right hand side of the statement.
'''
__slots__ = ('operator', 'rexpr')
def __init__(self, operator, rexpr):
'''Constructor
@param operator: an operator function
@param rexpr: right hand L{Expression} object
'''
assert isinstance(rexpr, Expression)
self.operator = operator
self.rexpr = rexpr
def __eq__(self, other):
return (self.operator, self.rexpr) == (other.operator, other.rexpr)
def __call__(self, dict):
rvalue = self.rexpr(dict)
return self.operator(rvalue)
def pprint(self):
return 'OP(%s, %s)' % (self.operator.__name__, self.rexpr)
class ExpressionFunctionCall(Expression):
'''Expression with a function name and arguments, recurses for
the arguments and evaluates the function.
'''
__slots__ = ('param', 'args')
def __init__(self, param, args):
		'''Constructor
		@param param: an L{ExpressionParameter} that refers to the function
@param args: an L{ExpressionList} with arguments
'''
assert isinstance(param, ExpressionParameter)
assert isinstance(args, ExpressionList)
self.param = param
self.args = args
def __eq__(self, other):
return (self.param, self.args) == (other.param, other.args)
def __call__(self, context):
## Lookup function:
## getitem dict / getattr objects / getattr on wrapper
obj = self.param.parent(context)
name = self.param.key
try:
function = obj[name]
if not isinstance(function, ExpressionFunction):
raise KeyError
except (TypeError, KeyError):
if hasattr(obj, name) \
and isinstance(getattr(obj, name), ExpressionFunction):
function = getattr(obj, name)
else:
wrapper = self.wrap_object(obj)
if wrapper is not None \
and hasattr(wrapper, name) \
and isinstance(getattr(wrapper, name), ExpressionFunction):
function = getattr(wrapper, name)
else:
raise AssertionError('Not a valid function: %s' % self.param.name)
## Execute function
if not isinstance(function, ExpressionFunction):
# Just being paranoid here, but leave it in to block any mistakes in above lookup
raise AssertionError('Not a valid function: %s' % self.param.name)
args = self.args(context)
return function(*args)
def wrap_object(self, obj):
'''Find a suitable wrapper that exposes safe methods for
a given object
'''
if isinstance(obj, str):
return ExpressionStringObject(obj)
elif isinstance(obj, (dict, abc.Mapping)):
return ExpressionDictObject(obj)
elif isinstance(obj, list):
return ExpressionListObject(obj)
else:
return None
def pprint(self):
return 'CALL(%s: %s)' % (self.param.name, self.args.pprint())
class ExpressionFunction(object):
'''Wrapper for methods and functions that whitelists
functions to be called from expressions
Can be used as a decorator.
'''
def __init__(self, func):
'''Constructor
@param func: the actual function
'''
self._func = func
def __get__(self, instance, owner):
# This allows using this object as a decorator as well
if instance is None:
return self
else:
return BoundExpressionFunction(instance, self._func)
def __eq__(self, other):
return self._func == other._func
def __call__(self, *a):
return self._func(*a)
def __repr__(self):
# Also shows up when function parameter is used, but not called
		# (TemplateToolkit allows implicit calls - we don't!)
return "<%s: %s()>" % (self.__class__.__name__, self._func.__name__)
class BoundExpressionFunction(ExpressionFunction):
'''Wrapper used by L{ExpressionFunction} when used as a decorator
for object methods.
'''
def __init__(self, obj, func):
self._obj = obj
self._func = func
def __call__(self, *a):
return self._func(self._obj, *a)
class ExpressionObjectBase(object):
	'''Base class for wrapper objects that are used to determine the
	safe functions to call on objects in the parameter dict.
	The attribute C{_fmethods} lists the methods that can be called
	safely on the wrapped objects.
'''
_fmethods = ()
def __init__(self, obj):
self._obj = obj
def __getattr__(self, name):
if name in self._fmethods:
func = ExpressionFunction(getattr(self._obj, name))
#~ setattr(self, name, func)
return func
else:
raise AttributeError
def __getitem__(self, k):
return self._obj[k]
def __iter__(self):
return iter(self._obj)
def __len__(self):
return len(self._obj)
def __str__(self):
return str(self._obj)
def __repr__(self):
return '<%s: %r>' % (self.__class__.__name__, self._obj)
@ExpressionFunction
def len(self):
return len(self)
@ExpressionFunction
def sorted(self):
return sorted(self)
@ExpressionFunction
def reversed(self):
return list(reversed(self))
class ExpressionStringObject(ExpressionObjectBase):
'''Proxy for string objects that gives safe methods for use in
expressions.
'''
_fmethods = (
'capitalize', 'center', 'count', 'endswith', 'expandtabs',
'ljust', 'lower', 'lstrip', 'replace', 'rjust', 'rsplit',
'rstrip', 'split', 'splitlines', 'startswith', 'title',
'upper',
)
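# --- Illustrative sketch (not part of the original module) ---
# Only whitelisted names pass through the wrapper; the string below is a
# hypothetical example.
def _demo_string_wrapper():
    wrapped = ExpressionStringObject('hello world')
    result = wrapped.upper()  # 'upper' is in _fmethods -> 'HELLO WORLD'
    # wrapped.strip would raise AttributeError: 'strip' is not whitelisted
    return result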
class ExpressionDictObject(ExpressionObjectBase):
'''Proxy for dict objects that gives safe methods for use in
expressions.
'''
_fmethods = (
'get', 'keys', 'values', 'items',
'len', 'reversed', 'sorted'
)
	# only functions for non-mutable mappings here!
def __setitem__(self, k, v):
self._obj[k] = v
def __delitem__(self, k):
del self._obj[k]
class ExpressionListObject(ExpressionObjectBase):
'''Proxy for list objects that gives safe methods for use in
expressions.
'''
_fmethods = ('get', 'len', 'reversed', 'sorted')
@ExpressionFunction
def get(self, i, default=None):
try:
return self._obj[i]
except IndexError:
return default
| 0.026654 |
"""
Implementation of SettingsNotebook
"""
import os
import sys
import wx
import h5py
import hashlib
import tempfile
class SettingsNotebook (wx.Notebook) :
"""
    GUI container (wx.Notebook) for device and settings tabs
"""
def __init__ (self, parent, specs=[], auto_load=True, **kwargs) :
"""
        specs -- list of tuples specifying tab settings, e.g.,
[ (tab_class, label(string), dev_name(string)) ]
        if dev_name is None then the specified tuple does not represent a device
"""
wx.Notebook.__init__(self, parent, **kwargs)
# dictionary of all tabs
self._tabs = {}
# dictionary of tabs representing devices
self._devs = {}
for tab_specs in specs :
# Unpacking specifications
if len(tab_specs) <= 1 :
raise ValueError("Minimal specification of a tab is (tab_class, tab_label,)")
tab = tab_specs[0]
label = tab_specs[1]
try :
dev_name = tab_specs[2]
except IndexError :
dev_name = label
# Initialize the GUI Tab
tab = tab(self)
# Add to the notebook
self.AddPage(tab, label)
# Add to dict of settings
self._tabs[label] = tab
# Add to dict of devices, if applicable
if dev_name :
self._devs[dev_name] = tab
# Load settings
if auto_load :
try :
self.AutoLoad()
print("Settings are auto-loaded")
except IOError :
pass
def __del__ (self) :
self.StopAllDevices()
def StartDev (self, dev_name) :
"""
Start device specified by name
"""
return self._devs[dev_name].StartDev()
def StopAllDevices (self, event=None, auto_save=True) :
"""
Close all devices that have been initialized
"""
for tab in self._devs.values() :
tab.StopDev()
# Auto save settings
if auto_save :
self.AutoSave()
print("Settings are auto-saved")
if event :
# Destroy parent window
event.GetEventObject().Destroy()
@classmethod
def GetAutoSavingFilename (cls) :
"""
Return file name where settings are saved automatically
"""
        return os.path.join(tempfile.gettempdir(),
                            hashlib.sha224(sys.argv[0]).hexdigest() + '.hdf5')
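    # --- Illustrative note (not part of the original class) ---
    # The auto-save file is keyed by a SHA-224 digest of the launching
    # script's path (sys.argv[0]), so each application gets its own
    # settings file, e.g. /tmp/<56 hex chars>.hdf5 on Linux.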
def AutoLoad (self) :
"""
Load settings automatically
"""
self.LoadSettings( filename = self.GetAutoSavingFilename() )
def AutoSave (self) :
"""
Save settings automatically
"""
self.SaveSettings( filename = self.GetAutoSavingFilename() )
def LoadSettings (self, event=None, filename="", title="Open HDF5 file to load settings") :
"""
        Load settings. This method is closely related to <self.SaveSettings>.
        Return the filename from which settings were loaded.
"""
if not len(filename) :
# Ask user to select the file
openFileDialog = wx.FileDialog(self, title, "", "",
"HDF5 files (*.hdf5)|*.hdf5",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_CHANGE_DIR
)
# Check whether user cancelled
if openFileDialog.ShowModal() == wx.ID_CANCEL:
return None
filename = openFileDialog.GetPath()
with h5py.File (filename, 'r') as f_settings :
for label, tab in f_settings["settings"].items() :
try :
self._tabs[label].SetSettings(tab)
except KeyError :
print "Load Settings Error: Settings %s are ignored" % label
return filename
def SaveSettings (self, event=None, filename = "",
default_filename = "settings.hdf5", title="Open HDF5 file to save settings") :
"""
        Method for saving settings
"""
if not len(filename) :
# Ask user to select the file
openFileDialog = wx.FileDialog(self, title, "", default_filename, "HDF5 files (*.hdf5)|*.hdf5",
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)
# Check whether user cancelled
if openFileDialog.ShowModal() == wx.ID_CANCEL:
return None
filename = openFileDialog.GetPath()
with h5py.File (filename, 'a') as f_settings :
        # Create the group if it does not exist
try :
parameters_grp = f_settings["settings"]
except KeyError :
parameters_grp = f_settings.create_group("settings")
# Loop over all settings tab
for label, tab in self._tabs.items() :
# Save all settings on a given tab
try :
del parameters_grp[label]
except KeyError : pass
grp = parameters_grp.create_group(label)
for key, value in tab.GetSettings().items() :
grp[key] = value
# return valid file name
return filename
def GetAllSettings(self) :
"""
        Return a dictionary of per-tab dictionaries containing the settings from all tabs
"""
return dict(
(label, tab.GetSettings()) for label, tab in self._tabs.items()
) | 0.062928 |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from keystone import exception
from keystone.i18n import _
def build_v3_resource_relation(resource_name):
return ('http://docs.openstack.org/api/openstack-identity/3/rel/%s' %
resource_name)
def build_v3_extension_resource_relation(extension_name, extension_version,
resource_name):
return (
'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/rel/%s' %
(extension_name, extension_version, resource_name))
def build_v3_parameter_relation(parameter_name):
return ('http://docs.openstack.org/api/openstack-identity/3/param/%s' %
parameter_name)
def build_v3_extension_parameter_relation(extension_name, extension_version,
parameter_name):
return (
'http://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/param/'
'%s' % (extension_name, extension_version, parameter_name))
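# --- Illustrative sketch (not part of the original module) ---
# What the relation builders above produce; the resource and parameter
# names are hypothetical examples.
def _demo_relations():
    assert (build_v3_resource_relation('regions') ==
            'http://docs.openstack.org/api/openstack-identity/3/rel/regions')
    assert (build_v3_parameter_relation('region_id') ==
            'http://docs.openstack.org/api/openstack-identity/3/param/region_id')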
class Parameters(object):
"""Relationships for Common parameters."""
DOMAIN_ID = build_v3_parameter_relation('domain_id')
ENDPOINT_ID = build_v3_parameter_relation('endpoint_id')
GROUP_ID = build_v3_parameter_relation('group_id')
POLICY_ID = build_v3_parameter_relation('policy_id')
PROJECT_ID = build_v3_parameter_relation('project_id')
REGION_ID = build_v3_parameter_relation('region_id')
ROLE_ID = build_v3_parameter_relation('role_id')
SERVICE_ID = build_v3_parameter_relation('service_id')
USER_ID = build_v3_parameter_relation('user_id')
class Status(object):
"""Status values supported."""
DEPRECATED = 'deprecated'
EXPERIMENTAL = 'experimental'
STABLE = 'stable'
@classmethod
def update_resource_data(cls, resource_data, status):
if status is cls.STABLE:
# We currently do not add a status if the resource is stable, the
# absence of the status property can be taken as meaning that the
# resource is stable.
return
if status is cls.DEPRECATED or status is cls.EXPERIMENTAL:
resource_data['hints'] = {'status': status}
return
raise exception.Error(message=_(
'Unexpected status requested for JSON Home response, %s') % status)
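# --- Illustrative sketch (not part of the original module) ---
# How Status.update_resource_data() annotates a JSON Home resource; the
# resource dict is a hypothetical example.
def _demo_status_hints():
    resource = {'href': '/v3/regions'}
    Status.update_resource_data(resource, Status.EXPERIMENTAL)
    return resource  # -> {'href': '/v3/regions', 'hints': {'status': 'experimental'}}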
def translate_urls(json_home, new_prefix):
"""Given a JSON Home document, sticks new_prefix on each of the urls."""
for dummy_rel, resource in six.iteritems(json_home['resources']):
if 'href' in resource:
resource['href'] = new_prefix + resource['href']
elif 'href-template' in resource:
resource['href-template'] = new_prefix + resource['href-template']
| 0 |
import spidev
import time
import string
import site
import sys
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
#Initialize
if (sys.version_info < (2,7,0)):
sys.stderr.write("You need at least python 2.7.0 to use this library")
exit(1)
GPIO.setmode(GPIO.BCM)
GPIObaseADDR=8
ppFRAME = 25
ppINT = 22
GPIO.setup(ppFRAME,GPIO.OUT)
GPIO.output(ppFRAME,False) #Initialize FRAME signal
GPIO.setup(ppINT, GPIO.IN, pull_up_down=GPIO.PUD_UP) #reserve interrupt input
spi = spidev.SpiDev()
spi.open(0,1)
localPath=site.getsitepackages()[0]
helpPath=localPath+'/piplates/DAQChelp.txt'
DAQCversion=1.03
daqcsPresent = range(8)
Vcc=range(8)
MAXADDR=8
def CLOSE():
spi.close()
GPIO.cleanup()
def Help():
help()
def HELP():
help()
def help():
valid=True
try:
f=open(helpPath,'r')
while(valid):
Count=0
while (Count<20):
s=f.readline()
if (len(s)!=0):
print s[:len(s)-1]
Count = Count + 1
if (Count==20):
Input=raw_input('press \"Enter\" for more...')
else:
Count=100
valid=False
f.close()
except IOError:
print ("Can't find help file.")
#===============================================================================#
# ADC Functions #
#===============================================================================#
def getADC(addr,channel):
VerifyADDR(addr)
VerifyAINchannel(channel)
resp=ppCMD(addr,0x30,channel,0,2)
value=(256*resp[0]+resp[1])
value=round(value*4.096/1024,3)
if (channel==8):
value=value*2.0
return value
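# --- Illustrative sketch (not part of the original library) ---
# Typical single-channel read from the board at address 0; wrapped in a
# function so nothing runs on import (requires real DAQCplate hardware).
def _demo_read_channel0():
    volts = getADC(0, 0) # channel 0, returns 0..4.095 V rounded to 3 places
    print volts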
def getADCall(addr):
value=range(8)
VerifyADDR(addr)
resp=ppCMD(addr,0x31,0,0,16)
for i in range (0,8):
value[i]=(256*resp[2*i]+resp[2*i+1])
value[i]=round(value[i]*4.096/1024,3)
return value
#===============================================================================#
# Digital Input Functions #
#===============================================================================#
def getDINbit(addr,bit):
VerifyADDR(addr)
VerifyDINchannel(bit)
resp=ppCMD(addr,0x20,bit,0,1)
if resp[0] > 0:
return 1
else:
return 0
def getDINall(addr):
VerifyADDR(addr)
resp=ppCMD(addr,0x25,0,0,1)
return resp[0]
def enableDINint(addr, bit, edge): # enable DIN interrupt
VerifyADDR(addr)
VerifyDINchannel(bit)
if ((edge=='f') or (edge=='F')):
resp=ppCMD(addr,0x21,bit,0,0)
if ((edge=='r') or (edge=='R')):
resp=ppCMD(addr,0x22,bit,0,0)
if ((edge=='b') or (edge=='B')):
resp=ppCMD(addr,0x23,bit,0,0)
def disableDINint(addr,bit): # disable DIN interrupt
VerifyADDR(addr)
VerifyDINchannel(bit)
resp=ppCMD(addr,0x24,bit,0,0)
def getTEMP(addr,channel,scale):
VerifyADDR(addr)
assert ((channel>=0) and (channel<=7)),"Channel value out of range. Must be a value between 0 and 7"
scal=scale.lower()
assert ((scal=='c') or (scal=='f') or (scal=='k')), "Temperature scale must be 'c', 'f', or 'k'."
resp=ppCMD(addr,0x70,channel,0,0) #initiate measurement
time.sleep(1)
resp=ppCMD(addr,0x71,channel,0,2) #get data
Temp=resp[0]*256+resp[1]
if (Temp>0x8000):
Temp = Temp^0xFFFF
Temp = -(Temp+1)
Temp = round((Temp/16.0),4)
if (scal=='k'):
Temp = Temp + 273
if (scal=='f'):
Temp = round((Temp*1.8+32.2),4)
return Temp
#===============================================================================#
#                               Hybrid Functions                                #
#===============================================================================#
def getRANGE(addr, channel, units):
VerifyADDR(addr)
assert ((channel>=0) and (channel<=6)),"Channel value out of range. Must be a value between 0 and 6"
uni=units.lower()
assert ((uni=='c') or (uni=='i')), "ERROR: incorrect units parameter. Must be 'c' or 'i'."
resp = ppCMD(addr,0x80,channel,0,0) # initiate measurement
time.sleep(.07)
resp = ppCMD(addr,0x81,channel,0,2) # get data
Range = resp[0]*256+resp[1]
#assert (Range!=0), "ERROR: sensor failure?"
if (uni == 'c'):
Range = Range/58.326
if (uni == 'i'):
Range = Range/148.148
Range = round(Range, 3)
return Range
#===============================================================================#
# LED Functions #
#===============================================================================#
def setLED(addr,led):
VerifyADDR(addr)
VerifyLED(led)
resp=ppCMD(addr,0x60,led,0,0)
def clrLED(addr,led):
VerifyADDR(addr)
VerifyLED(led)
resp=ppCMD(addr,0x61,led,0,0)
def toggleLED(addr,led):
VerifyADDR(addr)
VerifyLED(led)
resp=ppCMD(addr,0x62,led,0,0)
def getLED(addr,led):
VerifyADDR(addr)
VerifyLED(led)
resp=ppCMD(addr,0x63,led,0,1)
return resp[0]
def VerifyLED(led):
assert (led>=0 and led<=1),"Invalid LED value. Must be 0 or 1"
#==============================================================================#
# Switch Functions #
#==============================================================================#
def getSWstate(addr):
VerifyADDR(addr)
resp=ppCMD(addr,0x50,0,0,1)
return resp[0]
def enableSWint(addr):
VerifyADDR(addr)
resp=ppCMD(addr,0x51,0,0,0)
def disableSWint(addr):
VerifyADDR(addr)
resp=ppCMD(addr,0x52,0,0,0)
def enableSWpower(addr):
VerifyADDR(addr)
resp=ppCMD(addr,0x53,0,0,0)
def disableSWpower(addr):
VerifyADDR(addr)
resp=ppCMD(addr,0x54,0,0,0)
#==============================================================================#
# Digital Output Functions #
#==============================================================================#
def setDOUTbit(addr,bit):
VerifyADDR(addr)
VerifyDOUTchannel(bit)
resp=ppCMD(addr,0x10,bit,0,0)
def clrDOUTbit(addr,bit):
VerifyADDR(addr)
VerifyDOUTchannel(bit)
resp=ppCMD(addr,0x11,bit,0,0)
def toggleDOUTbit(addr,bit):
VerifyADDR(addr)
VerifyDOUTchannel(bit)
resp=ppCMD(addr,0x12,bit,0,0)
def setDOUTall(addr,byte):
VerifyADDR(addr)
assert ((byte>=0) and (byte<=127)),"Digital output value out of range. Must be in the range of 0 to 127"
resp=ppCMD(addr,0x13,byte,0,0)
def getDOUTbyte(addr):
VerifyADDR(addr)
resp=ppCMD(addr,0x14,0,0,1)
return resp
#==============================================================================#
# PWM and DAC Output Functions #
#==============================================================================#
def setPWM(addr,channel,value):
VerifyADDR(addr)
assert (value<=1023 and value>=0), "ERROR: PWM argument out of range - must be between 0 and 1023"
assert (channel==0 or channel==1), "Error: PWM channel must be 0 or 1"
hibyte = value>>8
lobyte = value - (hibyte<<8)
resp=ppCMD(addr,0x40+channel,hibyte,lobyte,0)
def getPWM(addr,channel):
VerifyADDR(addr)
assert (channel==0 or channel==1), "Error: PWM channel must be 0 or 1"
## Return PWM set value
resp=ppCMD(addr,0x40+channel+2,0,0,2)
value=(256*resp[0]+resp[1])
return value
def setDAC(addr,channel,value):
global Vcc
VerifyADDR(addr)
assert (value>=0 and value<=4.095), "ERROR: PWM argument out of range - must be between 0 and 4.095 volts"
assert (channel==0 or channel==1), "Error: DAC channel must be 0 or 1"
value = int(value/Vcc[addr]*1024)
hibyte = value>>8
lobyte = value - (hibyte<<8)
resp=ppCMD(addr,0x40+channel,hibyte,lobyte,0)
def getDAC(addr,channel):
global Vcc
VerifyADDR(addr)
assert (channel==0 or channel==1), "Error: DAC channel must be 0 or 1"
## Return DAC value
resp=ppCMD(addr,0x40+channel+2,0,0,2)
value=(256*resp[0]+resp[1])
value=value*Vcc[addr]/1023
return value
def calDAC(addr):
global Vcc
VerifyADDR(addr)
Vcc[addr] = getADC(addr,8)
#==============================================================================#
# Interrupt Control Functions #
#==============================================================================#
def intEnable(addr): #DAQC will pull down on INT pin if an enabled event occurs
VerifyADDR(addr)
resp=ppCMD(addr,0x04,0,0,0)
def intDisable(addr):
VerifyADDR(addr)
resp=ppCMD(addr,0x05,0,0,0)
def getINTflags(addr): #read INT flag registers in DAQC
VerifyADDR(addr)
resp=ppCMD(addr,0x06,0,0,2)
value=(256*resp[0]+resp[1])
return value
#==============================================================================#
# System Functions #
#==============================================================================#
def getFWrev(addr):
VerifyADDR(addr)
resp=ppCMD(addr,0x03,0,0,1)
rev = resp[0]
whole=float(rev>>4)
point = float(rev&0x0F)
return whole+point/10.0
def getHWrev(addr):
VerifyADDR(addr)
resp=ppCMD(addr,0x02,0,0,1)
rev = resp[0]
whole=float(rev>>4)
point = float(rev&0x0F)
return whole+point/10.0
def getVersion():
return DAQCversion
def getADDR(addr):
assert ((addr>=0) and (addr<MAXADDR)),"DAQCplate address must be in the range of 0 to 7"
resp=ppCMD(addr,0x00,0,0,1)
return resp[0]
def getID(addr):
global GPIObaseADDR
VerifyADDR(addr)
addr=addr+GPIObaseADDR
id=""
arg = range(4)
resp = []
arg[0]=addr;
arg[1]=0x1;
arg[2]=0;
arg[3]=0;
ppFRAME = 25
GPIO.output(ppFRAME,True)
null = spi.writebytes(arg)
count=0
time.sleep(.0001)
while (count<20):
dummy=spi.xfer([00],500000,40)
time.sleep(.0001)
if (dummy[0] != 0):
num = dummy[0]
id = id + chr(num)
count = count + 1
else:
count=20
GPIO.output(ppFRAME,False)
return id
def getPROGdata(addr,paddr): #read a byte of data from program memory
VerifyADDR(addr)
resp=ppCMD(addr,0xF0,paddr>>8,paddr&0xFF,2)
value=(256*resp[0]+resp[1])
return hex(value)
def Poll():
ppFoundCount=0
for i in range (0,8):
rtn = getADDR(i)
if ((rtn-8)==i):
print "DAQCplate found at address",rtn-8
ppFoundCount += 1
if (ppFoundCount == 0):
print "No DAQCplates found"
def VerifyDINchannel(din):
assert ((din>=0) and (din<=7)),"Digital input channel value out of range. Must be in the range of 0 to 7"
def VerifyAINchannel(ain):
assert ((ain>=0) and (ain<=8)),"Analog input channel value out of range. Must be in the range of 0 to 8"
def VerifyDOUTchannel(dout):
assert ((dout>=0) and (dout<=6)),"Digital output channel value out of range. Must be in the range of 0 to 6"
def VerifyADDR(addr):
assert ((addr>=0) and (addr<MAXADDR)),"DAQCplate address must be in the range of 0 to 7"
addr_str=str(addr)
assert (daqcsPresent[addr]==1),"No DAQCplate found at address "+addr_str
def ppCMD(addr,cmd,param1,param2,bytes2return):
global GPIObaseADDR
arg = range(4)
resp = []
arg[0]=addr+GPIObaseADDR;
arg[1]=cmd;
arg[2]=param1;
arg[3]=param2;
# time.sleep(.0005)
GPIO.output(ppFRAME,True)
null=spi.xfer(arg,300000,60)
#null = spi.writebytes(arg)
if bytes2return>0:
time.sleep(.0001)
for i in range(0,bytes2return):
dummy=spi.xfer([00],500000,20)
resp.append(dummy[0])
GPIO.output(ppFRAME,False)
time.sleep(.0003)
return resp
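# --- Illustrative note (not part of the original library) ---
# ppCMD() is the low-level transport: each command is a 4-byte SPI frame
# [GPIO address, command, param1, param2] sent while ppFRAME is high, and
# bytes2return reply bytes are then clocked back one at a time. For
# example, getADC(addr, ch) sends command 0x30 and reassembles a 2-byte
# big-endian ADC count from the reply.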
def Init():
global daqcsPresent
for i in range (0,8):
daqcsPresent[i]=0
Vcc[i]=10000
rtn = getADDR(i)
if ((rtn-8)==i):
daqcsPresent[i]=1
ok=0
while(ok==0):
Vcc[i] = getADC(i,8)
if Vcc[i]>3.0:
ok=1
setDOUTall(i,0)
setPWM(i,0,0)
setPWM(i,1,0)
Init()
| 0.048449 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
def get_context(context):
project_user = frappe.db.get_value("Project User", {"parent": frappe.form_dict.project, "user": frappe.session.user} , ["user", "view_attachments"], as_dict= True)
if frappe.session.user != 'Administrator' and (not project_user or frappe.session.user == 'Guest'):
raise frappe.PermissionError
context.no_cache = 1
context.show_sidebar = True
project = frappe.get_doc('Project', frappe.form_dict.project)
project.has_permission('read')
project.tasks = get_tasks(project.name, start=0, item_status='open',
search=frappe.form_dict.get("search"))
project.timesheets = get_timesheets(project.name, start=0,
search=frappe.form_dict.get("search"))
if project_user and project_user.view_attachments:
project.attachments = get_attachments(project.name)
context.doc = project
def get_tasks(project, start=0, search=None, item_status=None):
filters = {"project": project}
if search:
filters["subject"] = ("like", "%{0}%".format(search))
# if item_status:
# filters["status"] = item_status
tasks = frappe.get_all("Task", filters=filters,
fields=["name", "subject", "status", "_seen", "_comments", "modified", "description"],
limit_start=start, limit_page_length=10)
for task in tasks:
task.todo = frappe.get_all('ToDo',filters={'reference_name':task.name, 'reference_type':'Task'},
fields=["assigned_by", "owner", "modified", "modified_by"])
if task.todo:
task.todo=task.todo[0]
task.todo.user_image = frappe.db.get_value('User', task.todo.owner, 'user_image')
task.comment_count = len(json.loads(task._comments or "[]"))
task.css_seen = ''
if task._seen:
if frappe.session.user in json.loads(task._seen):
task.css_seen = 'seen'
return tasks
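# --- Illustrative note (not part of the original module) ---
# get_tasks() above builds filters like {"project": "PROJ-0001",
# "subject": ("like", "%design%")} (hypothetical values) and pages
# through results 10 rows at a time via limit_start/limit_page_length,
# which get_task_html() below uses to fetch further pages.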
@frappe.whitelist()
def get_task_html(project, start=0, item_status=None):
return frappe.render_template("erpnext/templates/includes/projects/project_tasks.html",
{"doc": {
"name": project,
"project_name": project,
"tasks": get_tasks(project, start, item_status=item_status)}
}, is_path=True)
def get_timesheets(project, start=0, search=None):
filters = {"project": project}
if search:
filters["activity_type"] = ("like", "%{0}%".format(search))
timesheets = frappe.get_all('Timesheet Detail', filters=filters,
fields=['project','activity_type','from_time','to_time','parent'],
limit_start=start, limit_page_length=10)
for timesheet in timesheets:
timesheet.infos = frappe.get_all('Timesheet', filters={"name": timesheet.parent},
fields=['name','_comments','_seen','status','modified','modified_by'],
limit_start=start, limit_page_length=10)
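		# NB: the loop target is the attribute 'timesheet.info' (not a
		# local variable); each row of timesheet.infos is bound to it in
		# turn before being decorated below.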
for timesheet.info in timesheet.infos:
timesheet.info.user_image = frappe.db.get_value('User', timesheet.info.modified_by, 'user_image')
timesheet.info.comment_count = len(json.loads(timesheet.info._comments or "[]"))
timesheet.info.css_seen = ''
if timesheet.info._seen:
if frappe.session.user in json.loads(timesheet.info._seen):
timesheet.info.css_seen = 'seen'
return timesheets
@frappe.whitelist()
def get_timesheet_html(project, start=0):
return frappe.render_template("erpnext/templates/includes/projects/project_timesheets.html",
{"doc": {"timesheets": get_timesheets(project, start)}}, is_path=True)
def get_attachments(project):
return frappe.get_all('File', filters= {"attached_to_name": project, "attached_to_doctype": 'Project', "is_private":0},
fields=['file_name','file_url', 'file_size'])
| 0.02942 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Website Server Mode',
'version': '8.0.0.1.3',
"author": "ADHOC SA",
"website": "www.adhoc.com.ar",
'license': 'AGPL-3',
"category": "GenericModules",
'sequence': 10,
'description': """
Website Server Mode
===================
Link between server_mode and website modules
""",
'images': [],
'depends': [
"server_mode",
"website",
],
'data': [
"oerp_wb_develope_js.xml",
],
'demo': [],
'test': [],
'installable': True,
'auto_install': True,
'application': False,
'qweb': [],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 0 |
# xpyBuild - eXtensible Python-based Build System
#
# This module holds definitions that are used throughout the build system, and
# typically all names from this module will be imported.
#
# Copyright (c) 2019 Software AG, Darmstadt, Germany and/or its licensors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id: buildcommon.py 301527 2017-02-06 15:31:43Z matj $
#
""" Utility functions for manipulating strings, such as
`xpybuild.utils.stringutils.compareVersions`.
"""
import traceback, os, sys, io
import re
import platform
import logging
def compareVersions(v1: str, v2: str) -> int:
""" Compares two alphanumeric dotted version strings to see which is more recent.
Example usage::
if compareVersions(thisversion, '1.2.alpha-3') > 0:
... # thisversion is newer than 1.2.alpha-3
The comparison algorithm ignores case, and normalizes separators ./-/_
so that `'1.alpha2'=='1Alpha2'`. Any string components are compared
lexicographically with other strings, and compared to numbers
strings are always considered greater.
@param v1: A string containing a version number, with any number of components.
@param v2: A string containing a version number, with any number of components.
@return: an integer > 0 if v1>v2,
an integer < 0 if v1<v2,
or 0 if they are semantically the same.
>>> compareVersions('10-alpha5.dev10', '10alpha-5-dEv_10') == 0 # normalization of case and separators
True
>>> compareVersions('1.2.0', '1.2')
0
>>> compareVersions('1.02', '1.2')
0
>>> compareVersions('1.2.3', '1.2') > 0
True
>>> compareVersions('1.2', '1.2.3')
-1
>>> compareVersions('10.2', '1.2')
1
>>> compareVersions('1.2.text', '1.2.0') # letters are > numbers
1
>>> compareVersions('1.2.text', '1.2') # letters are > numbers
1
>>> compareVersions('10.2alpha1', '10.2alpha')
1
>>> compareVersions('10.2dev', '10.2alpha') # letters are compared lexicographically
1
>>> compareVersions('', '')
0
>>> compareVersions('1', '')
1
"""
def normversion(v):
# normalize versions into a list of components, with integers for the numeric bits
v = [int(x) if x.isdigit() else x for x in re.split('([0-9]+|[.])', v.lower().replace('-','.').replace('_','.')) if (x and x != '.') ]
return v
v1 = normversion(v1)
v2 = normversion(v2)
# make them the same length
while len(v1)<len(v2): v1.append(0)
while len(v1)>len(v2): v2.append(0)
for i in range(len(v1)):
if type(v1[i]) != type(v2[i]): # can't use > on different types
if type(v2[i])==int: # define string>int
return +1
else:
return -1
else:
if v1[i] > v2[i]: return 1
if v1[i] < v2[i]: return -1
return 0
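# --- Illustrative sketch (not part of the original module) ---
# compareVersions() is cmp-style, so sorting needs functools.cmp_to_key;
# the version strings below are hypothetical examples.
def _demo_sort_versions():
    import functools
    versions = ['1.10', '1.2.alpha-3', '1.2', '1.2.0']
    return sorted(versions, key=functools.cmp_to_key(compareVersions))
    # -> ['1.2', '1.2.0', '1.2.alpha-3', '1.10']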
def formatTimePeriod(secs):
"""
	Format a time period, given in seconds, as a short display string.
"""
if secs >= 120:
return '%0.1f minutes'%(secs/60.0)
elif secs >= 10:
return '%d seconds'%(secs)
else:
return '%0.1f seconds'%(secs)
| 0.033913 |
from modsim import *
params = Params(
x = 0, # m
y = 1, # m
angle = 45, # degree
velocity = 40, # m / s
mass = 145e-3, # kg
diameter = 73e-3, # m
C_d = 0.33, # dimensionless
rho = 1.2, # kg/m**3
g = 9.8, # m/s**2
t_end = 10, # s
)
from modsim import *
from numpy import pi, deg2rad
def make_system(params):
    # convert the launch angle from degrees to radians
theta = deg2rad(params.angle)
# compute x and y components of velocity
vx, vy = pol2cart(theta, params.velocity)
# make the initial state
init = State(x=params.x, y=params.y, vx=vx, vy=vy)
# compute the frontal area
area = pi * (params.diameter/2)**2
return System(params,
init = init,
area = area,
)
from modsim import *
def drag_force(V, system):
rho, C_d, area = system.rho, system.C_d, system.area
mag = rho * vector_mag(V)**2 * C_d * area / 2
direction = -vector_hat(V)
f_drag = mag * direction
return f_drag
from modsim import *
def slope_func(t, state, system):
x, y, vx, vy = state
mass, g = system.mass, system.g
V = Vector(vx, vy)
a_drag = drag_force(V, system) / mass
a_grav = g * Vector(0, -1)
A = a_grav + a_drag
return V.x, V.y, A.x, A.y
from modsim import *
def event_func(t, state, system):
x, y, vx, vy = state
return y
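# --- Illustrative sketch (not part of the original notebook code) ---
# How the pieces above are typically combined in ModSimPy (assumes the
# modsim library's run_solve_ivp helper):
#
#     system = make_system(params)
#     results, details = run_solve_ivp(system, slope_func, events=event_func)
#     results.y.plot(label='height')  # trajectory until the ball lands (y == 0)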
| 0.034122 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A remote procedure call (rpc) abstraction.
For some wrappers that add message versioning to rpc, see:
rpc.dispatcher
rpc.proxy
"""
from nova.openstack.common import cfg
from nova.openstack.common import importutils
rpc_opts = [
cfg.StrOpt('rpc_backend',
default='%s.impl_kombu' % __package__,
help="The messaging module to use, defaults to kombu."),
cfg.IntOpt('rpc_thread_pool_size',
default=64,
help='Size of RPC thread pool'),
cfg.IntOpt('rpc_conn_pool_size',
default=30,
help='Size of RPC connection pool'),
cfg.IntOpt('rpc_response_timeout',
default=60,
help='Seconds to wait for a response from call or multicall'),
cfg.IntOpt('rpc_cast_timeout',
default=30,
help='Seconds to wait before a cast expires (TTL). '
'Only supported by impl_zmq.'),
cfg.ListOpt('allowed_rpc_exception_modules',
default=['nova.openstack.common.exception',
'nova.exception',
],
                help='Modules of exceptions that are permitted to be recreated '
                'upon receiving exception data from an rpc call.'),
cfg.StrOpt('control_exchange',
default='nova',
help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
cfg.BoolOpt('fake_rabbit',
default=False,
help='If passed, use a fake RabbitMQ provider'),
]
cfg.CONF.register_opts(rpc_opts)
def create_connection(new=True):
"""Create a connection to the message bus used for rpc.
For some example usage of creating a connection and some consumers on that
connection, see nova.service.
:param new: Whether or not to create a new connection. A new connection
will be created by default. If new is False, the
implementation is free to return an existing connection from a
pool.
:returns: An instance of openstack.common.rpc.common.Connection
"""
return _get_impl().create_connection(cfg.CONF, new=new)
def call(context, topic, msg, timeout=None):
"""Invoke a remote method that returns something.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: A dict from the remote method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
return _get_impl().call(cfg.CONF, context, topic, msg, timeout)
def cast(context, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast(cfg.CONF, context, topic, msg)
def fanout_cast(context, topic, msg):
"""Broadcast a remote method invocation with no return.
This method will get invoked on all consumers that were set up with this
topic name and fanout=True.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=True.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast(cfg.CONF, context, topic, msg)
def multicall(context, topic, msg, timeout=None):
"""Invoke a remote method and get back an iterator.
In this case, the remote method will be returning multiple values in
    separate messages, so the return values can be processed as they come in via
an iterator.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the rpc message to. This correlates to the
topic argument of
openstack.common.rpc.common.Connection.create_consumer()
and only applies when the consumer was created with
fanout=False.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:param timeout: int, number of seconds to use for a response timeout.
If set, this overrides the rpc_response_timeout option.
:returns: An iterator. The iterator will yield a tuple (N, X) where N is
an index that starts at 0 and increases by one for each value
returned and X is the Nth value that was returned by the remote
method.
:raises: openstack.common.rpc.common.Timeout if a complete response
is not received before the timeout is reached.
"""
return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)
def notify(context, topic, msg):
"""Send notification event.
:param context: Information that identifies the user that has made this
request.
:param topic: The topic to send the notification to.
:param msg: This is a dict of content of event.
:returns: None
"""
return _get_impl().notify(cfg.CONF, context, topic, msg)
def cleanup():
"""Clean up resoruces in use by implementation.
Clean up any resources that have been allocated by the RPC implementation.
This is typically open connections to a messaging service. This function
would get called before an application using this API exits to allow
connections to get torn down cleanly.
:returns: None
"""
return _get_impl().cleanup()
def cast_to_server(context, server_params, topic, msg):
"""Invoke a remote method that does not return anything.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().cast_to_server(cfg.CONF, context, server_params, topic,
msg)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Broadcast to a remote method invocation with no return.
:param context: Information that identifies the user that has made this
request.
:param server_params: Connection information
:param topic: The topic to send the notification to.
:param msg: This is a dict in the form { "method" : "method_to_invoke",
"args" : dict_of_kwargs }
:returns: None
"""
return _get_impl().fanout_cast_to_server(cfg.CONF, context, server_params,
topic, msg)
def queue_get_for(context, topic, host):
"""Get a queue name for a given topic + host.
This function only works if this naming convention is followed on the
consumer side, as well. For example, in nova, every instance of the
nova-foo service calls create_consumer() for two topics:
foo
foo.<host>
Messages sent to the 'foo' topic are distributed to exactly one instance of
the nova-foo service. The services are chosen in a round-robin fashion.
Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
<host>.
"""
return '%s.%s' % (topic, host)
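# --- Illustrative note (not part of the original module) ---
# e.g. queue_get_for(context, 'compute', 'node1') -> 'compute.node1', so a
# cast can target either the shared 'compute' queue or one specific host.
# The topic and host names are hypothetical examples.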
_RPCIMPL = None
def _get_impl():
"""Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL
if _RPCIMPL is None:
try:
_RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older nova config.
impl = cfg.CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL
| 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# care_top5_clu.py
#
# Copyright 2017 Carlos Eduardo Sequeiros Borja <casebor@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import argparse
import numpy as np
from os import path
colors = ['0x000080', '0x1a1aff', '0x00ffff', '0x66ff33', '0xffff00', '0xff8000', '0xff0000', '0xff00ff', '0x800080', '0x000000']
def get_rmsd_color(rmsd):
""" Returns a rgb color in gnuplot hexadecimal format depending of
the value of the RMSD.
"""
if rmsd <= 1.0:
rgb_color = colors[0]
elif rmsd > 1.0 and rmsd <= 1.5:
rgb_color = colors[1]
elif rmsd > 1.5 and rmsd <= 2.0:
rgb_color = colors[2]
elif rmsd > 2.0 and rmsd <= 2.5:
rgb_color = colors[3]
elif rmsd > 2.5 and rmsd <= 3.0:
rgb_color = colors[4]
elif rmsd > 3.0 and rmsd <= 3.5:
rgb_color = colors[5]
elif rmsd > 3.5 and rmsd <= 4.0:
rgb_color = colors[6]
elif rmsd > 4.0 and rmsd <= 4.5:
rgb_color = colors[7]
elif rmsd > 4.5 and rmsd <= 5.0:
rgb_color = colors[8]
else:
rgb_color = colors[9]
return rgb_color
def get_gdt_color(gdt):
""" Returns a rgb color in gnuplot hexadecimal format depending of
the value of the GDT.
"""
if gdt > 0.9:
rgb_color = colors[0]
elif gdt > 0.8 and gdt <= 0.9:
rgb_color = colors[1]
elif gdt > 0.7 and gdt <= 0.8:
rgb_color = colors[2]
elif gdt > 0.6 and gdt <= 0.7:
rgb_color = colors[3]
elif gdt > 0.5 and gdt <= 0.6:
rgb_color = colors[4]
elif gdt > 0.4 and gdt <= 0.5:
rgb_color = colors[5]
elif gdt > 0.3 and gdt <= 0.4:
rgb_color = colors[6]
elif gdt > 0.2 and gdt <= 0.3:
rgb_color = colors[7]
elif gdt > 0.1 and gdt <= 0.2:
rgb_color = colors[8]
else:
rgb_color = colors[9]
return rgb_color
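# --- Illustrative sketch (not part of the original script) ---
# The banding in get_rmsd_color() can equivalently be written with
# bisect over the threshold list; shown only to clarify the mapping.
from bisect import bisect_left

def rmsd_color_via_bisect(rmsd):
    # <=1.0 -> colors[0], (1.0, 1.5] -> colors[1], ..., >5.0 -> colors[9]
    thresholds = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0]
    return colors[bisect_left(thresholds, rmsd)]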
def main():
""" Main function """
if args.rmsd_file is None and args.gdt_file is None:
print "Error!!!\nYou didn't specified nor a RMSD file nor a GDT file.\nPlease specify only one of them."
quit()
if args.rmsd_file is not None and args.gdt_file is not None:
print "Error!!!\nYou specified both RMSD file and GDT file.\nPlease specify only one of them."
quit()
clust_qty = np.zeros((5,), dtype=np.int)
clust_metric = np.zeros((5,), dtype=np.float32)
with open(args.infile,'r') as temporal_file:
clust_file = list(temporal_file)
if len(clust_file) > 7:
for i in range(1,6):
clust_qty[i-1] = clust_file[i].split()[-1]
else:
for i in range(1,len(clust_file)-1):
clust_qty[i-1] = clust_file[i].split()[-1]
total_struc = clust_file[-1].split()[-1]
if args.rmsd_file is not None:
out = "#Cluster Population RMSD Total Color\n"
with open(args.rmsd_file,'r') as temporal_file:
clust_file = list(temporal_file)
if len(clust_file) > 6:
for i in range(1,6):
clust_metric[i-1] = clust_file[i].split()[-1]
else:
for i in range(1,len(clust_file)):
clust_metric[i-1] = clust_file[i].split()[-1]
for i in range(5):
out += "Cluster_%d %d %f %s %s\n" %(i+1, clust_qty[i], clust_metric[i], total_struc, get_rmsd_color(clust_metric[i]))
else:
out = "#Cluster Population GDT Total Color\n"
        with open(args.gdt_file,'r') as temporal_file:
clust_file = list(temporal_file)
if len(clust_file) > 5:
for i in range(5):
clust_metric[i] = clust_file[i].split()[-1]
else:
for i in range(len(clust_file)):
clust_metric[i] = clust_file[i].split()[-1]
for i in range(5):
out += "Cluster_%d %d %f %s %s\n" %(i+1, clust_qty[i], clust_metric[i], total_struc, get_gdt_color(clust_metric[i]))
args.outfile.write(out)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Obtain the 5 most populated clusters using RMSD or GDT-TS as additional data.')
parser.add_argument('-i', '--in', action='store', required=True, dest='infile', help='File containing data of the clusters and population of each.')
parser.add_argument('-r', '--rmsd', action='store', required=False, dest='rmsd_file', help='File with the RMSD values of each centroid.')
parser.add_argument('-g', '--gdt', action='store', required=False, dest='gdt_file', help='File with the GDT values of each centroid.')
parser.add_argument('-o', '--out', action='store', type=argparse.FileType('w'), required=True, dest='outfile', help='Specifies the name for the output file.')
args = parser.parse_args()
main()
| 0.03013 |
import os
from copy import deepcopy
from stat import S_ISDIR
from math import ceil
from collections import defaultdict
from time import ctime, time
from textwrap import fill
try:
import matplotlib
if not os.environ.get('DISPLAY'):
# Use non-interactive Agg backend
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
import platform
if platform.python_implementation() == 'PyPy':
# PyPy doesn't have a version of matplotlib. Make fake classes and
# a Line2D function that raises if used. This allows us to use
# other 'dark' code that happens to import dark.graphics but which
# does not use the functions that rely on matplotlib.
class plt(object):
def __getattr__(self, _):
raise NotImplementedError(
'matplotlib is not supported under pypy')
gridspec = patches = plt
def Line2D(*args, **kwargs):
raise NotImplementedError('matplotlib is not supported under pypy')
else:
raise
else:
from matplotlib.lines import Line2D
from matplotlib import gridspec, patches
import numpy as np
from dark.aa import propertiesForSequence, clustersForSequence
from dark.baseimage import BaseImage
from dark.dimension import dimensionalIterator
from dark.html import AlignmentPanelHTMLWriter, NCBISequenceLinkURL
from dark.intervals import ReadIntervals
from dark.features import ProteinFeatureAdder, NucleotideFeatureAdder
from dark import orfs
from dark.intervals import OffsetAdjuster
from dark.score import HigherIsBetterScore
QUERY_COLORS = {
'A': (1.0, 0.0, 0.0), # Red.
'C': (0.0, 0.0, 1.0), # Blue.
'G': (0.0, 1.0, 0.0), # Green.
'N': (1.0, 0.0, 1.0), # Purple.
'T': (1.0, 0.8, 0.0), # Orange.
'gap': (0.2, 0.2, 0.2), # Almost black.
'match': (0.9, 0.9, 0.9), # Almost white.
'*': (0.9, 0.9, 0.9), # Almost white.
}
DEFAULT_BASE_COLOR = (0.5, 0.5, 0.5) # Grey
# From http://www.randalolson.com/2014/06/28/how-to-make-beautiful-data-\
# visualizations-in-python-with-matplotlib/
#
# These are the "Tableau 20" colors as RGB.
TABLEAU20 = [
(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the above RGB values to the [0, 1] range, the format matplotlib
# accepts.
for i in range(len(TABLEAU20)):
r, g, b = TABLEAU20[i]
TABLEAU20[i] = (r / 255.0, g / 255.0, b / 255.0)
# If we're making a plot that has a log-linear X axis, don't show
# background light grey rectangles for any gap whose (logged) width is less
# than SMALLEST_LOGGED_GAP_TO_DISPLAY.
SMALLEST_LOGGED_GAP_TO_DISPLAY = 20
# Y (Score) axis extra spacing above the best read match. The score of the
# best read HSP will be multiplied by this value, the result converted to
# an int, and then used as the upper bound on the Y axis. Adding 1% seems
# to be a good heuristic.
Y_AXIS_UPPER_PADDING = 1.01
# The default base of the logarithm to use when logLinearXAxis is used to
# produce an alignment graph.
DEFAULT_LOG_LINEAR_X_AXIS_BASE = 1.1
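# --- Illustrative note (not part of the original module) ---
# With logLinearXAxis enabled, empty stretches of the subject are
# compressed: assuming OffsetAdjuster shrinks a gap of width w to about
# log(w) / log(logBase) axis units, a 1000-base gap at base 1.1 occupies
# roughly 72 units instead of 1000.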
def report(msg):
print('%s: %s' % (ctime(time()), msg))
def alignmentGraph(titlesAlignments, title, addQueryLines=True,
showFeatures=True, logLinearXAxis=False,
logBase=DEFAULT_LOG_LINEAR_X_AXIS_BASE, rankScores=False,
colorQueryBases=False, createFigure=True, showFigure=True,
readsAx=None, imageFile=None, quiet=False, idList=False,
xRange='subject', showOrfs=True):
"""
Align a set of matching reads against a BLAST or DIAMOND hit.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param title: A C{str} sequence title that was matched. We plot the
reads that hit this title.
@param addQueryLines: if C{True}, draw query lines in full (these will then
be partly overdrawn by the HSP match against the subject). These are
the 'whiskers' that potentially protrude from each side of a query.
@param showFeatures: if C{True}, look online for features of the subject
sequence (given by hitId).
@param logLinearXAxis: if C{True}, convert read offsets so that empty
regions in the plot we're preparing will only be as wide as their
logged actual values.
@param logBase: The base of the logarithm to use if logLinearXAxis is
C{True}.
    @param rankScores: If C{True}, change the e-values and bit scores for the
reads for each title to be their rank (worst to best).
@param colorQueryBases: if C{True}, color each base of a query string. If
C{True}, then addQueryLines is meaningless since the whole query is
shown colored.
@param createFigure: If C{True}, create a figure and give it a title.
@param showFigure: If C{True}, show the created figure. Set this to
C{False} if you're creating a panel of figures or just want to save an
image (with C{imageFile}).
@param readsAx: If not None, use this as the subplot for displaying reads.
@param imageFile: If not None, specifies a filename to write the image to.
@param quiet: If C{True}, don't print progress / timing output.
    @param idList: a dictionary. The keys are colors and the values are lists
        of read identifiers that should be colored in the respective color.
@param xRange: set to either 'subject' or 'reads' to indicate the range of
the X axis.
@param showOrfs: If C{True}, open reading frames will be displayed.
"""
startTime = time()
assert xRange in ('subject', 'reads'), (
'xRange must be either "subject" or "reads".')
if createFigure:
width = 20
figure = plt.figure(figsize=(width, 20))
createdReadsAx = readsAx is None
if showFeatures:
if showOrfs:
gs = gridspec.GridSpec(4, 1, height_ratios=[3, 1, 1, 12])
featureAx = plt.subplot(gs[0, 0])
orfAx = plt.subplot(gs[1, 0])
orfReversedAx = plt.subplot(gs[2, 0])
readsAx = readsAx or plt.subplot(gs[3, 0])
else:
gs = gridspec.GridSpec(2, 1, height_ratios=[1, 1])
featureAx = plt.subplot(gs[0, 0])
readsAx = readsAx or plt.subplot(gs[1, 0])
else:
if showOrfs:
gs = gridspec.GridSpec(3, 1, height_ratios=[1, 1, 12])
orfAx = plt.subplot(gs[0, 0])
orfReversedAx = plt.subplot(gs[1, 0])
readsAx = readsAx or plt.subplot(gs[2, 0])
else:
readsAx = readsAx or plt.subplot(111)
# Make a deep copy of the title alignments. We're potentially going to
# change the HSP scores, the X axis offsets, etc., and we don't want to
# interfere with the data we were passed.
titleAlignments = deepcopy(titlesAlignments[title])
readsAlignments = titlesAlignments.readsAlignments
subjectIsNucleotides = readsAlignments.params.subjectIsNucleotides
if showOrfs and not subjectIsNucleotides:
# We cannot show ORFs when displaying protein plots.
showOrfs = False
# Allow the class of titlesAlignments to adjust HSPs for plotting,
# if it has a method for doing so.
try:
adjuster = readsAlignments.adjustHspsForPlotting
except AttributeError:
pass
else:
adjuster(titleAlignments)
if rankScores:
reverse = titlesAlignments.scoreClass is not HigherIsBetterScore
for rank, hsp in enumerate(sorted(titleAlignments.hsps(),
reverse=reverse), start=1):
hsp.score.score = rank
if logLinearXAxis:
readIntervals = ReadIntervals(titleAlignments.subjectLength)
# Examine all HSPs so we can build an offset adjuster.
for hsp in titleAlignments.hsps():
readIntervals.add(hsp.readStartInSubject, hsp.readEndInSubject)
# Now adjust offsets in all HSPs.
offsetAdjuster = OffsetAdjuster(readIntervals, base=logBase)
for hsp in titleAlignments.hsps():
offsetAdjuster.adjustHSP(hsp)
# A function for adjusting other offsets, below.
adjustOffset = offsetAdjuster.adjustOffset
else:
def adjustOffset(offset):
return offset
# It would be more efficient to only walk through all HSPs once and
# compute these values all at once, but for now this is simple and clear.
maxY = int(ceil(titleAlignments.bestHsp().score.score))
minY = int(titleAlignments.worstHsp().score.score)
maxX = max(hsp.readEndInSubject for hsp in titleAlignments.hsps())
minX = min(hsp.readStartInSubject for hsp in titleAlignments.hsps())
if xRange == 'subject':
# We'll display a graph for the full subject range. Adjust X axis
# min/max to make sure we cover at least zero to the sequence length.
maxX = max(titleAlignments.subjectLength, maxX)
minX = min(0, minX)
# Swap min & max Y values, if needed, as it's possible we are dealing
# with LSPs but that the score adjuster made numerically greater values
# for those that were small.
if maxY < minY:
(maxY, minY) = (minY, maxY)
if logLinearXAxis:
# Adjust minX and maxX if we have gaps at the subject start or end.
gaps = list(readIntervals.walk())
if gaps:
# Check start of first gap:
intervalType, (start, stop) = gaps[0]
if intervalType == ReadIntervals.EMPTY:
adjustedStart = adjustOffset(start)
if adjustedStart < minX:
minX = adjustedStart
# Check stop of last gap:
intervalType, (start, stop) = gaps[-1]
if intervalType == ReadIntervals.EMPTY:
adjustedStop = adjustOffset(stop)
if adjustedStop > maxX:
maxX = adjustedStop
# We're all set up to start plotting the graph.
# Add light grey vertical rectangles to show the logarithmic gaps. Add
# these first so that reads will be plotted on top of them. Only draw
# gaps that are more than SMALLEST_LOGGED_GAP_TO_DISPLAY pixels wide as
    # we could have millions of tiny gaps for a bacterial subject and drawing
# all will be slow and only serves to make the entire background grey.
if logLinearXAxis and len(offsetAdjuster.adjustments()) < 100:
for (intervalType, interval) in readIntervals.walk():
if intervalType == ReadIntervals.EMPTY:
adjustedStart = adjustOffset(interval[0])
adjustedStop = adjustOffset(interval[1])
width = adjustedStop - adjustedStart
if width >= SMALLEST_LOGGED_GAP_TO_DISPLAY:
readsAx.axvspan(adjustedStart, adjustedStop,
color='#f4f4f4')
if colorQueryBases:
# Color each query by its bases.
xScale = 3
yScale = 2
baseImage = BaseImage(
maxX - minX, maxY - minY + (1 if rankScores else 0),
xScale, yScale)
for alignment in titleAlignments:
for hsp in alignment.hsps:
y = hsp.score.score - minY
# If the product of the subject and read frame values is +ve,
# then they're either both +ve or both -ve, so we just use the
# read as is. Otherwise, we need to reverse complement it.
if hsp.subjectFrame * hsp.readFrame > 0:
query = alignment.read.sequence
else:
# One of the subject or query has negative sense.
query = alignment.read.reverseComplement().sequence
readStartInSubject = hsp.readStartInSubject
# There are 3 parts of the query string we need to
# display. 1) the left part (if any) before the matched
# part of the subject. 2) the matched part (which can
# include gaps in the query and/or subject). 3) the right
# part (if any) after the matched part. For each part,
# calculate the ranges in which we have to make the
# comparison between subject and query.
# NOTE: never use hsp['origHsp'].gaps to calculate the number
# of gaps, as this number contains gaps in both subject and
# query.
# 1. Left part:
leftRange = hsp.subjectStart - readStartInSubject
# 2. Match, middle part:
middleRange = len(hsp.readMatchedSequence)
# 3. Right part:
# Using hsp.readEndInSubject - hsp.subjectEnd to calculate the
# length of the right part leads to the part being too long.
# The number of gaps needs to be subtracted to get the right
# length.
origQuery = hsp.readMatchedSequence.upper()
rightRange = (hsp.readEndInSubject - hsp.subjectEnd -
origQuery.count('-'))
# 1. Left part.
xOffset = readStartInSubject - minX
queryOffset = 0
for queryIndex in range(leftRange):
color = QUERY_COLORS.get(query[queryOffset + queryIndex],
DEFAULT_BASE_COLOR)
baseImage.set(xOffset + queryIndex, y, color)
# 2. Match part.
xOffset = hsp.subjectStart - minX
xIndex = 0
queryOffset = hsp.subjectStart - hsp.readStartInSubject
origSubject = hsp.subjectMatchedSequence
for matchIndex in range(middleRange):
if origSubject[matchIndex] == '-':
# A gap in the subject was needed to match the query.
# In our graph we keep the subject the same even in the
# case where BLAST opened gaps in it, so we compensate
# for the gap in the subject by not showing this base
# of the query.
pass
else:
if origSubject[matchIndex] == origQuery[matchIndex]:
# The query matched the subject at this location.
# Matching bases are all colored in the same
# 'match' color.
color = QUERY_COLORS['match']
else:
if origQuery[matchIndex] == '-':
# A gap in the query. All query gaps get the
# same 'gap' color.
color = QUERY_COLORS['gap']
else:
# Query doesn't match subject (and is not a
# gap).
color = QUERY_COLORS.get(origQuery[matchIndex],
DEFAULT_BASE_COLOR)
baseImage.set(xOffset + xIndex, y, color)
xIndex += 1
# 3. Right part.
xOffset = hsp.subjectEnd - minX
backQuery = query[-rightRange:].upper()
for queryIndex in range(rightRange):
color = QUERY_COLORS.get(backQuery[queryIndex],
DEFAULT_BASE_COLOR)
baseImage.set(xOffset + queryIndex, y, color)
readsAx.imshow(baseImage.data, aspect='auto', origin='lower',
interpolation='nearest',
extent=[minX, maxX, minY, maxY])
else:
# Add horizontal lines for all the query sequences. These will be the
# grey 'whiskers' in the plots once we (below) draw the matched part
# on top of part of them.
if addQueryLines:
for hsp in titleAlignments.hsps():
y = hsp.score.score
line = Line2D([hsp.readStartInSubject, hsp.readEndInSubject],
[y, y], color='#aaaaaa')
readsAx.add_line(line)
# Add the horizontal BLAST alignment lines.
# If an idList is given set things up to look up read colors.
readColor = {}
if idList:
for color, reads in idList.items():
for read in reads:
if read in readColor:
raise ValueError('Read %s is specified multiple '
'times in idList' % read)
else:
readColor[read] = color
# Draw the matched region.
for titleAlignment in titleAlignments:
readId = titleAlignment.read.id
for hsp in titleAlignment.hsps:
y = hsp.score.score
line = Line2D([hsp.subjectStart, hsp.subjectEnd], [y, y],
color=readColor.get(readId, 'blue'))
readsAx.add_line(line)
if showOrfs:
subject = readsAlignments.getSubjectSequence(title)
orfs.addORFs(orfAx, subject.sequence, minX, maxX, adjustOffset)
orfs.addReversedORFs(orfReversedAx,
subject.reverseComplement().sequence,
minX, maxX, adjustOffset)
if showFeatures:
if subjectIsNucleotides:
featureAdder = NucleotideFeatureAdder()
else:
featureAdder = ProteinFeatureAdder()
features = featureAdder.add(featureAx, title, minX, maxX,
adjustOffset)
# If there are features and there weren't too many of them, add
# vertical feature lines to the reads and ORF axes.
if features and not featureAdder.tooManyFeaturesToPlot:
for feature in features:
start = feature.start
end = feature.end
color = feature.color
readsAx.axvline(x=start, color=color)
readsAx.axvline(x=end, color='#cccccc')
if showOrfs:
orfAx.axvline(x=start, color=color)
orfAx.axvline(x=end, color='#cccccc')
orfReversedAx.axvline(x=start, color=color)
orfReversedAx.axvline(x=end, color='#cccccc')
else:
features = None
# We'll return some information we've gathered.
result = {
'adjustOffset': adjustOffset,
'features': features,
'minX': minX,
'maxX': maxX,
'minY': minY,
'maxY': maxY,
}
# Allow the class of titlesAlignments to add to the plot, if it has a
# method for doing so.
try:
adjuster = readsAlignments.adjustPlot
except AttributeError:
pass
else:
adjuster(readsAx)
# Titles, axis, etc.
if createFigure:
readCount = titleAlignments.readCount()
hspCount = titleAlignments.hspCount()
figure.suptitle(
'%s\nLength %d %s, %d read%s, %d HSP%s.' %
(
fill(titleAlignments.subjectTitle, 80),
titleAlignments.subjectLength,
'nt' if subjectIsNucleotides else 'aa',
readCount, '' if readCount == 1 else 's',
hspCount, '' if hspCount == 1 else 's'
),
fontsize=20)
# Add a title and y-axis label, but only if we made the reads axes.
if createdReadsAx:
readsAx.set_title('Read alignments', fontsize=20)
ylabel = readsAlignments.params.scoreTitle
if rankScores:
ylabel += ' rank'
plt.ylabel(ylabel, fontsize=17)
# Set the x-axis limits.
readsAx.set_xlim([minX - 1, maxX + 1])
readsAx.set_ylim([0, int(maxY * Y_AXIS_UPPER_PADDING)])
readsAx.grid()
if createFigure:
if showFigure:
plt.show()
if imageFile:
figure.savefig(imageFile)
stop = time()
if not quiet:
report('Graph generated in %.3f mins.' % ((stop - startTime) / 60.0))
return result
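# Hypothetical usage sketch (the TitlesAlignments instance and title come
# from the dark.titles module; the names below are illustrative, not from
# the original source):
#   graphInfo = alignmentGraph(titlesAlignments, title, logLinearXAxis=True)
#   graphInfo['minX'], graphInfo['maxX']  # X axis limits actually plotted
#   graphInfo['adjustOffset'](1000)       # map a subject offset to plot space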
def alignmentPanel(titlesAlignments, sortOn='maxScore', idList=False,
equalizeXAxes=False, xRange='subject', logLinearXAxis=False,
rankScores=False, showFeatures=True,
logBase=DEFAULT_LOG_LINEAR_X_AXIS_BASE):
"""
Produces a rectangular panel of graphs that each contain an alignment graph
against a given sequence.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param sortOn: The attribute to sort subplots on. Either "maxScore",
"medianScore", "readCount", "length", or "title".
@param idList: A dictionary. Keys are colors and values are lists of read
ids that should be colored using that color.
@param equalizeXAxes: If C{True}, adjust the X axis on each alignment plot
to be the same.
@param xRange: Set to either 'subject' or 'reads' to indicate the range of
the X axis.
@param logLinearXAxis: If C{True}, convert read offsets so that empty
regions in the plots we're preparing will only be as wide as their
logged actual values.
@param logBase: The logarithm base to use if logLinearXAxis is C{True}.
    @param rankScores: If C{True}, change the scores for the reads for each
title to be their rank (worst to best).
@param showFeatures: If C{True}, look online for features of the subject
sequences.
    @raise ValueError: If C{xRange} is not "subject" or "reads".
"""
if xRange not in ('subject', 'reads'):
raise ValueError('xRange must be either "subject" or "reads".')
start = time()
titles = titlesAlignments.sortTitles(sortOn)
cols = 5
rows = int(len(titles) / cols) + (0 if len(titles) % cols == 0 else 1)
figure, ax = plt.subplots(rows, cols, squeeze=False)
allGraphInfo = {}
coords = dimensionalIterator((rows, cols))
report('Plotting %d titles in %dx%d grid, sorted on %s' %
(len(titles), rows, cols, sortOn))
for i, title in enumerate(titles):
titleAlignments = titlesAlignments[title]
row, col = next(coords)
report('%d: %s %s' % (i, title, NCBISequenceLinkURL(title, '')))
# Add a small plot to the alignment panel.
graphInfo = alignmentGraph(
titlesAlignments, title, addQueryLines=True,
showFeatures=showFeatures, rankScores=rankScores,
logLinearXAxis=logLinearXAxis, logBase=logBase,
colorQueryBases=False, createFigure=False, showFigure=False,
readsAx=ax[row][col], quiet=True, idList=idList, xRange=xRange,
showOrfs=False)
allGraphInfo[title] = graphInfo
readCount = titleAlignments.readCount()
hspCount = titleAlignments.hspCount()
# Make a short title for the small panel blue plot, ignoring any
# leading NCBI gi / accession numbers.
if title.startswith('gi|') and title.find(' ') > -1:
shortTitle = title.split(' ', 1)[1][:40]
else:
shortTitle = title[:40]
plotTitle = ('%d: %s\nLength %d, %d read%s, %d HSP%s.' % (
i, shortTitle, titleAlignments.subjectLength,
readCount, '' if readCount == 1 else 's',
hspCount, '' if hspCount == 1 else 's'))
if hspCount:
if rankScores:
plotTitle += '\nY axis is ranked score'
else:
plotTitle += '\nmax %.2f, median %.2f' % (
titleAlignments.bestHsp().score.score,
titleAlignments.medianScore())
ax[row][col].set_title(plotTitle, fontsize=10)
maxX = max(graphInfo['maxX'] for graphInfo in allGraphInfo.values())
minX = min(graphInfo['minX'] for graphInfo in allGraphInfo.values())
maxY = max(graphInfo['maxY'] for graphInfo in allGraphInfo.values())
minY = min(graphInfo['minY'] for graphInfo in allGraphInfo.values())
# Post-process graphs to adjust axes, etc.
coords = dimensionalIterator((rows, cols))
for title in titles:
titleAlignments = titlesAlignments[title]
row, col = next(coords)
a = ax[row][col]
a.set_ylim([0, int(maxY * Y_AXIS_UPPER_PADDING)])
if equalizeXAxes:
a.set_xlim([minX, maxX])
a.set_yticks([])
a.set_xticks([])
if xRange == 'subject' and minX < 0:
# Add a vertical line at x=0 so we can see the 'whiskers' of
# reads that extend to the left of the sequence we're aligning
# against.
a.axvline(x=0, color='#cccccc')
# Add a line on the right of each sub-plot so we can see where the
# sequence ends (as all panel graphs have the same width and we
# otherwise couldn't tell).
sequenceLen = titleAlignments.subjectLength
if logLinearXAxis:
sequenceLen = allGraphInfo[title]['adjustOffset'](sequenceLen)
a.axvline(x=sequenceLen, color='#cccccc')
# Hide the final panel graphs (if any) that have no content. We do this
# because the panel is a rectangular grid and some of the plots at the
# end of the last row may be unused.
for row, col in coords:
ax[row][col].axis('off')
# plt.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.93,
# wspace=0.1, hspace=None)
plt.subplots_adjust(hspace=0.4)
figure.suptitle('X: %d to %d, Y (%s): %d to %d' %
(minX, maxX,
titlesAlignments.readsAlignments.params.scoreTitle,
int(minY), int(maxY)), fontsize=20)
figure.set_size_inches(5 * cols, 3 * rows, forward=True)
figure.show()
stop = time()
report('Alignment panel generated in %.3f mins.' % ((stop - start) / 60.0))
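# Sketch of a typical interactive call, with sort options as documented
# above (argument values are illustrative):
#   alignmentPanel(titlesAlignments, sortOn='readCount', equalizeXAxes=True,
#                  logLinearXAxis=True)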
def alignmentPanelHTML(titlesAlignments, sortOn='maxScore',
outputDir=None, idList=False, equalizeXAxes=False,
xRange='subject', logLinearXAxis=False,
logBase=DEFAULT_LOG_LINEAR_X_AXIS_BASE,
rankScores=False, showFeatures=True, showOrfs=True):
"""
Produces an HTML index file in C{outputDir} and a collection of alignment
graphs and FASTA files to summarize the information in C{titlesAlignments}.
@param titlesAlignments: A L{dark.titles.TitlesAlignments} instance.
@param sortOn: The attribute to sort subplots on. Either "maxScore",
"medianScore", "readCount", "length", or "title".
@param outputDir: Specifies a C{str} directory to write the HTML to. If
the directory does not exist it will be created.
@param idList: A dictionary. Keys are colors and values are lists of read
ids that should be colored using that color.
@param equalizeXAxes: If C{True}, adjust the X axis on each alignment plot
to be the same.
@param xRange: Set to either 'subject' or 'reads' to indicate the range of
the X axis.
@param logLinearXAxis: If C{True}, convert read offsets so that empty
regions in the plots we're preparing will only be as wide as their
logged actual values.
@param logBase: The logarithm base to use if logLinearXAxis is C{True}.
    @param rankScores: If C{True}, change the scores for the reads for each
title to be their rank (worst to best).
@param showFeatures: If C{True}, look online for features of the subject
sequences.
@param showOrfs: If C{True}, open reading frames will be displayed.
    @raise NotImplementedError: If C{equalizeXAxes} is C{True}.
    @raise ValueError: If C{outputDir} is C{None} or exists but is not a
        directory, or if C{xRange} is not "subject" or "reads".
"""
if xRange not in ('subject', 'reads'):
raise ValueError('xRange must be either "subject" or "reads".')
if equalizeXAxes:
raise NotImplementedError('This feature is not yet implemented.')
titles = titlesAlignments.sortTitles(sortOn)
if os.access(outputDir, os.F_OK):
# outputDir exists. Check it's a directory.
if not S_ISDIR(os.stat(outputDir).st_mode):
raise ValueError("%r is not a directory." % outputDir)
else:
if outputDir is None:
raise ValueError("The outputDir needs to be specified.")
else:
os.mkdir(outputDir)
htmlWriter = AlignmentPanelHTMLWriter(outputDir, titlesAlignments)
for i, title in enumerate(titles):
# titleAlignments = titlesAlignments[title]
# If we are writing data to a file too, create a separate file with
# a plot (this will be linked from the summary HTML).
imageBasename = '%d.png' % i
imageFile = '%s/%s' % (outputDir, imageBasename)
graphInfo = alignmentGraph(
titlesAlignments, title, addQueryLines=True,
showFeatures=showFeatures, rankScores=rankScores,
logLinearXAxis=logLinearXAxis, logBase=logBase,
colorQueryBases=False, showFigure=False, imageFile=imageFile,
quiet=True, idList=idList, xRange=xRange, showOrfs=showOrfs)
# Close the image plot to make sure memory is flushed.
plt.close()
htmlWriter.addImage(imageBasename, title, graphInfo)
htmlWriter.close()
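# Sketch of the resulting on-disk layout (the per-title image names follow
# the code above; the index file name is an assumption about
# AlignmentPanelHTMLWriter):
#   outputDir/
#       0.png, 1.png, ...   one alignment graph per sorted title
#       index.html          summary page linking the images and graph info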
def scoreGraph(titlesAlignments, find=None, showTitles=False, figureWidth=5,
figureHeight=5):
"""
NOTE: This function has probably bit rotted (but only a little).
Produce a rectangular panel of graphs, each of which shows sorted scores
for a title. Matches against a certain sequence title, as determined by
C{find}, (see below) are highlighted.
@param find: A function that can be passed a sequence title. If the
function returns C{True} a red dot is put into the graph at that point
to highlight the match.
@param showTitles: If C{True} display read sequence names. The panel tends
to look terrible when titles are displayed. If C{False}, show no title.
@param figureWidth: The C{float} width of the figure, in inches.
@param figureHeight: The C{float} height of the figure, in inches.
"""
maxScore = None
maxHsps = 0
cols = 5
rows = int(len(titlesAlignments) / cols) + (
0 if len(titlesAlignments) % cols == 0 else 1)
f, ax = plt.subplots(rows, cols)
coords = dimensionalIterator((rows, cols))
for title in titlesAlignments:
titleAlignments = titlesAlignments[title]
row, col = next(coords)
hspCount = titleAlignments.hspCount()
if hspCount > maxHsps:
maxHsps = hspCount
scores = []
highlightX = []
highlightY = []
for x, titleAlignment in enumerate(titleAlignments):
score = titleAlignment.hsps[0].score.score
scores.append(score)
if find and find(titleAlignment.subjectTitle):
highlightX.append(x)
highlightY.append(score)
a = ax[row][col]
if scores:
max_ = max(scores)
if maxScore is None or max_ > maxScore:
maxScore = max_
x = np.arange(0, len(scores))
a.plot(x, scores)
if highlightX:
a.plot(highlightX, highlightY, 'ro')
if showTitles:
a.set_title('%s' % title, fontsize=10)
# Adjust all plots to have the same dimensions.
coords = dimensionalIterator((rows, cols))
for _ in range(len(titlesAlignments)):
row, col = next(coords)
a = ax[row][col]
a.axis([0, maxHsps, 0, maxScore])
# a.set_yscale('log')
a.set_yticks([])
a.set_xticks([])
# Hide the final panel graphs (if any) that have no content. We do this
# because the panel is a rectangular grid and some of the plots at the
# end of the last row may be unused.
for row, col in coords:
ax[row][col].axis('off')
plt.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.93,
wspace=0.1, hspace=None)
f.suptitle('max HSPs %d, max score %f' % (maxHsps, maxScore))
f.set_size_inches(figureWidth, figureHeight, forward=True)
# f.savefig('scores.png')
plt.show()
def scatterAlign(seq1, seq2, window=7):
"""
Visually align two sequences.
"""
d1 = defaultdict(list)
d2 = defaultdict(list)
for (seq, section_dict) in [(seq1, d1), (seq2, d2)]:
for i in range(len(seq) - window):
section = seq[i:i + window]
section_dict[section].append(i)
matches = set(d1).intersection(d2)
print('%i unique matches' % len(matches))
x = []
y = []
for section in matches:
for i in d1[section]:
for j in d2[section]:
x.append(i)
y.append(j)
# plt.cla() # clear any prior graph
plt.gray()
plt.scatter(x, y)
plt.xlim(0, len(seq1) - window)
plt.ylim(0, len(seq2) - window)
plt.xlabel('length %i bp' % (len(seq1)))
plt.ylabel('length %i bp' % (len(seq2)))
plt.title('Dot plot using window size %i\n(allowing no mis-matches)' %
window)
plt.show()
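# Minimal sketch with two invented sequences (illustrative only). Shared
# 7-mers appear as dots near the diagonal:
#   scatterAlign('AGCTTAGCTAGGCTTAACGGATC', 'AGCTTAGCTAGGCTAACGGATCA')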
def plotAAProperties(sequence, propertyNames, showLines=True, showFigure=True):
"""
Plot amino acid property values for a sequence.
@param sequence: An C{AARead} (or a subclass) instance.
@param propertyNames: An iterable of C{str} property names (each of which
        must be a key in the C{dark.aa.PROPERTY_DETAILS} C{dict}).
@param showLines: If C{True}, lines will be drawn between successive AA
property values. If not, just the values will be plotted as a scatter
plot (this greatly reduces visual clutter if the sequence is long and
AA property values are variable).
@param showFigure: If C{True}, display the plot. Passing C{False} is useful
in testing.
@raise ValueError: If an unknown property is given in C{propertyNames}.
@return: The return value from calling dark.aa.propertiesForSequence:
a C{dict} keyed by (lowercase) property name, with values that are
C{list}s of the corresponding property value according to sequence
position.
"""
MISSING_AA_VALUE = -1.1
propertyValues = propertiesForSequence(sequence, propertyNames,
missingAAValue=MISSING_AA_VALUE)
if showFigure:
legend = []
x = np.arange(0, len(sequence))
plot = plt.plot if showLines else plt.scatter
for index, propertyName in enumerate(propertyValues):
color = TABLEAU20[index]
plot(x, propertyValues[propertyName], color=color)
legend.append(patches.Patch(color=color, label=propertyName))
plt.legend(handles=legend, loc=(0, 1.1))
plt.xlim(-0.2, len(sequence) - 0.8)
plt.ylim(min(MISSING_AA_VALUE, -1.1), 1.1)
plt.xlabel('Sequence index')
plt.ylabel('Property value')
plt.title(sequence.id)
plt.show()
return propertyValues
def plotAAClusters(sequence, propertyNames, showLines=True, showFigure=True):
"""
Plot amino acid property cluster numbers for a sequence.
@param sequence: An C{AARead} (or a subclass) instance.
@param propertyNames: An iterable of C{str} property names (each of which
        must be a key in the C{dark.aa.PROPERTY_CLUSTERS} C{dict}).
@param showLines: If C{True}, lines will be drawn between successive AA
property values. If not, just the values will be plotted as a scatter
plot (this greatly reduces visual clutter if the sequence is long and
AA property values are variable).
@param showFigure: If C{True}, display the plot. Passing C{False} is useful
in testing.
@raise ValueError: If an unknown property is given in C{propertyNames}.
@return: The return value from calling dark.aa.clustersForSequence:
a C{dict} keyed by (lowercase) property name, with values that are
C{list}s of the corresponding property value according to sequence
position.
"""
MISSING_AA_VALUE = 0
propertyClusters = clustersForSequence(sequence, propertyNames,
missingAAValue=MISSING_AA_VALUE)
if showFigure:
minCluster = 1
maxCluster = -1
legend = []
x = np.arange(0, len(sequence))
plot = plt.plot if showLines else plt.scatter
for index, propertyName in enumerate(propertyClusters):
color = TABLEAU20[index]
clusterNumbers = propertyClusters[propertyName]
plot(x, clusterNumbers, color=color)
legend.append(patches.Patch(color=color, label=propertyName))
propertyMinCluster = min(clusterNumbers)
if propertyMinCluster < minCluster:
minCluster = propertyMinCluster
propertyMaxCluster = max(clusterNumbers)
if propertyMaxCluster > maxCluster:
maxCluster = propertyMaxCluster
plt.legend(handles=legend, loc=(0, 1.1))
plt.xlim(-0.2, len(sequence) - 0.8)
plt.ylim(minCluster - 0.5, maxCluster + 0.5)
plt.yticks(range(maxCluster + 1))
plt.xlabel('Sequence index')
plt.ylabel('Property cluster number')
plt.title(sequence.id)
plt.show()
return propertyClusters
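# Usage sketch (AARead and the property names are assumptions about the
# surrounding dark package, not taken from this file):
#   from dark.reads import AARead
#   plotAAProperties(AARead('id1', 'MADRWLKQ'),
#                    ['hydrophobicity', 'polarity'])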
| 0 |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from nova import exception
from nova.i18n import _
# Define the minimum and maximum version of the API across all of the
# REST API. The format of the version is:
# X.Y where:
#
# - X will only be changed if a significant backwards incompatible API
# change is made which affects the API as whole. That is, something
# that is only very very rarely incremented.
#
# - Y when you make any change to the API. Note that this includes
# semantic changes which may not affect the input or output formats or
# even originate in the API code layer. We are not distinguishing
# between backwards compatible and backwards incompatible changes in
# the versioning system. It must be made clear in the documentation as
# to what is a backwards compatible change and what is a backwards
# incompatible one.
#
# You must update the API version history string below with a one or
# two line description as well as update rest_api_version_history.rst
REST_API_VERSION_HISTORY = """REST API Version History:
* 2.1 - Initial version. Equivalent to v2.0 code
* 2.2 - Adds (keypair) type parameter for os-keypairs plugin
Fixes success status code for create/delete a keypair method
* 2.3 - Exposes additional os-extended-server-attributes
Exposes delete_on_termination for os-extended-volumes
* 2.4 - Exposes reserved field in os-fixed-ips.
* 2.5 - Allow server search option ip6 for non-admin
* 2.6 - Consolidate the APIs for getting remote consoles
* 2.7 - Check flavor type before add tenant access.
* 2.8 - Add new protocol for VM console (mks)
* 2.9 - Exposes lock information in server details.
* 2.10 - Allow admins to query, create and delete keypairs owned by any
user.
* 2.11 - Exposes forced_down attribute for os-services
* 2.12 - Exposes VIF net-id in os-virtual-interfaces
"""
# The minimum and maximum versions of the API supported
# The default api version request is defined to be the
# minimum version of the API supported.
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = "2.1"
_MAX_API_VERSION = "2.12"
DEFAULT_API_VERSION = _MIN_API_VERSION
# NOTE(cyeoh): min and max versions declared as functions so we can
# mock them for unittests. Do not use the constants directly anywhere
# else.
def min_api_version():
return APIVersionRequest(_MIN_API_VERSION)
def max_api_version():
return APIVersionRequest(_MAX_API_VERSION)
class APIVersionRequest(object):
"""This class represents an API Version Request with convenience
    methods for the manipulation and comparison of version
    numbers needed to implement microversions.
"""
def __init__(self, version_string=None):
"""Create an API version request object.
:param version_string: String representation of APIVersionRequest.
Correct format is 'X.Y', where 'X' and 'Y' are int values.
None value should be used to create Null APIVersionRequest,
which is equal to 0.0
"""
self.ver_major = 0
self.ver_minor = 0
if version_string is not None:
match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$",
version_string)
if match:
self.ver_major = int(match.group(1))
self.ver_minor = int(match.group(2))
else:
raise exception.InvalidAPIVersionString(version=version_string)
def __str__(self):
"""Debug/Logging representation of object."""
return ("API Version Request Major: %s, Minor: %s"
% (self.ver_major, self.ver_minor))
def is_null(self):
return self.ver_major == 0 and self.ver_minor == 0
def _format_type_error(self, other):
return TypeError(_("'%(other)s' should be an instance of '%(cls)s'") %
{"other": other, "cls": self.__class__})
def __lt__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) <
(other.ver_major, other.ver_minor))
def __eq__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) ==
(other.ver_major, other.ver_minor))
def __gt__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) >
(other.ver_major, other.ver_minor))
def __le__(self, other):
return self < other or self == other
def __ne__(self, other):
return not self.__eq__(other)
def __ge__(self, other):
return self > other or self == other
def matches(self, min_version, max_version):
"""Returns whether the version object represents a version
greater than or equal to the minimum version and less than
or equal to the maximum version.
@param min_version: Minimum acceptable version.
@param max_version: Maximum acceptable version.
@returns: boolean
If min_version is null then there is no minimum limit.
If max_version is null then there is no maximum limit.
If self is null then raise ValueError
"""
if self.is_null():
raise ValueError
if max_version.is_null() and min_version.is_null():
return True
elif max_version.is_null():
return min_version <= self
elif min_version.is_null():
return self <= max_version
else:
return min_version <= self <= max_version
def get_string(self):
"""Converts object to string representation which if used to create
an APIVersionRequest object results in the same version request.
"""
if self.is_null():
raise ValueError
return "%s.%s" % (self.ver_major, self.ver_minor)
| 0 |
import copy
import logging
import numpy as np
import pickle
from typing import Dict, List, Optional, Tuple, Union, Any
from ray.tune.result import DEFAULT_METRIC
from ray.tune.sample import Categorical, Domain, Float, Integer, Quantized, \
LogUniform
from ray.tune.suggest.suggestion import UNRESOLVED_SEARCH_SPACE, \
UNDEFINED_METRIC_MODE, UNDEFINED_SEARCH_SPACE
from ray.tune.suggest.variant_generator import parse_spec_vars
from ray.tune.utils import flatten_dict
from ray.tune.utils.util import is_nan_or_inf, unflatten_dict, \
validate_warmstart
try:
import skopt as sko
except ImportError:
sko = None
from ray.tune.suggest import Searcher
logger = logging.getLogger(__name__)
class SkOptSearch(Searcher):
"""Uses Scikit Optimize (skopt) to optimize hyperparameters.
Scikit-optimize is a black-box optimization library.
Read more here: https://scikit-optimize.github.io.
You will need to install Scikit-Optimize to use this module.
.. code-block:: bash
pip install scikit-optimize
    You can either pass in a `skopt Optimizer object`_ or have one created
    for you from a ``space`` definition.
This searcher will automatically filter out any NaN, inf or -inf
results.
Parameters:
optimizer (skopt.optimizer.Optimizer): Optimizer provided
from skopt.
space (dict|list): A dict mapping parameter names to valid parameters,
i.e. tuples for numerical parameters and lists for categorical
parameters. If you passed an optimizer instance as the
`optimizer` argument, this should be a list of parameter names
instead.
metric (str): The training result objective value attribute. If None
but a mode was passed, the anonymous metric `_metric` will be used
per default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
points_to_evaluate (list): Initial parameter suggestions to be run
first. This is for when you already have some good parameters
you want to run first to help the algorithm make better suggestions
for future parameters. Needs to be a list of dicts containing the
configurations.
evaluated_rewards (list): If you have previously evaluated the
parameters passed in as points_to_evaluate you can avoid
re-running those trials by passing in the reward attributes
as a list so the optimiser can be told the results without
needing to re-compute the trial. Must be the same length as
points_to_evaluate. (See tune/examples/skopt_example.py)
convert_to_python (bool): SkOpt outputs numpy primitives (e.g.
``np.int64``) instead of Python types. If this setting is set
to ``True``, the values will be converted to Python primitives.
max_concurrent: Deprecated.
use_early_stopped_trials: Deprecated.
Tune automatically converts search spaces to SkOpt's format:
.. code-block:: python
config = {
"width": tune.uniform(0, 20),
"height": tune.uniform(-100, 100)
}
current_best_params = [
{
"width": 10,
"height": 0,
},
{
"width": 15,
"height": -20,
}
]
skopt_search = SkOptSearch(
metric="mean_loss",
mode="min",
points_to_evaluate=current_best_params)
tune.run(my_trainable, config=config, search_alg=skopt_search)
If you would like to pass the search space/optimizer manually,
the code would look like this:
.. code-block:: python
parameter_names = ["width", "height"]
parameter_ranges = [(0,20),(-100,100)]
current_best_params = [[10, 0], [15, -20]]
skopt_search = SkOptSearch(
parameter_names=parameter_names,
parameter_ranges=parameter_ranges,
metric="mean_loss",
mode="min",
points_to_evaluate=current_best_params)
tune.run(my_trainable, search_alg=skopt_search)
"""
def __init__(self,
optimizer: Optional["sko.optimizer.Optimizer"] = None,
space: Union[List[str], Dict[str, Union[Tuple, List]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[Dict]] = None,
evaluated_rewards: Optional[List] = None,
convert_to_python: bool = True,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None):
assert sko is not None, ("skopt must be installed! "
"You can install Skopt with the command: "
"`pip install scikit-optimize`.")
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self.max_concurrent = max_concurrent
super(SkOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials)
self._initial_points = []
self._parameters = None
self._parameter_names = None
self._parameter_ranges = None
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(
par="space", cls=type(self)))
space = self.convert_search_space(space, join=True)
self._space = space
if self._space:
if isinstance(optimizer, sko.Optimizer):
if not isinstance(space, list):
raise ValueError(
"You passed an optimizer instance to SkOpt. Your "
"`space` parameter should be a list of parameter"
"names.")
self._parameter_names = space
else:
self._parameter_names = list(space.keys())
self._parameter_ranges = list(space.values())
self._points_to_evaluate = copy.deepcopy(points_to_evaluate)
self._evaluated_rewards = evaluated_rewards
self._convert_to_python = convert_to_python
self._skopt_opt = optimizer
if self._skopt_opt or self._space:
self._setup_skopt()
self._live_trial_mapping = {}
def _setup_skopt(self):
if self._points_to_evaluate and isinstance(self._points_to_evaluate,
list):
if isinstance(self._points_to_evaluate[0], list):
# Keep backwards compatibility
self._points_to_evaluate = [
dict(zip(self._parameter_names, point))
for point in self._points_to_evaluate
]
# Else: self._points_to_evaluate is already in correct format
validate_warmstart(self._parameter_names, self._points_to_evaluate,
self._evaluated_rewards)
if not self._skopt_opt:
if not self._space:
raise ValueError(
"If you don't pass an optimizer instance to SkOptSearch, "
"pass a valid `space` parameter.")
self._skopt_opt = sko.Optimizer(self._parameter_ranges)
if self._points_to_evaluate and self._evaluated_rewards:
skopt_points = [[point[par] for par in self._parameter_names]
for point in self._points_to_evaluate]
self._skopt_opt.tell(skopt_points, self._evaluated_rewards)
elif self._points_to_evaluate:
self._initial_points = self._points_to_evaluate
self._parameters = self._parameter_names
# Skopt internally minimizes, so "max" => -1
if self._mode == "max":
self._metric_op = -1.
elif self._mode == "min":
self._metric_op = 1.
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
config: Dict) -> bool:
if self._skopt_opt:
return False
space = self.convert_search_space(config)
self._space = space
self._parameter_names = list(space.keys())
self._parameter_ranges = list(space.values())
if metric:
self._metric = metric
if mode:
self._mode = mode
self._setup_skopt()
return True
def suggest(self, trial_id: str) -> Optional[Dict]:
if not self._skopt_opt:
raise RuntimeError(
UNDEFINED_SEARCH_SPACE.format(
cls=self.__class__.__name__, space="space"))
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(
cls=self.__class__.__name__,
metric=self._metric,
mode=self._mode))
if self.max_concurrent:
if len(self._live_trial_mapping) >= self.max_concurrent:
return None
if self._initial_points:
suggested_config = self._initial_points.pop(0)
skopt_config = [suggested_config[par] for par in self._parameters]
else:
skopt_config = self._skopt_opt.ask()
suggested_config = dict(zip(self._parameters, skopt_config))
self._live_trial_mapping[trial_id] = skopt_config
if self._convert_to_python:
for k, v in list(suggested_config.items()):
if isinstance(v, np.number):
suggested_config[k] = v.item()
return unflatten_dict(suggested_config)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
"""Notification for the completion of trial.
The result is internally negated when interacting with Skopt
so that Skopt Optimizers can "maximize" this value,
        as it minimizes by default.
"""
if result:
self._process_result(trial_id, result)
self._live_trial_mapping.pop(trial_id)
def _process_result(self, trial_id: str, result: Dict):
skopt_trial_info = self._live_trial_mapping[trial_id]
if result and not is_nan_or_inf(result[self._metric]):
self._skopt_opt.tell(skopt_trial_info,
self._metric_op * result[self._metric])
def get_state(self) -> Dict[str, Any]:
state = self.__dict__.copy()
return state
def set_state(self, state: Dict[str, Any]):
self.__dict__.update(state)
def save(self, checkpoint_path: str):
with open(checkpoint_path, "wb") as f:
pickle.dump((self._initial_points, self._skopt_opt), f)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as f:
state = pickle.load(f)
self._initial_points, self._skopt_opt = state
@staticmethod
def convert_search_space(spec: Dict, join: bool = False) -> Dict:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a SkOpt search space.")
# Flatten and resolve again after checking for grid search.
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
def resolve_value(domain: Domain) -> Union[Tuple, List]:
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning("SkOpt search does not support quantization. "
"Dropped quantization.")
sampler = sampler.get_sampler()
if isinstance(domain, Float):
if isinstance(domain.sampler, LogUniform):
return sko.space.Real(
domain.lower, domain.upper, prior="log-uniform")
return sko.space.Real(
domain.lower, domain.upper, prior="uniform")
elif isinstance(domain, Integer):
if isinstance(domain.sampler, LogUniform):
return sko.space.Integer(
domain.lower, domain.upper - 1, prior="log-uniform")
return sko.space.Integer(
domain.lower, domain.upper - 1, prior="uniform")
elif isinstance(domain, Categorical):
return domain.categories
raise ValueError("SkOpt does not support parameters of type "
"`{}` with samplers of type `{}`".format(
type(domain).__name__,
type(domain.sampler).__name__))
# Parameter name is e.g. "a/b/c" for nested dicts
space = {
"/".join(path): resolve_value(domain)
for path, domain in domain_vars
}
if join:
spec.update(space)
space = spec
return space
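# Sketch (assuming the ray.tune samplers named below): how a Tune search
# space maps to skopt dimensions via convert_search_space().
#   from ray import tune
#   SkOptSearch.convert_search_space({
#       "width": tune.uniform(0, 20),   # -> sko.space.Real(0, 20, prior="uniform")
#       "depth": tune.randint(1, 10),   # -> sko.space.Integer(1, 9, prior="uniform")
#       "act": tune.choice(["relu", "tanh"]),  # -> ["relu", "tanh"]
#   })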
| 0 |
#A simple script to open a CSV and print the paths and filenames from a desired path to it.
#Updated to Python3 5/8/2020
import os, csv, argparse
import datetime
parser = argparse.ArgumentParser()
parser.add_argument("-o","--output", help="Path to and filename for the CSV to create")
parser.add_argument("-d","--directory", help="Path to directory to list in the CSV")
args = parser.parse_args()
if not args.output or not args.directory:
    parser.error("both --output and --directory are required")
csvOut = args.output
inputDir = args.directory
with open (csvOut, 'w', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
writer.writerow(['path','title','type','date','creator','rights','rights_note','aspace_id','identifier'])
for path, dirs, files in os.walk(inputDir):
for filename in files:
full_path = os.path.join(path,filename)
full_path = full_path.replace("\\","/")
full_path = full_path.replace('F:/repo/rub/','/nas/rubenstein_archive/')
#print filename
            #add additional extensions to these sets as needed
            textExts = {'.txt', '.rtf', '.doc', '.docx', '.pdf'}
            imageExts = {'.jpg', '.jpeg', '.png', '.gif', '.bmp',
                         '.tif', '.tiff', '.heic'}
            datasetExts = {'.xlsx', '.xls', '.accdb'}
            interactiveExts = {'.pptx', '.ppt'}
            #compare the lowercased file extension so all case variants match
            ext = os.path.splitext(filename)[1].lower()
            if ext in textExts:
                type = 'text'
            elif ext in imageExts:
                type = 'image'
            elif ext in datasetExts:
                type = 'dataset'
            elif ext in interactiveExts:
                type = 'interactiveResource'
            else:
                type = 'TYPE?'
#date stuff is returning too recent modified dates.....
#date = os.path.getmtime(path)
#date_human = datetime.datetime.fromtimestamp(int(date)).strftime('%Y-%m-%d')
writer.writerow([full_path,filename,type])
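# Example invocation (the script and path names are illustrative):
#   python list_files_to_csv.py -d /nas/rubenstein_archive/some_collection \
#       -o inventory.csv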
| 0.027864 |
# Copyright (c) 2009, 2010 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import getopt
import sys
import ovs.json
def print_json(json):
if type(json) in [str, unicode]:
print "error: %s" % json
return False
else:
ovs.json.to_stream(json, sys.stdout)
sys.stdout.write("\n")
return True
def parse_multiple(stream):
buf = stream.read(4096)
ok = True
parser = None
while len(buf):
if parser is None and buf[0] in " \t\r\n":
buf = buf[1:]
else:
if parser is None:
parser = ovs.json.Parser()
n = parser.feed(buf)
buf = buf[n:]
if len(buf):
if not print_json(parser.finish()):
ok = False
parser = None
if len(buf) == 0:
buf = stream.read(4096)
if parser and not print_json(parser.finish()):
ok = False
return ok
def main(argv):
argv0 = argv[0]
# Make stdout and stderr UTF-8, even if they are redirected to a file.
sys.stdout = codecs.getwriter("utf-8")(sys.stdout)
sys.stderr = codecs.getwriter("utf-8")(sys.stderr)
try:
options, args = getopt.gnu_getopt(argv[1:], '', ['multiple'])
except getopt.GetoptError, geo:
sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
sys.exit(1)
multiple = False
for key, value in options:
if key == '--multiple':
multiple = True
else:
sys.stderr.write("%s: unhandled option %s\n" % (argv0, key))
sys.exit(1)
if len(args) != 1:
sys.stderr.write("usage: %s [--multiple] INPUT.json\n" % argv0)
sys.exit(1)
input_file = args[0]
if input_file == "-":
stream = sys.stdin
else:
stream = open(input_file, "r")
if multiple:
ok = parse_multiple(stream)
else:
ok = print_json(ovs.json.from_stream(stream))
if not ok:
sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
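# Example invocations (Python 2; the input file name is illustrative):
#   python test-json.py input.json
#   cat input.json | python test-json.py --multiple -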
| 0 |
import os
from xml.etree import ElementTree, ElementInclude
from bocca_xml import BoccaProject
from bocca_xml import bocca_object_names
from bocca_objects import new_object, ObjectTypeError  # ObjectTypeError is used below; assumed to be defined in bocca_objects
from bocca_build import BoccaBuild
class Error (Exception):
"""Base class for exceptions in this module"""
pass
import types
class UnexpectedElementError (Error):
"""Exception raised for an unexpected element"""
def __init__ (self, expected, found):
if type (expected) in types.StringTypes:
self.msg = 'Expected %s but found %s' % (expected, found)
else:
expected = ', '.join (expected)
self.msg = 'Expected one of %s but found %s' % (expected, found)
self.expected = expected
self.found = found
def __str__ (self):
return self.msg
class MissingElementError (Error):
"""Exception raised for a missing element that is required"""
def __init__ (self, missing):
self.msg = 'Missing element %s' % missing
self.missing = missing
def __str__ (self):
return self.msg
class MissingAttributeError (Error):
"""Exception raised for a missing attribute that is required"""
def __init__ (self, attrib):
self.attrib = attrib
self.msg = 'Missing required attribute %s' % attrib
def __str__ (self):
return self.msg
def project_from_element (element):
if element.tag != 'project':
raise UnexpectedElementError ('project', element.tag)
if not element.attrib.has_key ('package'):
element.attrib['package'] = None
try:
proj = BoccaProject (element.attrib['name'], element.attrib['package'])
except KeyError as e:
raise MissingAttributeError (e)
for key in element.attrib.keys ():
if key not in ['name', 'package']:
proj.set_var (key, element.attrib[key])
opts = {}
for child in element.getchildren ():
if child.tag in proj.valid_options ():
if opts.has_key (child.tag):
opts[child.tag].append (child.text)
else:
opts[child.tag] = [child.text]
for (key, value) in opts.items ():
proj.set_var (key, value)
return proj
def object_from_element (element):
if element.tag not in bocca_object_names:
raise UnexpectedElementError (bocca_object_names, element.tag)
try:
obj = new_object (element.attrib['name'], element.tag)
except KeyError as e:
        raise MissingAttributeError (e)
except ObjectTypeError as e:
raise
for key in element.attrib.keys ():
if key not in ['name']:
obj.set_var (key, element.attrib[key])
#for child in element.getchildren ():
# if child.tag in obj.valid_options ():
# obj.set_var (child.tag, child.text)
opts = {}
for child in element.getchildren ():
if child.tag in obj.valid_options ():
if opts.has_key (child.tag):
opts[child.tag].append (child.text)
else:
opts[child.tag] = [child.text]
for (key, value) in opts.items ():
obj.set_var (key, value)
return obj
#vars = get_node_vars (node)
#obj = objects[node.nodeName] (vars['name'])
#for var_name in vars.keys ():
# obj.set_var (var_name, vars[var_name])
#return obj
def parse_file (file, srcdir='.', dstdir='.', no_import=False):
try:
doc = ElementTree.parse (file)
except IOError as e:
raise
#raise BadFileError (file, e.strerror)
proj = doc.getroot ()
if proj.tag != 'project':
raise MissingElementError ('project')
#print 'bocca create project %(name)s --language=%(language)s --package=%(package)s' % proj.attrib
#for obj in get_bocca_objects (proj):
# obj.attrib['tag'] = obj.tag
# command_string = 'bocca create %(tag)s %(name)s' % obj.attrib
# for child in obj.getchildren ():
# command_string += ' --%s=%s' % (child.tag, child.text)
# print command_string
build = BoccaBuild (project_from_element (proj), no_import=no_import)
build.set_srcdir (os.path.abspath (srcdir))
build.set_destdir (os.path.abspath (os.path.join (dstdir, build.name ())))
#for node in proj.getchildren ():
for node in get_bocca_objects (proj):
build.add_node (object_from_element (node))
return build
def get_bocca_objects (element):
objs = []
ElementInclude.include (element)
for child in element.getchildren ():
if child.tag == 'project':
objs.extend (get_bocca_objects (child))
elif child.tag in bocca_object_names:
objs.append (child)
return remove_dups (objs)
def remove_dups (objs, idfun=None):
if idfun is None:
def idfun (x): return x.attrib['name']
seen = {}
result = []
for item in objs:
marker = idfun (item)
if marker in seen:
continue
seen[marker] = 1
result.append (item)
return result
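# Sketch: remove_dups() keeps the first occurrence of each key derived by
# idfun (the default idfun reads each element's 'name' attribute):
#   remove_dups([1, 2, 1, 3], idfun=lambda x: x)  # -> [1, 2, 3]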
| 0.034155 |
"""
All of the Enums that are used throughout the chardet package.
:author: Dan Blanchard (dan.blanchard@gmail.com)
"""
class InputState(object):
"""
This enum represents the different states a universal detector can be in.
"""
pure_ascii = 0
esc_ascii = 1
high_byte = 2
class LanguageFilter(object):
"""
This enum represents the different language filters we can apply to a
``UniversalDetector``.
"""
chinese_simplified = 0x01
chinese_traditional = 0x02
japanese = 0x04
korean = 0x08
non_cjk = 0x10
all = 0x1F
chinese = chinese_simplified | chinese_traditional
cjk = chinese | japanese | korean
class ProbingState(object):
"""
This enum represents the different states a prober can be in.
"""
detecting = 0
found_it = 1
not_me = 2
class MachineState(object):
"""
This enum represents the different states a state machine can be in.
"""
start = 0
error = 1
its_me = 2
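# A minimal sketch (not from the original chardet source) showing how these
# integer "enums" are compared and, for LanguageFilter, combined as bit
# flags:
if __name__ == '__main__':
    state = ProbingState.detecting
    assert state == ProbingState.detecting
    assert LanguageFilter.cjk & LanguageFilter.japanese  # cjk includes japanese
    assert not (LanguageFilter.non_cjk & LanguageFilter.korean)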
| 0 |
# -*- coding: utf-8 -*-
from navmazing import NavigateToSibling
from widgetastic.widget import View
from widgetastic_patternfly import Accordion
from widgetastic_patternfly import Dropdown
from cfme.base import Server
from cfme.base.login import BaseLoggedInPage
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigator
from widgetastic_manageiq import ManageIQTree
class ControlExplorerView(BaseLoggedInPage):
@property
def in_control_explorer(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Control', 'Explorer'])
@property
def is_displayed(self):
return self.in_control_explorer
@View.nested
class policy_profiles(Accordion): # noqa
ACCORDION_NAME = "Policy Profiles"
tree = ManageIQTree()
@View.nested
class policies(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class events(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class conditions(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class actions(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class alert_profiles(Accordion): # noqa
ACCORDION_NAME = "Alert Profiles"
tree = ManageIQTree()
@View.nested
class alerts(Accordion): # noqa
tree = ManageIQTree()
configuration = Dropdown("Configuration")
@navigator.register(Server)
class ControlExplorer(CFMENavigateStep):
VIEW = ControlExplorerView
prerequisite = NavigateToSibling("LoggedIn")
def step(self, *args, **kwargs):
self.view.navigation.select("Control", "Explorer")
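# Usage sketch (navigate_to's import path is an assumption about the cfme
# test framework; the tree path is illustrative):
#   from cfme.utils.appliance.implementations.ui import navigate_to
#   view = navigate_to(appliance.server, 'ControlExplorer')
#   view.policies.tree.click_path('All Policies')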
| 0 |
#################################################################################
# FULL BACKUP UTILITY FOR ENIGMA2, SUPPORTS THE MODELS OE-A 2.3                 #
# #
# MAKES A FULLBACK-UP READY FOR FLASHING. #
# #
#################################################################################
from enigma import getEnigmaVersionString
from Screens.Screen import Screen
from Components.Button import Button
from Components.Label import Label
from Components.ActionMap import ActionMap
from Components.About import about
from Screens.Console import Console
from Screens.MessageBox import MessageBox
from time import time, strftime, localtime
from os import path, system, makedirs, listdir, walk, statvfs
import commands
import datetime
from boxbranding import getBoxType, getMachineBrand, getMachineName, getDriverDate, getImageVersion, getImageBuild, getBrandOEM, getMachineBuild, getImageFolder, getMachineUBINIZE, getMachineMKUBIFS, getMachineMtdKernel, getMachineKernelFile, getMachineRootFile, getImageFileSystem
VERSION = "Version 4.3 opendroid"
HaveGZkernel = True
if getBrandOEM() in ("fulan"):
HaveGZkernel = False
def Freespace(dev):
statdev = statvfs(dev)
space = (statdev.f_bavail * statdev.f_frsize) / 1024
print "[FULL BACKUP] Free space on %s = %i kilobytes" %(dev, space)
return space
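# Sketch: the checks below treat the return value as kilobytes, e.g.
#   Freespace('/media/hdd') >= 300000   # at least ~300 MB free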
class ImageBackup(Screen):
skin = """
<screen position="center,center" size="560,400" title="Image Backup">
<ePixmap position="0,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap position="140,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<ePixmap position="280,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
<ePixmap position="420,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
<widget name="key_red" position="0,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_green" position="140,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_yellow" position="280,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_blue" position="420,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="info-hdd" position="10,30" zPosition="1" size="450,100" font="Regular;20" halign="left" valign="top" transparent="1" />
<widget name="info-usb" position="10,150" zPosition="1" size="450,200" font="Regular;20" halign="left" valign="top" transparent="1" />
<widget name="info-yellow" position="10,290" zPosition="1" size="550,200" font="Regular;20" halign="left" valign="top" transparent="1" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
self.session = session
self.MODEL = getBoxType()
self.OEM = getBrandOEM()
self.MACHINEBUILD = getMachineBuild()
self.MACHINENAME = getMachineName()
self.MACHINEBRAND = getMachineBrand()
self.IMAGEFOLDER = getImageFolder()
self.UBINIZE_ARGS = getMachineUBINIZE()
self.MKUBIFS_ARGS = getMachineMKUBIFS()
self.MTDKERNEL = getMachineMtdKernel()
self.ROOTFSBIN = getMachineRootFile()
self.KERNELBIN = getMachineKernelFile()
self.ROOTFSTYPE = getImageFileSystem()
print "[FULL BACKUP] BOX MACHINEBUILD = >%s<" %self.MACHINEBUILD
print "[FULL BACKUP] BOX MACHINENAME = >%s<" %self.MACHINENAME
print "[FULL BACKUP] BOX MACHINEBRAND = >%s<" %self.MACHINEBRAND
print "[FULL BACKUP] BOX MODEL = >%s<" %self.MODEL
print "[FULL BACKUP] OEM MODEL = >%s<" %self.OEM
print "[FULL BACKUP] IMAGEFOLDER = >%s<" %self.IMAGEFOLDER
print "[FULL BACKUP] UBINIZE = >%s<" %self.UBINIZE_ARGS
print "[FULL BACKUP] MKUBIFS = >%s<" %self.MKUBIFS_ARGS
print "[FULL BACKUP] MTDKERNEL = >%s<" %self.MTDKERNEL
print "[FULL BACKUP] ROOTFSTYPE = >%s<" %self.ROOTFSTYPE
self["key_green"] = Button("USB")
self["key_red"] = Button("HDD")
self["key_blue"] = Button(_("Exit"))
self["key_yellow"] = Button("")
self["info-usb"] = Label(_("USB = Do you want to make a back-up on USB?\nThis will take between 4 and 15 minutes depending on the used filesystem and is fully automatic.\nMake sure you first insert an USB flash drive before you select USB."))
self["info-hdd"] = Label(_("HDD = Do you want to make an USB-back-up image on HDD? \nThis only takes 2 or 10 minutes and is fully automatic."))
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"blue": self.quit,
"yellow": self.yellow,
"green": self.green,
"red": self.red,
"cancel": self.quit,
}, -2)
def check_hdd(self):
if not path.exists("/media/hdd"):
self.session.open(MessageBox, _("No /hdd found !!\nPlease make sure you have a HDD mounted.\n"), type = MessageBox.TYPE_ERROR)
return False
if Freespace('/media/hdd') < 300000:
self.session.open(MessageBox, _("Not enough free space on /hdd !!\nYou need at least 300Mb free space.\n"), type = MessageBox.TYPE_ERROR)
return False
return True
def check_usb(self, dev):
if Freespace(dev) < 300000:
self.session.open(MessageBox, _("Not enough free space on %s !!\nYou need at least 300Mb free space.\n" % dev), type = MessageBox.TYPE_ERROR)
return False
return True
def quit(self):
self.close()
def red(self):
if self.check_hdd():
self.doFullBackup("/hdd")
def green(self):
		USB_DEVICE = self.SearchUSBcandidate()
if USB_DEVICE == 'XX':
text = _("No USB-Device found for fullbackup !!\n\n\n")
text += _("To back-up directly to the USB-stick, the USB-stick MUST\n")
text += _("contain a file with the name: \n\n")
text += _("backupstick or backupstick.txt")
self.session.open(MessageBox, text, type = MessageBox.TYPE_ERROR)
else:
if self.check_usb(USB_DEVICE):
self.doFullBackup(USB_DEVICE)
def yellow(self):
#// Not used
pass
	def SearchUSBcandidate(self):
for paths, subdirs, files in walk("/media"):
for dir in subdirs:
if not dir == 'hdd' and not dir == 'net':
for file in listdir("/media/" + dir):
if file.find("backupstick") > -1:
print "USB-DEVICE found on: /media/%s" % dir
return "/media/" + dir
return "XX"
def doFullBackup(self, DIRECTORY):
self.DIRECTORY = DIRECTORY
self.TITLE = _("Full back-up on %s") % (self.DIRECTORY)
self.START = time()
self.DATE = strftime("%Y%m%d_%H%M", localtime(self.START))
self.IMAGEVERSION = self.imageInfo() #strftime("%Y%m%d", localtime(self.START))
if self.ROOTFSTYPE == "ubi":
self.MKFS = "/usr/sbin/mkfs.ubifs"
else:
self.MKFS = "/usr/sbin/mkfs.jffs2"
self.UBINIZE = "/usr/sbin/ubinize"
self.NANDDUMP = "/usr/sbin/nanddump"
self.WORKDIR= "%s/bi" %self.DIRECTORY
self.TARGET="XX"
## TESTING IF ALL THE TOOLS FOR THE BUILDING PROCESS ARE PRESENT
if not path.exists(self.MKFS):
text = "%s not found !!" %self.MKFS
self.session.open(MessageBox, _(text), type = MessageBox.TYPE_ERROR)
return
if not path.exists(self.NANDDUMP):
text = "%s not found !!" %self.NANDDUMP
self.session.open(MessageBox, _(text), type = MessageBox.TYPE_ERROR)
return
self.SHOWNAME = "%s %s" %(self.MACHINEBRAND, self.MODEL)
self.MAINDESTOLD = "%s/%s" %(self.DIRECTORY, self.MODEL)
self.MAINDEST = "%s/%s" %(self.DIRECTORY,self.IMAGEFOLDER)
self.EXTRA = "%s/fullbackup_%s/%s/%s" % (self.DIRECTORY, self.MODEL, self.DATE, self.IMAGEFOLDER)
self.EXTRAOLD = "%s/fullbackup_%s/%s/%s" % (self.DIRECTORY, self.MODEL, self.DATE, self.MODEL)
self.message = "echo -e '\n"
self.message += (_("Back-up Tool for a %s\n" %self.SHOWNAME)).upper()
self.message += VERSION + '\n'
self.message += "_________________________________________________\n\n"
self.message += _("Please be patient, a backup will now be made,\n")
if self.ROOTFSTYPE == "ubi":
self.message += _("because of the used filesystem the back-up\n")
self.message += _("will take about 3-12 minutes for this system\n")
else:
self.message += _("This will take between 2 and 9 minutes\n")
self.message += "\n_________________________________________________\n\n"
self.message += "'"
## PREPARING THE BUILDING ENVIRONMENT
system("rm -rf %s" %self.WORKDIR)
if not path.exists(self.WORKDIR):
makedirs(self.WORKDIR)
if not path.exists("/tmp/bi/root"):
makedirs("/tmp/bi/root")
system("sync")
system("mount --bind / /tmp/bi/root")
if self.ROOTFSTYPE == "jffs2":
cmd1 = "%s --root=/tmp/bi/root --faketime --output=%s/root.jffs2 %s" % (self.MKFS, self.WORKDIR, self.MKUBIFS_ARGS)
cmd2 = None
else:
f = open("%s/ubinize.cfg" %self.WORKDIR, "w")
f.write("[ubifs]\n")
f.write("mode=ubi\n")
f.write("image=%s/root.ubi\n" %self.WORKDIR)
f.write("vol_id=0\n")
f.write("vol_type=dynamic\n")
f.write("vol_name=rootfs\n")
f.write("vol_flags=autoresize\n")
f.close()
ff = open("%s/root.ubi" %self.WORKDIR, "w")
ff.close()
cmd1 = "%s -r /tmp/bi/root -o %s/root.ubi %s" % (self.MKFS, self.WORKDIR, self.MKUBIFS_ARGS)
cmd2 = "%s -o %s/root.ubifs %s %s/ubinize.cfg" % (self.UBINIZE, self.WORKDIR, self.UBINIZE_ARGS, self.WORKDIR)
cmd3 = "mv %s/root.ubifs %s/root.%s" %(self.WORKDIR, self.WORKDIR, self.ROOTFSTYPE)
cmdlist = []
cmdlist.append(self.message)
cmdlist.append('echo "Create: root.%s\n"' %self.ROOTFSTYPE)
cmdlist.append(cmd1)
if cmd2:
cmdlist.append(cmd2)
cmdlist.append(cmd3)
cmdlist.append("chmod 644 %s/root.%s" %(self.WORKDIR, self.ROOTFSTYPE))
cmdlist.append('echo " "')
cmdlist.append('echo "Create: kerneldump"')
cmdlist.append('echo " "')
cmdlist.append("nanddump -a -f %s/vmlinux.gz /dev/%s" % (self.WORKDIR, self.MTDKERNEL))
cmdlist.append('echo " "')
if HaveGZkernel:
cmdlist.append('echo "Check: kerneldump"')
cmdlist.append("sync")
self.session.open(Console, title = self.TITLE, cmdlist = cmdlist, finishedCallback = self.doFullBackupCB, closeOnSuccess = True)
def doFullBackupCB(self):
if HaveGZkernel:
ret = commands.getoutput(' gzip -d %s/vmlinux.gz -c > /tmp/vmlinux.bin' % self.WORKDIR)
if ret:
text = "Kernel dump error\n"
text += "Please Flash your Kernel new and Backup again"
system('rm -rf /tmp/vmlinux.bin')
self.session.open(MessageBox, _(text), type = MessageBox.TYPE_ERROR)
return
cmdlist = []
cmdlist.append(self.message)
if HaveGZkernel:
cmdlist.append('echo "Kernel dump OK"')
cmdlist.append("rm -rf /tmp/vmlinux.bin")
cmdlist.append('echo "_________________________________________________"')
cmdlist.append('echo "Almost there... "')
cmdlist.append('echo "Now building the USB-Image"')
system('rm -rf %s' %self.MAINDEST)
if not path.exists(self.MAINDEST):
makedirs(self.MAINDEST)
if not path.exists(self.EXTRA):
makedirs(self.EXTRA)
f = open("%s/imageversion" %self.MAINDEST, "w")
f.write(self.IMAGEVERSION)
f.close()
system('mv %s/root.%s %s/%s' %(self.WORKDIR, self.ROOTFSTYPE, self.MAINDEST, self.ROOTFSBIN))
system('mv %s/vmlinux.gz %s/%s' %(self.WORKDIR, self.MAINDEST, self.KERNELBIN))
cmdlist.append('echo "rename this file to "force" to force an update without confirmation" > %s/noforce' %self.MAINDEST)
if self.MODEL in ("gbquad", "gbquadplus", "gb800ue", "gb800ueplus", "gbultraue"):
lcdwaitkey = '/usr/share/lcdwaitkey.bin'
lcdwarning = '/usr/share/lcdwarning.bin'
if path.exists(lcdwaitkey):
system('cp %s %s/lcdwaitkey.bin' %(lcdwaitkey, self.MAINDEST))
if path.exists(lcdwarning):
system('cp %s %s/lcdwarning.bin' %(lcdwarning, self.MAINDEST))
if self.MODEL == "gb800solo":
burnbat = "%s/fullbackup_%s/%s" % (self.DIRECTORY, self.MODEL, self.DATE)
f = open("%s/burn.bat" % (burnbat), "w")
f.write("flash -noheader usbdisk0:gigablue/solo/kernel.bin flash0.kernel\n")
f.write("flash -noheader usbdisk0:gigablue/solo/rootfs.bin flash0.rootfs\n")
f.write('setenv -p STARTUP "boot -z -elf flash0.kernel: ')
f.write("'rootfstype=jffs2 bmem=106M@150M root=/dev/mtdblock6 rw '")
f.write('"\n')
f.close()
cmdlist.append('cp -r %s/* %s/' % (self.MAINDEST, self.EXTRA))
cmdlist.append("sync")
file_found = True
if not path.exists("%s/%s" % (self.MAINDEST, self.ROOTFSBIN)):
print 'ROOTFS bin file not found'
file_found = False
if not path.exists("%s/%s" % (self.MAINDEST, self.KERNELBIN)):
print 'KERNEL bin file not found'
file_found = False
if path.exists("%s/noforce" % self.MAINDEST):
print 'NOFORCE bin file not found'
file_found = False
if file_found:
cmdlist.append('echo "_________________________________________________\n"')
cmdlist.append('echo "USB Image created on:" %s' %self.MAINDEST)
cmdlist.append('echo "and there is made an extra copy on:"')
cmdlist.append('echo %s' %self.EXTRA)
cmdlist.append('echo "_________________________________________________\n"')
cmdlist.append('echo " "')
cmdlist.append('echo "\nPlease wait...almost ready! "')
cmdlist.append('echo " "')
cmdlist.append('echo "To restore the image:"')
cmdlist.append('echo "Please check the manual of the receiver"')
cmdlist.append('echo "on how to restore the image"')
else:
cmdlist.append('echo "_________________________________________________\n"')
cmdlist.append('echo "Image creation failed - "')
cmdlist.append('echo "Probable causes could be"')
cmdlist.append('echo " wrong back-up destination "')
cmdlist.append('echo " no space left on back-up device"')
cmdlist.append('echo " no writing permission on back-up device"')
cmdlist.append('echo " "')
if self.DIRECTORY == "/hdd":
self.TARGET = self.SearchUSBcanidate()
print "TARGET = %s" % self.TARGET
if self.TARGET == 'XX':
cmdlist.append('echo " "')
else:
cmdlist.append('echo "_________________________________________________\n"')
cmdlist.append('echo " "')
cmdlist.append('echo "There is a valid USB-flash drive detected in one "')
cmdlist.append('echo "of the USB-ports, therefor an extra copy of the "')
cmdlist.append('echo "back-up image will now be copied to that USB- "')
cmdlist.append('echo "flash drive. "')
cmdlist.append('echo "This only takes about 1 or 2 minutes"')
cmdlist.append('echo " "')
cmdlist.append('mkdir -p %s/%s' % (self.TARGET, self.IMAGEFOLDER))
cmdlist.append('cp -r %s %s/' % (self.MAINDEST, self.TARGET))
cmdlist.append("sync")
cmdlist.append('echo "Backup finished and copied to your USB-flash drive"')
cmdlist.append("umount /tmp/bi/root")
cmdlist.append("rmdir /tmp/bi/root")
cmdlist.append("rmdir /tmp/bi")
cmdlist.append("rm -rf %s" % self.WORKDIR)
cmdlist.append("sleep 5")
END = time()
DIFF = int(END - self.START)
TIMELAP = str(datetime.timedelta(seconds=DIFF))
cmdlist.append('echo " Time required for this process: %s"' %TIMELAP)
self.session.open(Console, title = self.TITLE, cmdlist = cmdlist, closeOnSuccess = False)
def imageInfo(self):
AboutText = _("Full Image Backup ")
AboutText += _("By opendroid Image Team") + "\n"
AboutText += _("Support at") + " www.droidsat.org\n\n"
AboutText += _("[Image Info]\n")
AboutText += _("Model: %s %s\n") % (getMachineBrand(), getMachineName())
AboutText += _("Backup Date: %s\n") % strftime("%Y-%m-%d", localtime(self.START))
if path.exists('/proc/stb/info/chipset'):
AboutText += _("Chipset: BCM%s") % about.getChipSetString().lower().replace('\n','').replace('bcm','') + "\n"
AboutText += _("CPU: %s") % about.getCPUString() + "\n"
AboutText += _("Cores: %s") % about.getCpuCoresString() + "\n"
AboutText += _("Version: %s") % getImageVersion() + "\n"
AboutText += _("Build: %s") % getImageBuild() + "\n"
AboutText += _("Kernel: %s") % about.getKernelVersionString() + "\n"
string = getDriverDate()
year = string[0:4]
month = string[4:6]
day = string[6:8]
driversdate = '-'.join((year, month, day))
AboutText += _("Drivers:\t%s") % driversdate + "\n"
AboutText += _("Last update:\t%s") % getEnigmaVersionString() + "\n\n"
AboutText += _("[Enigma2 Settings]\n")
AboutText += commands.getoutput("cat /etc/enigma2/settings")
AboutText += _("\n\n[User - bouquets (TV)]\n")
try:
f = open("/etc/enigma2/bouquets.tv","r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("#SERVICE:"):
bouqet = line.split()
if len(bouqet) > 3:
bouqet[3] = bouqet[3].replace('"','')
f = open("/etc/enigma2/" + bouqet[3],"r")
userbouqet = f.readline()
AboutText += userbouqet.replace('#NAME ','')
f.close()
except:
AboutText += "Error reading bouquets.tv"
AboutText += _("\n[User - bouquets (RADIO)]\n")
try:
f = open("/etc/enigma2/bouquets.radio","r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("#SERVICE:"):
bouqet = line.split()
if len(bouqet) > 3:
bouqet[3] = bouqet[3].replace('"','')
f = open("/etc/enigma2/" + bouqet[3],"r")
userbouqet = f.readline()
AboutText += userbouqet.replace('#NAME ','')
f.close()
except:
AboutText += "Error reading bouquets.radio"
AboutText += _("\n[Installed Plugins]\n")
AboutText += commands.getoutput("opkg list_installed | grep enigma2-plugin-")
return AboutText
| 0.027803 |
#! /usr/bin/env python2
import json
import logging
import requests
import subprocess
import sys
import time
# arcyd start --foreground
_SERVICE_NAME = 'arcyd'
def main():
logging.basicConfig(
filename='/var/log/contend-leadership',
level=logging.DEBUG)
kv = json.load(sys.stdin)
logging.debug("Got kv: {}".format(kv))
has_leader = test_has_leader(kv)
if has_leader:
logging.debug("There is already a leader.")
else:
contend_leadership()
def test_has_leader(key_value):
if not isinstance(key_value, dict):
return False
return True if key_value.get("Session", False) else False
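# Example stdin payload from a consul key watch (hypothetical values):
#   {"Key": "arcyd/leader", "Session": "adf4238a-882b-9ddc", "Value": "...", ...}
# A "Session" field on the key means some node currently holds the leader lock.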
def contend_leadership():
consul_api = 'http://localhost:8500/v1/'
session_id = get_response_json(
requests.put(
consul_api + 'session/create',
json={'Name': _SERVICE_NAME}))['ID']
logging.debug("Got session ID: {}".format(session_id))
has_leader = False
while not has_leader:
is_leader = get_response_json(
requests.put(
'{}kv/{}/leader?acquire={}'.format(
consul_api, _SERVICE_NAME, session_id),
'I am the leader'))
logging.debug("Is leader:{}".format(is_leader))
if is_leader:
has_leader = True
logging.info("This node is the leader.")
logging.info(
subprocess.check_output(
['/bin/arcyd-do', 'start']))
else:
has_leader = test_has_leader(
get_response_json(
requests.get(
'{}kv/{}/leader'.format(
consul_api, _SERVICE_NAME)))[0])
logging.debug("Has leader:".format(has_leader))
if has_leader:
logging.info("This node is a follower.")
else:
logging.debug("Waiting to retry ..")
# there may be a 'lock-delay', wait before retry
time.sleep(5)
def get_response_json(response):
response.raise_for_status()
return response.json()
if __name__ == "__main__":
sys.exit(main())
# -----------------------------------------------------------------------------
# Copyright (C) 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| 0 |
'''
Created on Oct 10, 2013
@author: mog
'''
from PySide import QtGui , QtCore
import math
from colorWidget import ColorWidget
from colorWidget import OptionColorWidget
class PlotWidget(QtGui.QWidget):
def __init__(self,parent = None , width = 310 , height = 210):
QtGui.QWidget.__init__(self,parent)
##The width of the ui
self.width = width
##The heigth of the ui
self.height = height
##The points to be drawn on the widget
#Pan amount on the widget
self.__pan = [0,0]
#Scale amount on the widget
self.__scale = 1.001
#The step between each grid line
self.__lineStep = 15
#Private value holder for show grid
self.__showGrid = 1
#Private value holder for showing the axis
self.__showAxis = True
#Private holder for the paint Mode
self.__paintMode = 1
#Private holder for whether to draw the numbers on the grid
self.__showNumbers = 1
#Attribute holding where the drag started in the widget
self._dragStart = [0,0]
#Attribute holding whether the widget is being dragged or not
self._dragging = 0
self.setGeometry(0,0,width,height)
self.setMouseTracking(False)
self.__computeOrigin()
self.optionW = None
self.__startBackgroundColor = [89, 89, 89]
self.__endBackgroundColor = [32, 32, 32]
self.__defaultColor = [255,0,0]
#list of lists containing points to draw
self.point_sets =[]
#properties
@property
def startBackgroundColor(self):
return self.__startBackgroundColor
@startBackgroundColor.setter
def startBackgroundColor(self,value):
if type(value).__name__ != "list" :
raise RuntimeError ("Please provide an float list value ,example = [0,0,0] ")
return
self.__startBackgroundColor = value
self.update()
@property
def endBackgroundColor(self):
return self.__endBackgroundColor
@endBackgroundColor.setter
def endBackgroundColor(self,value):
if type(value).__name__ != "list" :
raise RuntimeError ("Please provide an float list value ,example = [0,0,0] ")
return
self.__endBackgroundColor = value
self.update()
@property
def paintMode(self):
return self.__paintMode
@paintMode.setter
def paintMode(self,value):
if type(value).__name__ != "int" :
raise RuntimeError ("Please provide an int value between 0-1 ")
return
self.__paintMode = value
self.update()
@property
def showAxis(self):
return self.__showAxis
@showAxis.setter
def showAxis(self,value):
if type(value).__name__ != "bool" :
raise RuntimeError ("Please provide a bool value")
return
self.__showAxis = value
self.update()
@property
def showGrid(self):
return self.__showGrid
@showGrid.setter
def showGrid(self,value):
if type(value).__name__ != "bool" :
raise RuntimeError ("Please provide a bool value")
return
self.__showGrid = value
self.update()
@property
def lineStep(self):
return self.__lineStep
@lineStep.setter
def lineStep(self,value):
if not type(value).__name__ in ["int" , "float"] :
raise RuntimeError ("Please provide a int or float value")
return
self.__lineStep = value
self.update()
@property
def scale(self):
return self.__scale
@scale.setter
def scale(self,value):
if not type(value).__name__ in ["int" , "float"] :
raise RuntimeError ("Please provide a int or float value")
return
self.__scale = value
self.update()
@property
def pan(self):
return self.__pan
@pan.setter
def pan(self,value):
if not type(value).__name__ in ["list"] :
raise RuntimeError ("Please provide a int or float value")
return
if len(value) != 2 :
raise RuntimeError ("Please provide a int2 or float2 value , ex [ 0,0]")
return
self.__pan = value
self.update()
def clear_data(self):
"""
This function removes any stored point inside the class, meaning nothing will be drawn
anymore
"""
self.point_sets = []
def add_points(self, points):
"""
This function adds a point set to draw to the internal data struct.
@param points: list of points to be drawn, each point is composed of a list or tuple of
two elements representing x and y positions, example:
[[x1,y1],[x2,y2],[x3,y3] .....]
"""
self.point_sets.append(points)
def mousePressEvent(self, event):
"""
mouse click events
"""
posX = event.pos().x()
posY = event.pos().y()
keymod = QtGui.QApplication.keyboardModifiers()
if keymod == QtCore.Qt.NoModifier :
if event.button() ==QtCore.Qt.MiddleButton:
self._dragStart = [posX , posY]
self._dragging = 1
if keymod == QtCore.Qt.AltModifier :
self.popOptions()
QtGui.QWidget.mouseReleaseEvent(self,event)
event.ignore()
def mouseMoveEvent(self,event):
posX = event.pos().x()
posY = event.pos().y()
if self._dragging:
x = (posX -self._dragStart[0])/float(self.scale)
y = (posY - self._dragStart[1])/float(self.scale)
self.pan = [self.pan[0] + x , self.pan[1] + y]
self._dragStart = [posX , posY]
QtGui.QWidget.mouseReleaseEvent(self,event)
def mouseReleaseEvent(self , event):
self._dragging = 0
#passing along the event
QtGui.QWidget.mouseReleaseEvent(self,event)
def wheelEvent(self , event):
keymod = QtGui.QApplication.keyboardModifiers()
if keymod == QtCore.Qt.NoModifier :
self.scale = self.scale + (event.delta()*0.003)
if keymod == QtCore.Qt.ControlModifier :
if event.delta() > 0 :
self.lineStep = self.__lineStep + 1
else :
self.lineStep = self.__lineStep - 1
def resizeEvent(self , event):
posX = event.size().width()
posY = event.size().height()
self.width = posX
self.height = posY
def __computeOrigin (self ):
'''
This procedure computes the origin based on widget size and pan
'''
self.origin = [(self.width/2.0 ) + (self.__pan[0] * self.scale),
( self.height/2.0 ) + (self.__pan[1]* self.scale) ]
def drawData(self,qp):
"""
This function implements the default behavior for drawing the data,
can be overridden to implement different behaviors
@param qp: QPainter to use for the drawing
"""
for points in self.point_sets:
pen= QtGui.QPen()
color = QtGui.QColor(*self.__defaultColor)
pen.setColor(color)
pen.setWidthF(2)
qp.setPen(pen)
if self.paintMode == 0 :
self.__drawPoints(qp,points)
elif self.paintMode == 1 :
self.__drawLines(qp,points)
def paintEvent(self, e):
'''
This procedure draws the widget
'''
qp = QtGui.QPainter()
qp.begin(self)
qp.setRenderHint(QtGui.QPainter.Antialiasing)
self.drawBG(qp)
self.drawGrid(qp)
self.drawData(qp)
self.redrawContour(qp)
qp.end()
def drawBG(self, qp):
"""
Draws the background
@param qp: QPainter to use for the drawing
"""
pen= QtGui.QPen()
color = QtGui.QColor(255, 255, 255)
pen.setColor(color)
pen.setWidthF(3.5)
qp.setPen(pen)
start = QtCore.QPointF(0, 0)
stop = QtCore.QPointF(self.width, self.height)
gradient = QtGui.QLinearGradient(start, stop)
#setting up the gradient
gradient.setColorAt(0, QtGui.QColor(self.__startBackgroundColor[0],
self.__startBackgroundColor[1],
self.__startBackgroundColor[2]))
gradient.setColorAt(1, QtGui.QColor(self.__endBackgroundColor[0],
self.__endBackgroundColor[1],
self.__endBackgroundColor[2]))
brush = QtGui.QBrush(gradient)
qp.setBrush(brush)
rectangle=QtCore.QRectF (0.0, 0.0, self.width, self.height);
qp.drawRoundedRect(rectangle, 8.0, 8.0);
def redrawContour (self, qp):
'''
This procedure redraws the contour after everything
so it's always on top
'''
pen= QtGui.QPen()
color = QtGui.QColor(255, 255, 255)
pen.setColor(color)
pen.setWidthF(3.5)
qp.setPen(pen)
qp.setBrush(QtGui.QColor(200, 0, 0 ,0))
rectangle=QtCore.QRectF (0.0, 0.0, self.width, self.height);
qp.drawRoundedRect(rectangle, 8.0, 8.0);
def drawGrid(self, qp):
'''
This procedure draws the grid
@param qp : QPainter , the painter to use to draw the ui
'''
self.__computeOrigin()
#create pen
pen= QtGui.QPen()
#get current width of the widget
width = self.width
height = self.height
width2 = width/2.0
height2= height/2.0
#Define numbers pen
penText= QtGui.QPen()
color = QtGui.QColor(255, 255, 255)
penText.setColor(color)
penText.setWidthF(2)
#draw grid lines
#pen color
if self.__showGrid == 1 :
color = QtGui.QColor(120, 120, 120)
pen.setColor(color)
pen.setWidthF(0.5)
qp.setPen(pen)
#compute how many vertical lines fits in the view then draw them
#check first if origin is left out or right out
vLineNumbRight = 0
vLineNumbLeft = 0
#Check whether to draw the middle grid lines: they are needed if the axes are off but the grid is on
startRange = 1
if self.__showAxis == 0 :
startRange = 0
if width > self.origin[0] :
vLineNumbRight = int(width - self.origin[0]) // (self.__lineStep * self.scale) + 1
vLineNumbRight = int(vLineNumbRight)
if self.origin[0] > 0 :
vLineNumbLeft = int( self.origin[0] ) // (self.__lineStep * self.scale) +1
vLineNumbLeft = int(vLineNumbLeft)
upY = self.origin[1] + height2 -3
lowY = self.origin[1]- height2 + 3
for h in range(startRange,vLineNumbRight) :
qp.drawLine(self.origin[0] + (h * self.__lineStep* self.scale) , upY - (self.__pan[1]*self.scale),
self.origin[0] + (h * self.__lineStep* self.scale) , lowY - (self.__pan[1]*self.scale))
for h in range(1,vLineNumbLeft) :
qp.drawLine(self.origin[0] - (h * self.__lineStep* self.scale) , upY- (self.__pan[1]*self.scale),
self.origin[0] - (h * self.__lineStep* self.scale) , lowY- (self.__pan[1]*self.scale) )
#compute how many horizontal lines fits in the view then draw them
hLineNumbRight = 0
hLineNumbLeft = 0
if height > self.origin[1] :
hLineNumbRight = int(height - self.origin[1]) // (self.__lineStep * self.scale) + 1
hLineNumbRight = int(hLineNumbRight)
if self.origin[1] > 0 :
hLineNumbLeft = int( self.origin[1] ) // (self.__lineStep * self.scale) +1
hLineNumbLeft = int(hLineNumbLeft)
rightX = self.origin[0] + width2 -3
leftX = self.origin[0]- width2 + 3
for h in range(startRange, hLineNumbRight) :
qp.drawLine(rightX - (self.__pan[0]*self.scale), self.origin[1]+ (h * self.__lineStep* self.scale) ,
leftX- (self.__pan[0]*self.scale) ,self.origin[1] + (h * self.__lineStep* self.scale) )
for h in range(1, hLineNumbLeft) :
qp.drawLine(rightX - (self.__pan[0]*self.scale), self.origin[1] - (h * self.__lineStep* self.scale) ,
leftX - ( self.__pan[0]*self.scale) , self.origin[1] - (h * self.__lineStep* self.scale) )
qp.setPen(penText)
point = QtCore.QPoint()
for h in range(1, hLineNumbLeft+1) :
point = QtCore.QPoint(self.origin[0] + 2, self.origin[1] - (h * self.__lineStep* self.scale) -2)
qp.drawText(point , str(h * self.__lineStep))
for h in range(1, hLineNumbRight+1) :
point = QtCore.QPoint(self.origin[0] + 2, self.origin[1] + (h * self.__lineStep* self.scale) -2)
qp.drawText(point , str(-h * self.__lineStep))
for h in range(1,vLineNumbRight +1) :
point = QtCore.QPoint(self.origin[0] + (h * self.__lineStep* self.scale) +2 , self.origin[1] - 4)
qp.drawText(point , str(h * self.__lineStep))
for h in range(1,vLineNumbLeft + 1) :
point = QtCore.QPoint(self.origin[0] - (h * self.__lineStep* self.scale) +2 , self.origin[1] - 4)
qp.drawText(point , str(-h * self.__lineStep))
if self.__showAxis == 1 :
#draw main cross
color = QtGui.QColor(0, 0, 0)
pen.setColor(color)
pen.setWidthF(1)
qp.setPen(pen)
#horizontal line
#the 3 offset is in order to not draw over the border
#Check if the line is actually on screen
if height > self.origin[1] and self.origin[1] > 0:
qp.drawLine((self.origin[0] - width2 - (self.__pan[0]*self.scale) + 3)
, self.origin[1] ,
(self.origin[0] + width2 - (self.__pan[0]*self.scale) -3 ),
self.origin[1] )
#vertical line
#Check if the line is actually on screen
if width > self.origin[0] and self.origin[0] > 0:
qp.drawLine(self.origin[0] , (self.origin[1] + height2 - (self.__pan[1]*self.scale) -3),
self.origin[0] , (self.origin[1]- height2 - (self.__pan[1]*self.scale) + 3) )
pen.setWidthF(3.5)
qp.setPen(pen)
qp.drawPoint(self.origin[0], self.origin[1])
if self.__showNumbers == 1 :
#draw the zero
qp.setPen(penText)
qp.setFont(QtGui.QFont('Decorative', 10))
qp.drawText(QtCore.QPoint(self.origin[0] +2 , self.origin[1] - 2) , "0")
def __drawPoints(self ,qp,points):
#configuring the pen
pen= QtGui.QPen()
color = QtGui.QColor(255,0 , 0)
pen.setColor(color)
pen.setWidthF(3.5)
qp.setPen(pen)
#looping the points and drawing them, there might be a method
#to draw all at once maybe?
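#note: QPainter can batch this, e.g.
#qp.drawPoints(QtGui.QPolygonF([self.fixQPoint(p) for p in points])),
#assuming the installed PySide build exposes that overload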
for p in points :
qp.drawPoint( self.fixQPoint(p))
def __drawLines(self , qp, points) :
#configuring the pen
pen= QtGui.QPen()
color = QtGui.QColor(255,0 , 0)
pen.setColor(color)
pen.setWidthF(2)
qp.setPen(pen)
for i,p in enumerate(points[:-1]) :
qp.drawLine( self.fixQPoint(p) , self.fixQPoint(points[i+1]) )
def fixQPoint(self , point ):
'''
This procedure adds scale and pan to the computed point
@param point : float2 , the point
@param return : QPoint()
'''
#we flip the - y coordinate due to the qt coordinate system
return QtCore.QPointF((point[0] *self.scale )+ self.origin[0] ,
-(point[1] *self.scale ) + self.origin[1] )
def popOptions(self):
'''
This procedure shows up the settings ui
'''
self.optionW = OptionColorWidget(parent = self , plotter = self)
self.optionW.show()
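# A minimal usage sketch (assumes a running QApplication; values are illustrative):
# w = PlotWidget()
# w.add_points([[x, 0.02 * x * x - 50] for x in range(-100, 101)])
# w.show() # middle-drag pans, the mouse wheel zooms, Alt+click opens the colour options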
| 0.026983 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('swamp_gurrcat')
mobileTemplate.setLevel(26)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Carnivore Meat")
mobileTemplate.setMeatAmount(65)
mobileTemplate.setHideType("Bristly Hide")
mobileTemplate.setHideAmount(35)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(30)
mobileTemplate.setSocialGroup("swamp gurrcat")
mobileTemplate.setAssistRange(2)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_gurrcat.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_claw_2')
attacks.add('bm_slash_2')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('swamp_gurrcat', mobileTemplate)
return
| 0.027397
import rospy
import mongodb_store_msgs.srv as dc_srv
import mongodb_store.util as dc_util
from mongodb_store_msgs.msg import StringPair, StringPairList, SerialisedMessage
from bson import json_util
from bson.objectid import ObjectId
import json
import copy
class MessageStoreProxy:
"""
A class that provides functions for storage and retrieval of ROS Message
objects in the mongodb_store. This is achieved by acting as a proxy to the
services provided by the MessageStore ROS node, and therefore requires the message
store node to be running in addition to the datacentre:
`rosrun mongodb_store message_store_node.py`
>>> from geometry_msgs.msg import Pose, Point, Quaternion
>>> msg_store = MessageStoreProxy()
>>> p = Pose(Point(0, 1, 2), Quaternion(0, 0, 0 , 1))
>>> msg_store.insert_named("my favourite pose", p)
>>> retrieved = msg_store.query_named("my favourite pose", Pose._type)
For usage examples, please see `example_message_store_client.py` within the scripts
folder of mongodb_store.
"""
def __init__(self, service_prefix='/message_store', database='message_store', collection='message_store'):
"""
:Args:
| service_prefix (str): The prefix to the *insert*, *update*, *delete* and
*query_messages* ROS services.
| database (str): The MongoDB database that this object works with.
| collection (str): The MongoDB collection that this object works with.
"""
self.database = database
self.collection = collection
insert_service = service_prefix + '/insert'
update_service = service_prefix + '/update'
delete_service = service_prefix + '/delete'
query_ids_service = service_prefix + '/query_messages'
rospy.loginfo("Waiting for services...")
rospy.wait_for_service(insert_service)
rospy.wait_for_service(update_service)
rospy.wait_for_service(query_ids_service)
rospy.wait_for_service(delete_service)
rospy.loginfo("Done")
self.insert_srv = rospy.ServiceProxy(insert_service, dc_srv.MongoInsertMsg)
self.update_srv = rospy.ServiceProxy(update_service, dc_srv.MongoUpdateMsg)
self.query_id_srv = rospy.ServiceProxy(query_ids_service, dc_srv.MongoQueryMsg)
self.delete_srv = rospy.ServiceProxy(delete_service, dc_srv.MongoDeleteMsg)
def insert_named(self, name, message, meta = {}):
"""
Inserts a ROS message into the message storage, giving it a name for convenient
later retrieval.
.. note:: Multiple messages can be stored with the same name.
:Args:
| name (str): The name to refer to this message as.
| message (ROS Message): An instance of a ROS message type to store
| meta (dict): A dictionary of additional meta data to store in association
with this message.
:Returns:
| (str) the ObjectId of the MongoDB document containing the stored message.
"""
# create a copy as we're modifying it
meta_copy = copy.copy(meta)
meta_copy["name"] = name
return self.insert(message, meta_copy)
def insert(self, message, meta = {}):
"""
Inserts a ROS message into the message storage.
:Args:
| message (ROS Message): An instance of a ROS message type to store
| meta (dict): A dictionary of additional meta data to store in association
with this message.
:Returns:
| (str) the ObjectId of the MongoDB document containing the stored message.
"""
# assume meta is a dict, convert k/v to tuple pairs
meta_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta, default=json_util.default)),)
serialised_msg = dc_util.serialise_message(message)
return self.insert_srv(self.database, self.collection, serialised_msg, StringPairList(meta_tuple)).id
def query_id(self, id, type):
"""
Finds and returns the message with the given ID.
:Parameters:
| id (str): The ObjectID of the MongoDB document holding the message.
| type (str): The ROS message type of the stored message to retrieve.
:Returns:
| message (ROS message), meta (dict): The retrieved message and associated metadata
or *None* if the named message could not be found.
"""
return self.query(type, {'_id': ObjectId(id)}, {}, True)
def delete(self, message_id):
"""
Delete the message with the given ID.
:Parameters:
| message_id (str) : The ObjectID of the MongoDB document holding the message.
:Returns:
| bool : was the object successfully deleted.
"""
return self.delete_srv(self.database, self.collection, message_id)
def query_named(self, name, type, single = True, meta = {}):
"""
Finds and returns the message(s) with the given name.
:Args:
| name (str): The name of the stored messages to retrieve.
| type (str): The type of the stored message.
| single (bool): Should only one message be returned?
| meta (dict): Extra queries on the meta data of the message.
:Return:
| message (ROS message), meta (dict): The retrieved message and associated metadata
or *None* if the named message could not be found.
"""
# create a copy as we're modifying it
meta_copy = copy.copy(meta)
meta_copy["name"] = name
return self.query(type, {}, meta_copy, single)
def update_named(self, name, message, meta = {}, upsert = False):
"""
Updates a named message.
:Args:
| name (str): The name of the stored messages to update.
| message (ROS Message): The updated ROS message
| meta (dict): Updated meta data to store with the message.
| upsert (bool): If True, insert the named message if it doesn't exist.
:Return:
| str, bool: The MongoDB ObjectID of the document, and whether it was altered by
the update.
"""
meta_query = {}
meta_query["name"] = name
# make sure the name goes into the meta info after update
meta_copy = copy.copy(meta)
meta_copy["name"] = name
return self.update(message, meta_copy, {}, meta_query, upsert)
def update_id(self, id, message, meta = {}, upsert = False):
"""
Updates a message by MongoDB ObjectId.
:Args:
| id (str): The MongoDB ObjectId of the document storing the message.
| message (ROS Message): The updated ROS message
| meta (dict): Updated meta data to store with the message.
| upsert (bool): If True, insert the named message if it doesn't exist.
:Return:
| str, bool: The MongoDB ObjectID of the document, and whether it was altered by
the update.
"""
msg_query = {'_id': ObjectId(id)}
meta_query = {}
return self.update(message, meta, msg_query, meta_query, upsert)
def update(self, message, meta = {}, message_query = {}, meta_query = {}, upsert = False):
"""
Updates a message.
:Args:
| message (ROS Message): The updated ROS message
| meta (dict): Updated meta data to store with the message.
| message_query (dict): A query to match the ROS message that is to be updated.
| meta_query (dict): A query to match against the meta data of the message to be updated
| upsert (bool): If True, insert the named message if it doesn't exist.
:Return:
| str, bool: The MongoDB ObjectID of the document, and whether it was altered by
the update.
"""
# serialise the json queries to strings using json_util.dumps
message_query_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(message_query, default=json_util.default)),)
meta_query_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta_query, default=json_util.default)),)
meta_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta, default=json_util.default)),)
return self.update_srv(self.database, self.collection, upsert, StringPairList(message_query_tuple), StringPairList(meta_query_tuple), dc_util.serialise_message(message), StringPairList(meta_tuple))
"""
Returns [message, meta] where message is the queried message and meta a dictionary of meta information. If single is false returns a list of these lists.
"""
def query(self, type, message_query = {}, meta_query = {}, single = False, sort_query = []):
"""
Finds and returns message(s) matching the message and meta data queries.
:Parameters:
| type (str): The ROS message type of the stored message to retrieve.
| message_query (dict): A query to match the actual ROS message
| meta_query (dict): A query to match against the meta data of the message
| sort_query (list of tuple): A query to request sorted list to mongodb module
| single (bool): Should only one message be returned?
:Returns:
| [message, meta] where message is the queried message and meta a dictionary of
meta information. If single is false returns a list of these lists.
"""
# assume meta is a dict, convert k/v to tuple pairs for ROS msg type
# serialise the json queries to strings using json_util.dumps
message_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(message_query, default=json_util.default)),)
meta_tuple = (StringPair(dc_srv.MongoQueryMsgRequest.JSON_QUERY, json.dumps(meta_query, default=json_util.default)),)
if len(sort_query) > 0:
sort_tuple = [StringPair(str(k), str(v)) for k, v in sort_query]
else:
sort_tuple = []
# a tuple of SerialisedMessages
response = self.query_id_srv(self.database, self.collection, type, single, StringPairList(message_tuple), StringPairList(meta_tuple), StringPairList(sort_tuple))
# print response
if response.messages is None:
messages = []
metas = []
else:
messages = map(dc_util.deserialise_message, response.messages)
metas = map(dc_util.string_pair_list_to_dictionary, response.metas)
if single:
if len(messages) > 0:
return [messages[0], metas[0]]
else:
return [None, None]
else:
return zip(messages,metas)
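# A minimal meta-data query sketch (assumes a running message_store node; names are illustrative):
# from geometry_msgs.msg import Pose
# proxy = MessageStoreProxy()
# for pose, meta in proxy.query(Pose._type, meta_query={"name": "my favourite pose"}):
#     print meta["_id"], pose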
| 0.037164 |
"""Perform simulations of air showers on a cluster of stations
This base class can be subclassed to provide various kinds of
simulations. These simulations will inherit the base functionality from
this class, including the creation of event and coincidence tables to
store the results, which will look similar to regular HiSPARC data, such
that the same reconstruction analysis can be applied to both.
Example usage::
>>> import tables
>>> from sapphire.simulations.base import BaseSimulation
>>> from sapphire import ScienceParkCluster
>>> data = tables.open_file('/tmp/test_base_simulation.h5', 'w')
>>> cluster = ScienceParkCluster()
>>> sim = BaseSimulation(cluster, data, '/simulations/this_run', 10)
>>> sim.run()
"""
import warnings
import random
import numpy as np
import tables
from six import iteritems
from .. import storage
from ..analysis.process_events import ProcessEvents
from ..utils import pbar
class BaseSimulation(object):
"""Base class for simulations.
:param cluster: :class:`~sapphire.clusters.BaseCluster` instance.
:param data: writeable PyTables file handle.
:param output_path: path (as string) to the PyTables group (need not
exist) in which the result tables will be created.
:param n: number of simulations to perform.
:param seed: seed for the pseudo-random number generators.
:param progress: if True show a progressbar while simulating.
"""
def __init__(self, cluster, data, output_path='/', n=1, seed=None,
progress=True):
self.cluster = cluster
self.data = data
self.output_path = output_path
self.n = n
self.progress = progress
self._prepare_output_tables()
if seed is not None:
random.seed(seed)
np.random.seed(seed)
def _prepare_output_tables(self):
"""Prepare output tables in the output data file.
The groups and tables will be created in the output_path.
:raises tables.NodeError: If any of the groups (e.g.
'/coincidences') already exists, an exception will be raised.
:raises tables.FileModeError: If the datafile is not writeable.
"""
self._prepare_coincidence_tables()
self._prepare_station_tables()
self._store_station_index()
def run(self):
"""Run the simulations."""
for (shower_id, shower_parameters) in enumerate(
self.generate_shower_parameters()):
station_events = self.simulate_events_for_shower(shower_parameters)
self.store_coincidence(shower_id, shower_parameters,
station_events)
def generate_shower_parameters(self):
"""Generate shower parameters like core position, energy, etc."""
shower_parameters = {'core_pos': (None, None),
'zenith': None,
'azimuth': None,
'size': None,
'energy': None,
'ext_timestamp': None}
for _ in pbar(range(self.n), show=self.progress):
yield shower_parameters
def simulate_events_for_shower(self, shower_parameters):
"""Simulate station events for a single shower"""
station_events = []
for station_id, station in enumerate(self.cluster.stations):
has_triggered, station_observables = \
self.simulate_station_response(station,
shower_parameters)
if has_triggered:
event_index = \
self.store_station_observables(station_id,
station_observables)
station_events.append((station_id, event_index))
return station_events
def simulate_station_response(self, station, shower_parameters):
"""Simulate station response to a shower."""
detector_observables = self.simulate_all_detectors(
station.detectors, shower_parameters)
has_triggered = self.simulate_trigger(detector_observables)
station_observables = \
self.process_detector_observables(detector_observables)
station_observables = self.simulate_gps(station_observables,
shower_parameters, station)
return has_triggered, station_observables
def simulate_all_detectors(self, detectors, shower_parameters):
"""Simulate response of all detectors in a station.
:param detectors: list of detectors
:param shower_parameters: parameters of the shower
"""
detector_observables = []
for detector in detectors:
observables = self.simulate_detector_response(detector,
shower_parameters)
detector_observables.append(observables)
return detector_observables
def simulate_detector_response(self, detector, shower_parameters):
"""Simulate detector response to a shower.
:param detector: :class:`~sapphire.clusters.Detector` instance
:param shower_parameters: shower parameters
:return: dictionary with keys 'n' (number of particles in
detector) and 't' (time of arrival of first detected particle).
"""
# implement this!
observables = {'n': 0., 't': -999}
return observables
def simulate_trigger(self, detector_observables):
"""Simulate a trigger response."""
return True
def simulate_gps(self, station_observables, shower_parameters, station):
"""Simulate gps timestamp."""
gps_timestamp = {'ext_timestamp': 0, 'timestamp': 0, 'nanoseconds': 0}
station_observables.update(gps_timestamp)
return station_observables
def process_detector_observables(self, detector_observables):
"""Process detector observables for a station.
The list of detector observables is converted into a dictionary
containing the familiar observables like pulseheights, n1, n2,
..., t1, t2, ..., integrals, etc.
:param detector_observables: list of observables of the detectors
making up a station.
:return: dictionary containing the familiar station observables
like n1, n2, n3, etc.
"""
station_observables = {'pulseheights': 4 * [-1.],
'integrals': 4 * [-1.]}
for detector_id, observables in enumerate(detector_observables, 1):
for key, value in iteritems(observables):
if key in ['n', 't']:
key = key + str(detector_id)
station_observables[key] = value
elif key in ['pulseheights', 'integrals']:
idx = detector_id - 1
station_observables[key][idx] = value
return station_observables
def store_station_observables(self, station_id, station_observables):
"""Store station observables.
:param station_id: the id of the station in self.cluster
:param station_observables: A dictionary containing the
variables to be stored for this event.
:return: The index (row number) of the newly added event.
"""
events_table = self.station_groups[station_id].events
row = events_table.row
row['event_id'] = events_table.nrows
for key, value in iteritems(station_observables):
if key in events_table.colnames:
row[key] = value
else:
warnings.warn('Unsupported variable')
row.append()
events_table.flush()
return events_table.nrows - 1
def store_coincidence(self, shower_id, shower_parameters,
station_events):
"""Store coincidence.
Store the information to find events of different stations
belonging to the same simulated shower in the coincidences
tables.
:param shower_id: The shower number for the coincidence id.
:param shower_parameters: A dictionary with the parameters of
the simulated shower.
:param station_events: A list of tuples containing the
station_id and event_index referring to the events that
participated in the coincidence.
"""
row = self.coincidences.row
row['id'] = shower_id
row['N'] = len(station_events)
row['x'], row['y'] = shower_parameters['core_pos']
row['zenith'] = shower_parameters['zenith']
row['azimuth'] = shower_parameters['azimuth']
row['size'] = shower_parameters['size']
row['energy'] = shower_parameters['energy']
timestamps = []
for station_id, event_index in station_events:
station = self.cluster.stations[station_id]
row['s%d' % station.number] = True
station_group = self.station_groups[station_id]
event = station_group.events[event_index]
timestamps.append((event['ext_timestamp'], event['timestamp'],
event['nanoseconds']))
try:
first_timestamp = sorted(timestamps)[0]
except IndexError:
first_timestamp = (0, 0, 0)
row['ext_timestamp'], row['timestamp'], row['nanoseconds'] = \
first_timestamp
row.append()
self.coincidences.flush()
self.c_index.append(station_events)
self.c_index.flush()
def _prepare_coincidence_tables(self):
"""Create coincidence tables
These are the same as the tables created by
:class:`~sapphire.analysis.coincidences.CoincidencesESD`.
This makes it easy to link events detected by multiple stations.
"""
self.coincidence_group = self.data.create_group(self.output_path,
'coincidences',
createparents=True)
try:
self.coincidence_group._v_attrs.cluster = self.cluster
except tables.HDF5ExtError:
warnings.warn('Unable to store cluster object, too large for HDF.')
description = storage.Coincidence
s_columns = {'s%d' % station.number: tables.BoolCol(pos=p)
for p, station in enumerate(self.cluster.stations, 12)}
description.columns.update(s_columns)
self.coincidences = self.data.create_table(
self.coincidence_group, 'coincidences', description)
self.c_index = self.data.create_vlarray(
self.coincidence_group, 'c_index', tables.UInt32Col(shape=2))
self.s_index = self.data.create_vlarray(
self.coincidence_group, 's_index', tables.VLStringAtom())
def _prepare_station_tables(self):
"""Create the groups and events table to store the observables
:param id: the station number, used for the group name
:param station: a :class:`sapphire.clusters.Station` object
"""
self.cluster_group = self.data.create_group(self.output_path,
'cluster_simulations',
createparents=True)
self.station_groups = []
for station in self.cluster.stations:
station_group = self.data.create_group(self.cluster_group,
'station_%d' %
station.number)
description = ProcessEvents.processed_events_description
self.data.create_table(station_group, 'events', description,
expectedrows=self.n)
self.station_groups.append(station_group)
def _store_station_index(self):
"""Stores the references to the station groups for coincidences"""
for station_group in self.station_groups:
self.s_index.append(station_group._v_pathname.encode('utf-8'))
self.s_index.flush()
def __repr__(self):
if not self.data.isopen:
return "<finished %s>" % self.__class__.__name__
return ('<%s, cluster: %r, data: %r, output_path: %r>' %
(self.__class__.__name__, self.cluster, self.data.filename,
self.output_path))
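# A toy subclass sketch (hypothetical, not part of this module): overriding
# simulate_detector_response is the minimal hook needed to turn the base class
# into a working simulation.
class FlatResponseSimulation(BaseSimulation):

    """Toy model: every detector reports one particle arriving at t = 0."""

    def simulate_detector_response(self, detector, shower_parameters):
        return {'n': 1., 't': 0.}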
| 0 |
import os
import six
import stat
from fabric.network import ssh
class FakeFile(six.StringIO):
def __init__(self, value=None, path=None):
def init(x):
six.StringIO.__init__(self, x)
if value is None:
init("")
ftype = 'dir'
size = 4096
else:
init(value)
ftype = 'file'
size = len(value)
attr = ssh.SFTPAttributes()
attr.st_mode = {'file': stat.S_IFREG, 'dir': stat.S_IFDIR}[ftype]
attr.st_size = size
attr.filename = os.path.basename(path)
self.attributes = attr
def __str__(self):
return self.getvalue()
def write(self, value):
if six.PY3 is True and isinstance(value, bytes):
value = value.decode('utf-8')
six.StringIO.write(self, value)
self.attributes.st_size = len(self.getvalue())
def close(self):
"""
Always hold fake files open.
"""
pass
def __cmp__(self, other):
me = str(self) if isinstance(other, six.string_types) else self
return cmp(me, other) # noqa: F821
class FakeFilesystem(dict):
def __init__(self, d=None):
# Replicate input dictionary using our custom __setitem__
d = d or {}
for key, value in six.iteritems(d):
self[key] = value
def __setitem__(self, key, value):
if isinstance(value, six.string_types) or value is None:
value = FakeFile(value, key)
super(FakeFilesystem, self).__setitem__(key, value)
def normalize(self, path):
"""
Normalize relative paths.
In our case, the "home" directory is just the root, /.
I expect real servers do this as well but with the user's home
directory.
"""
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
return path
def __getitem__(self, key):
return super(FakeFilesystem, self).__getitem__(self.normalize(key))
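# A minimal usage sketch (hypothetical paths, not part of the original module):
if __name__ == '__main__':
    fs = FakeFilesystem({'/etc/motd': 'hello', '/var/www': None})
    assert str(fs['etc/motd']) == 'hello'  # lookups normalize relative paths
    assert fs['/var/www'].attributes.st_size == 4096  # a None value models a directory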
| 0 |
# -*- coding: utf-8 -*-
import classes.level_controller as lc
import classes.game_driver as gd
import classes.extras as ex
import classes.board
import random
import os
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self,mainloop,99,10)
gd.BoardGame.__init__(self,mainloop,speaker,config,screen_w,screen_h,13,9)
def create_game_objects(self, level = 1):
self.board.decolorable = False
self.vis_buttons = [4,1,1,1,1,1,1,1,0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.board.draw_grid = False
s = random.randrange(150, 190, 5)
v = random.randrange(230, 255, 5)
h = random.randrange(0, 255, 5)
white = (255,255,255)
if self.mainloop.scheme is None:
color0 = ex.hsv_to_rgb(h,40,230) #highlight 1
else:
color0 = self.mainloop.scheme.u_color
if self.mainloop.scheme.dark:
white = (0,0,0)
outline_color = (150,150,150)
#setting level variable
#data = [x_count, y_count, number_count, top_limit, ordered]
if self.level.lvl == 1:
data = [13,7,5,3,2]
elif self.level.lvl == 2:
data = [13,7,8,3,3]
elif self.level.lvl == 3:
data = [12,7,7,4,2]
elif self.level.lvl == 4:
data = [12,7,11,4,3]
elif self.level.lvl == 5:
data = [12,7,15,4,4]
elif self.level.lvl == 6:
data = [13,7,9,5,2]
elif self.level.lvl == 7:
data = [13,7,14,5,3]
elif self.level.lvl == 8:
data = [13,7,19,5,4]
elif self.level.lvl == 9:
data = [12,7,11,6,2]
elif self.level.lvl == 10:
data = [12,7,17,6,3]
self.chapters = [1,5,10]
self.points = (data[3]*data[4]) // 2
#rescale the number of squares horizontally to better match the screen width
m = data[0] % 2
if m == 0:
x_count = self.get_x_count(data[1],even=True)
else:
x_count = self.get_x_count(data[1],even=False)
if x_count > data[0]:
data[0] = x_count
self.data = data
self.layout.update_layout(data[0],data[1])
self.board.level_start(data[0],data[1],self.layout.scale)
image_src = [os.path.join('memory', "n_img%da.png" % (i)) for i in range(1,22)]
self.choice_list = [x for x in range(1,data[2]+1)]
self.shuffled = self.choice_list[:]
random.shuffle(self.shuffled)
inversions = ex.inversions(self.shuffled)
if inversions % 2 != 0: #if number of inversions is odd it is unsolvable
#in unsolvable combinations swapping 2 squares will make it solvable
temp = self.shuffled[0]
self.shuffled[0]=self.shuffled[1]
self.shuffled[1]=temp
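#example: shuffled = [2,1,3] holds one inversion (2 before 1), an odd count;
#swapping two tiles always flips the permutation parity, so the swap above
#turns it into an even, solvable arrangement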
color = ((255,255,255))
h1=(data[1]-data[4])//2 #height of the top margin
h2=data[1]-h1-data[4]-1 #height of the bottom margin minus 1 (game label)
w2=(data[0]-data[3])//2 #side margin width
self.check = [h1,h2,w2]
self.board.add_door(w2,h1,data[3],data[4],classes.board.Door,"",color,"")
#create table to store 'binary' solution
#find position of first door square
x = w2
y = h1
self.mini_grid = []
#add objects to the board
line = []
h_start = random.randrange(0, 155, 5)
h_step = 100 // (data[2])
for i in range(data[2]):
h = (h_start + (self.shuffled[i]-1)*h_step)
number_color = ex.hsv_to_rgb(h,s,v) #highlight 1
caption = str(self.shuffled[i])
self.board.add_unit(x,y,1,1,classes.board.ImgShip,caption,white,image_src[self.shuffled[i]])
self.board.ships[-1].readable = False
line.append(i)
x += 1
if x >= w2+data[3] or i == data[2]-1:
x = w2
y += 1
self.mini_grid.append(line)
line=[]
self.outline_all(outline_color,1)
instruction = self.d["Re-arrange right"]
self.board.add_unit(0,data[1]-1,data[0],1,classes.board.Letter,instruction,color0,"",8)#bottom 2
self.board.ships[-1].immobilize()
if self.mainloop.scheme is not None:
self.board.ships[-1].font_color = self.mainloop.scheme.u_font_color
self.board.ships[-1].speaker_val = self.dp["Re-arrange right"]
self.board.ships[-1].speaker_val_update = False
#horizontal
self.board.add_unit(0,0,data[0],h1,classes.board.Obstacle,"",white,"",7)#top
self.board.add_unit(0,h1+data[4],data[0],h2,classes.board.Obstacle,"",white,"",7)#bottom 1
#side obstacles
self.board.add_unit(0,h1,w2,data[4],classes.board.Obstacle,"",white,"",7)#left
self.board.add_unit(w2+data[3],h1,w2,data[4],classes.board.Obstacle,"",white,"",7)#right
self.board.all_sprites_list.move_to_front(self.board.units[0])
def handle(self,event):
gd.BoardGame.handle(self, event) #send event handling up
def update(self,game):
game.fill((255,255,255))
gd.BoardGame.update(self, game) #rest of painting done by parent
def check_result(self):
ships = []
current = [x for x in range(self.data[2]+1)]#self.choice_list[:]
#collect value and x position on the grid from ships list
for i in range(len(self.board.ships)-1):
x = self.board.ships[i].grid_x-self.check[2]
y = self.board.ships[i].grid_y-self.check[0]
w = self.data[3]
h = self.data[4]
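#row-major index into the door area: (x, y) -> x + y * w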
pos = x + (y*w)
current[pos]=int(self.board.ships[i].value)
del(current[-1])
if self.choice_list == current:
self.update_score(self.points)
self.level.next_board()
else:
if self.points > 0 :
self.points -= 1
self.level.try_again()
| 0.035938 |