text | meta
---|---|
import random
import numpy as np
import vtk
from PythonQt import QtGui
import ddapp.objectmodel as om
from ddapp import ik
from ddapp import planplayback
from ddapp import robotstate
from ddapp import segmentation
from ddapp import transformUtils
from ddapp import visualization as vis
from ddapp.asynctaskqueue import AsyncTaskQueue
from ddapp.simpletimer import SimpleTimer
from ddapp.transformUtils import copyFrame
class GraspSearchPlanner(object):
def __init__(self, ikPlanner, robotModel, jointController, sensorJointController, planPlaybackFunction, showPoseFunction, playbackRobotModel):
self.ikPlanner = ikPlanner
self.robotModel = robotModel
self.jointController = jointController
self.sensorJointController = sensorJointController
self.planPlaybackFunction = planPlaybackFunction
self.showPoseFunction = showPoseFunction
self.playbackRobotModel = playbackRobotModel
self.endPoses = []
self.affordanceName = 'board'
self.affordance = None
self.handModels = []
self.reachingSide = 'left'
self.graspSample = 0
self.handToUtorso = [0.2, 0.7, 0.0]
self.planFromCurrentRobotState = True
self.tspanPreReach = [0.35, 0.35]
self.tspanFull = [0.0, 1.0]
self.tspanPreGrasp = [0.7, 0.7]
self.tspanPreGraspToEnd = [0.7, 1.0]
self.tspanStart = [0.0, 0.0]
self.tspanEnd = [1.0, 1.0]
def playManipPlan(self):
self.planPlaybackFunction([self.lastManipPlan])
def showPreGraspEndPose(self):
self.showPoseFunction(self.jointController.getPose('pre_grasp_end_pose'))
def showGraspEndPose(self):
self.showPoseFunction(self.jointController.getPose('grasp_end_pose'))
def computePreGraspTraj(self):
self.computeGraspTraj(poseStart='q_start', poseEnd='pre_grasp_end_pose', timeSamples=[0.0, 0.35, 0.7])
def computeEndGraspTraj(self):
self.computeGraspTraj(poseStart='pre_grasp_end_pose', poseEnd='grasp_end_pose', timeSamples=[0.7, 1.0])
def computeGroundFrame(self, robotModel):
'''
Given a robot model, returns a vtkTransform at a position between
the feet, on the ground, with z-axis up and x-axis aligned with the
robot pelvis x-axis.
'''
t1 = robotModel.getLinkFrame( self.ikPlanner.leftFootLink )
t2 = robotModel.getLinkFrame( self.ikPlanner.rightFootLink )
pelvisT = robotModel.getLinkFrame( self.ikPlanner.pelvisLink )
xaxis = [1.0, 0.0, 0.0]
pelvisT.TransformVector(xaxis, xaxis)
xaxis = np.array(xaxis)
zaxis = np.array([0.0, 0.0, 1.0])
yaxis = np.cross(zaxis, xaxis)
yaxis /= np.linalg.norm(yaxis)
xaxis = np.cross(yaxis, zaxis)
stancePosition = (np.array(t2.GetPosition()) + np.array(t1.GetPosition())) / 2.0
footHeight = 0.0811
t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(stancePosition)
t.Translate([0.0, 0.0, -footHeight])
return t
def randomAffordance(self, robotModel):
aff = self.findAffordance()
if aff:
om.removeFromObjectModel(aff)
self.spawnAffordance(robotModel, randomize=True)
def spawnAffordance(self, robotModel, randomize=False):
if randomize:
position = [random.uniform(0.5, 0.8), random.uniform(-0.2, 0.2), random.uniform(0.5, 0.8)]
rpy = [random.choice((random.uniform(-35, 35), random.uniform(70, 110))), random.uniform(-10, 10), random.uniform(-5, 5)]
zwidth = random.uniform(0.5, 1.0)
else:
position = [0.65, 0.0, 0.6]
rpy = [25, 1, 0]
zwidth = 24 * .0254
xwidth = 3.75 * .0254
ywidth = 1.75 * .0254
t = transformUtils.frameFromPositionAndRPY(position, rpy)
t.Concatenate(self.computeGroundFrame(robotModel))
xaxis = [1,0,0]
yaxis = [0,1,0]
zaxis = [0,0,1]
for axis in (xaxis, yaxis, zaxis):
t.TransformVector(axis, axis)
affordance = segmentation.createBlockAffordance(t.GetPosition(), xaxis, yaxis, zaxis, xwidth, ywidth, zwidth, 'board', parent='affordances')
affordance.setProperty('Color', QtGui.QColor(200, 150, 100))
t = affordance.actor.GetUserTransform()
affordanceFrame = vis.showFrame(t, 'board frame', parent=affordance, visible=False, scale=0.2)
def updateHandModel(self):
graspFrame = self.getAffordanceChild('desired grasp frame')
handMesh = self.findAffordanceChild('desired grasp hand')
if not handMesh:
handMesh = self.getHandModel().newPolyData('desired grasp hand', self.robotModel.views[0], parent=self.findAffordance())
handFrame = handMesh.children()[0]
handFrame.copyFrame(graspFrame.transform)
def findAffordance(self):
self.affordance = om.findObjectByName(self.affordanceName)
return self.affordance
def findAffordanceChild(self, name):
assert self.affordance
return self.affordance.findChild(name)
def getAffordanceChild(self, name):
child = self.findAffordanceChild(name)
if not child:
raise Exception('Failed to locate affordance child: %s' % name)
return child
def getAffordanceFrame(self):
self.findAffordance()
assert self.affordance
affordanceName = self.affordance.getProperty('Name')
return self.getAffordanceChild('%s frame' % affordanceName)
def computeGraspFrameSamples(self):
if self.affordanceName == 'board':
self.computeGraspFrameSamplesBoard()
else:
self.getAffordanceChild('sample grasp frame 0')
def computeGraspFrameSamplesBoard(self):
affordanceFrame = self.getAffordanceFrame()
additionalOffset = 0.0
yoffset = 0.5*self.affordance.params['ywidth'] + additionalOffset
xoffset = 0.5*self.affordance.params['xwidth'] + additionalOffset
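# Eight candidate palm poses for the board: two on each of the four faces that run
# along the board's length, with mirrored palm orientations (position offsets in
# meters, RPY in degrees, expressed relative to the board frame).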
frames = [
[[0.0, yoffset, 0.0], [0.0, 90, 180.0]],
[[0.0, yoffset, 0.0], [0.0, -90, 180.0]],
[[0.0, -yoffset, 0.0], [0.0, 90, 0.0]],
[[0.0, -yoffset, 0.0], [0.0, -90, 0.0]],
[[xoffset, 0.0, 0.0], [-90, -90, 180.0]],
[[xoffset, 0.0, 0.0], [90, 90, 180.0]],
[[-xoffset, 0.0, 0.0], [90, -90, 180.0]],
[[-xoffset, 0.0, 0.0], [-90, 90, 180.0]],
]
for i, frame in enumerate(frames):
pos, rpy = frame
t = transformUtils.frameFromPositionAndRPY(pos, rpy)
t.Concatenate(affordanceFrame.transform)
name = 'sample grasp frame %d' % i
om.removeFromObjectModel(self.findAffordanceChild(name))
vis.showFrame(copyFrame(t), name, parent=self.affordance, visible=False, scale=0.2)
def computeGraspFrame(self):
frame = self.getAffordanceChild('sample grasp frame %d' % self.graspSample)
name = 'grasp frame'
om.removeFromObjectModel(self.findAffordanceChild(name))
vis.showFrame(copyFrame(frame.transform), name, parent=self.affordance, visible=False, scale=0.2)
def createSearchGraspConstraints(self):
if self.affordanceName == 'board':
return self.createSearchGraspConstraintsBoard()
else:
targetFrame = self.getAffordanceChild('grasp frame')
return self.createPositionOrientationGraspConstraints(self.reachingSide, targetFrame, positionTolerance=0.0025, angleToleranceInDegrees=1.0)
def createSearchGraspConstraintsBoard(self):
targetFrame = self.getAffordanceChild('grasp frame')
boardHalfLength = self.affordance.params['zwidth']/2.0 - 0.08
graspPosition, graspOrientation = self.createPositionOrientationGraspConstraints(self.reachingSide, targetFrame, positionTolerance=0.0025, angleToleranceInDegrees=1.0)
graspPosition.lowerBound = np.array([-boardHalfLength, 0.0, 0.0])
graspPosition.upperBound = np.array([boardHalfLength, 0.0, 0.0])
return graspPosition, graspOrientation
def createRetractGraspConstraints(self):
targetFrame = self.getAffordanceChild('desired grasp frame')
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(targetFrame.transform)
t.Translate(0.0, 0.0, 0.25)
retractFrame = vis.updateFrame(copyFrame(t), 'retract frame', scale=0.2, visible=False, parent=self.affordance)
return self.createPositionOrientationGraspConstraints(self.reachingSide, retractFrame, positionTolerance=0.03, angleToleranceInDegrees=5.0)
def createGraspConstraints(self):
targetFrame = self.getAffordanceChild('desired grasp frame')
return self.createPositionOrientationGraspConstraints(self.reachingSide, targetFrame, positionTolerance=0.005, angleToleranceInDegrees=3.0)
def createPreGraspConstraints(self):
targetFrame = self.getAffordanceChild('pre grasp frame')
return self.createPositionOrientationGraspConstraints(self.reachingSide, targetFrame, positionTolerance=0.02, angleToleranceInDegrees=7.0)
def createPreReachConstraint(self):
handToUtorso = np.array(self.handToUtorso)
if self.reachingSide == 'right':
handToUtorso[1] *= -1
return self.createHandRelativePositionConstraint(self.reachingSide, 'utorso', handToUtorso)
def computeGraspEndPoseSearch(self):
startPoseName = 'q_start'
constraints = []
constraints.extend(self.createSearchGraspConstraints())
constraints.extend(self.createMovingReachConstraints(startPoseName))
self.graspEndPose, self.graspEndPoseInfo = self.ikServer.runIk(constraints)
self.ikServer.sendPoseToServer(self.graspEndPose, 'grasp_end_pose')
self.jointController.setPose('grasp_end_pose', self.graspEndPose)
print 'grasp end pose info:', self.graspEndPoseInfo
def computeGraspEndPoseFrames(self):
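# Visualize the palm pose achieved by the IK solution and cache the
# affordance-to-grasp transform so the desired grasp frame can follow the
# affordance when either frame is edited later.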
graspFrame = self.getAffordanceChild('grasp frame')
affordanceFrame = self.getAffordanceFrame()
self.jointController.setPose('grasp_end_pose', self.graspEndPose)
handFrame = self.robotModel.getLinkFrame(self.getHandLink())
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(self.getPalmToHandLink())
t.Concatenate(handFrame)
graspEndPoseFrame = t
vis.updateFrame(t, 'grasp frame (ik result with tolerance)', scale=0.2, visible=False, parent=self.affordance)
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(graspFrame.transform)
t.Translate(np.array(graspEndPoseFrame.GetPosition()) - np.array(graspFrame.transform.GetPosition()))
t.Concatenate(affordanceFrame.transform.GetLinearInverse())
self.affordanceToGrasp = copyFrame(t)
def updateAffordanceToGrasp(frame):
affordanceFrame = self.getAffordanceFrame()
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(frame.transform)
t.Concatenate(affordanceFrame.transform.GetLinearInverse())
self.affordanceToGrasp = copyFrame(t)
self.updateHandModel()
def updateGraspFrame(frame, create=False):
graspFrame = self.findAffordanceChild('desired grasp frame')
if not graspFrame and not create:
frame.onTransformModifiedCallback = None
return
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(self.affordanceToGrasp)
t.Concatenate(frame.transform)
if graspFrame:
graspFrame.onTransformModifiedCallback = None
graspFrame = vis.updateFrame(copyFrame(t), 'desired grasp frame', scale=0.2, visible=False, parent=self.affordance)
graspFrame.onTransformModifiedCallback = updateAffordanceToGrasp
self.updateHandModel()
return graspFrame
self.lockAffordanceToHand = False
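# While lockAffordanceToHand is set, playback of the robot model drags the
# affordance along with the palm (used after grasping, e.g. retract/extend/stand).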
def onRobotModelChanged(model):
handFrame = self.playbackRobotModel.getLinkFrame(self.getHandLink())
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(self.getPalmToHandLink())
t.Concatenate(handFrame)
palmFrame = vis.updateFrame(t, 'palm frame', scale=0.2, visible=False, parent=self.affordance)
if self.lockAffordanceToHand:
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(self.affordanceToGrasp.GetLinearInverse())
t.Concatenate(palmFrame.transform)
affordanceFrame = self.getAffordanceFrame()
affordanceFrame.copyFrame(t)
self.playbackRobotModel.connectModelChanged(onRobotModelChanged)
graspFrame = updateGraspFrame(affordanceFrame, create=True)
affordanceFrame.onTransformModifiedCallback = updateGraspFrame
def computePreGraspFrame(self, preGraspDistance=0.20):
graspFrame = self.getAffordanceChild('desired grasp frame')
pos = [0.0, -preGraspDistance, 0.0]
rpy = [0.0, 0.0, 0.0]
preGraspToGrasp = transformUtils.frameFromPositionAndRPY(pos, rpy)
t = vtk.vtkTransform()
t.PostMultiply()
t.Concatenate(preGraspToGrasp)
t.Concatenate(graspFrame.transform)
vis.updateFrame(copyFrame(t), 'pre grasp frame', scale=0.2, visible=False, parent=self.affordance)
def computeGraspEndPose(self):
startPoseName = 'q_start'
constraints = []
constraints.extend(self.createMovingReachConstraints(startPoseName))
constraints.extend(self.createGraspConstraints())
self.graspEndPose, info = self.ikServer.runIk(constraints)
self.ikServer.sendPoseToServer(self.graspEndPose, 'grasp_end_pose')
self.jointController.setPose('grasp_end_pose', self.graspEndPose)
print 'grasp end pose info:', info
def commitState(self):
poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(self.lastManipPlan)
self.sensorJointController.setPose('EST_ROBOT_STATE', poses[-1])
def computePreGraspAdjustment(self):
assert self.planFromCurrentRobotState
startPose = np.array(self.sensorJointController.q)
startPoseName = 'reach_start'
self.jointController.addPose(startPoseName, startPose)
self.computePostureGoal(startPoseName, 'pre_grasp_end_pose')
def computeGraspReach(self):
if self.planFromCurrentRobotState:
startPose = np.array(self.sensorJointController.q)
else:
startPose = np.array(self.jointController.getPose('pre_grasp_end_pose'))
startPoseName = 'reach_start'
self.jointController.addPose(startPoseName, startPose)
self.ikServer.sendPoseToServer(startPose, startPoseName)
constraints = []
constraints.extend(self.createGraspConstraints())
constraints.append(self.createLockedTorsoPostureConstraint(startPoseName))
constraints.append(self.createLockedArmPostureConstraint(startPoseName))
endPose, info = self.ikServer.runIk(constraints, seedPostureName=startPoseName)
print 'grasp reach info:', info
self.jointController.addPose('reach_end', endPose)
self.computePostureGoal(startPoseName, 'reach_end')
def computeRetractTraj(self):
if self.planFromCurrentRobotState:
startPose = np.array(self.sensorJointController.q)
else:
startPose = np.array(self.jointController.getPose('grasp_end_pose'))
startPoseName = 'retract_start'
self.jointController.addPose(startPoseName, startPose)
self.ikServer.sendPoseToServer(startPose, startPoseName)
constraints = []
constraints.extend(self.createMovingReachConstraints(startPoseName))
graspPosition, graspOrientation = self.createRetractGraspConstraints()
graspPosition.tspan = self.tspanEnd
graspOrientation.tspan = self.tspanEnd
constraints.extend([
graspPosition,
graspOrientation,
])
endPose, info = self.ikServer.runIk(constraints, seedPostureName=startPoseName)
print 'retract info:', info
self.jointController.addPose('retract_end', endPose)
self.computePostureGoal(startPoseName, 'retract_end')
#self.runIkTraj(constraints, startPoseName, startPoseName, timeSamples)
def computeArmExtend(self):
if self.planFromCurrentRobotState:
startPose = np.array(self.sensorJointController.q)
else:
startPose = np.array(self.jointController.getPose('grasp_end_pose'))
startPoseName = 'retract_start'
self.jointController.addPose(startPoseName, startPose)
self.ikServer.sendPoseToServer(startPose, startPoseName)
constraints = []
constraints.extend(self.createFixedFootConstraints(startPoseName))
constraints.append(self.createKneePostureConstraint([0.4, 0.4]))
constraints.append(self.createMovingBasePostureConstraint(startPoseName))
constraints.append(self.createLockedArmPostureConstraint(startPoseName))
constraints.append(self.createPostureConstraint('q_nom', robotstate.matchJoints('back')))
movingArmJoints = 'l_arm' if self.reachingSide == 'left' else 'r_arm'
constraints.append(self.createPostureConstraint('q_zero', robotstate.matchJoints(movingArmJoints)))
endPose, info = self.ikServer.runIk(constraints, seedPostureName=startPoseName)
print 'retract info:', info
self.jointController.addPose('retract_end', endPose)
self.computePostureGoal(startPoseName, 'retract_end')
def computePreGraspEndPose(self):
constraints = []
constraints.extend(self.createPreGraspConstraints())
constraints.extend(self.createMovingReachConstraints('grasp_end_pose', lockBack=True, lockBase=True, lockArm=True))
self.preGraspEndPose, self.preGraspEndPoseInfo = self.ikServer.runIk(constraints)
self.ikServer.sendPoseToServer(self.preGraspEndPose, 'pre_grasp_end_pose')
self.jointController.addPose('pre_grasp_end_pose', self.preGraspEndPose)
print 'pre grasp end pose info:', self.preGraspEndPoseInfo
def computeGraspTraj(self, poseStart='q_start', poseEnd='grasp_end_pose', timeSamples=None):
constraints = []
constraints.extend(self.createMovingReachConstraints(poseStart))
movingBaseConstraint = constraints[-2]
assert isinstance(movingBaseConstraint, ik.PostureConstraint)
assert 'base_x' in movingBaseConstraint.joints
movingBaseConstraint.tspan = [self.tspanStart[0], self.tspanPreGrasp[1]]
preReachPosition = self.createPreReachConstraint()
preReachPosition.tspan = self.tspanPreReach
graspPosture = self.createLockedTorsoPostureConstraint('grasp_end_pose')
graspPosture.tspan = self.tspanPreGraspToEnd
preGraspPosition, preGraspOrientation = self.createPreGraspConstraints()
preGraspPosition.tspan = self.tspanPreGrasp
preGraspOrientation.tspan = self.tspanPreGrasp
graspPosition, graspOrientation = self.createGraspConstraints()
graspPosition.tspan = self.tspanEnd
graspOrientation.tspan = self.tspanEnd
constraints.extend([
preReachPosition,
graspPosture,
preGraspPosition,
preGraspOrientation,
graspPosition,
graspOrientation,
])
if timeSamples is None:
timeSamples=[0.0, 0.35, 0.7, 1.0]
self.runIkTraj(constraints, poseStart, poseEnd, timeSamples)
def useGraspEndPoseOption(self, index):
side, graspSample = self.endPoses[index][3]
self.reachingSide = side
self.graspSample = graspSample
self.updateGraspEndPose()
self.showGraspEndPose()
def computeInitialState(self):
if self.planFromCurrentRobotState:
startPose = np.array(self.sensorJointController.q)
else:
startPose = np.array(self.jointController.getPose('q_nom'))
self.ikServer.sendPoseToServer(startPose, 'q_start')
self.jointController.addPose('q_start', startPose)
def updateGraspEndPose(self, enableSearch=True):
self.computeInitialState()
self.findAffordance()
if enableSearch:
om.removeFromObjectModel(self.findAffordanceChild('desired grasp frame'))
om.removeFromObjectModel(self.findAffordanceChild('desired grasp hand'))
if not self.findAffordanceChild('desired grasp frame'):
self.computeGraspFrameSamples()
self.computeGraspFrame()
self.computeGraspEndPoseSearch()
self.computeGraspEndPoseFrames()
else:
self.computeGraspEndPose()
self.computePreGraspFrame()
self.computePreGraspEndPose()
def endPoseSearch(self):
self.findAffordance()
self.computeGraspFrameSamples()
self.endPoses = []
for side in ['left', 'right']:
#for side in ['left']:
sampleCount = 0
while self.findAffordanceChild('sample grasp frame %d' % sampleCount):
self.reachingSide = side
self.graspSample = sampleCount
sampleCount += 1
self.updateGraspEndPose()
if self.graspEndPoseInfo == 1 and self.preGraspEndPoseInfo == 1:
params = [self.reachingSide, self.graspSample]
score = self.computePostureCost(self.graspEndPose)
print 'score:', score
print 'params:', self.reachingSide, self.graspSample
self.endPoses.append((score, self.graspEndPose, self.preGraspEndPose, params))
if not self.endPoses:
print 'failed to find suitable grasp end pose'
return 0
self.endPoses.sort(key=lambda x: x[0])
self.useGraspEndPoseOption(0)
print '\n\nfound %d suitable end poses' % len(self.endPoses)
return len(self.endPoses)
class DebrisPlannerDemo(object):
def __init__(self, robotStateModel, sensorJointController, playbackRobotModel, ikPlanner, manipPlanner, atlasDriver, handDriver, multisenseDriver, refitFunction):
self.robotStateModel = robotStateModel
self.sensorJointController = sensorJointController
self.playbackRobotModel = playbackRobotModel
self.ikPlanner = ikPlanner
self.manipPlanner = manipPlanner
self.atlasDriver = atlasDriver
self.handDriver = handDriver
self.multisenseDriver = multisenseDriver
self.refitFunction = refitFunction
self.planFromCurrentRobotState = True
self.userPromptEnabled = True
self.visOnly = True
def reset(self):
self.ikPlanner.lockAffordanceToHand = False
self.ikPlanner.randomAffordance(self.robotStateModel)
def playManipPlan(self):
self.ikPlanner.playManipPlan()
self.robotStateModel.setProperty('Alpha', 0.1)
def search(self):
self.ikPlanner.endPoseSearch()
self.robotStateModel.setProperty('Alpha', 0.1)
def preGrasp(self):
self.ikPlanner.updateGraspEndPose(enableSearch=False)
self.ikPlanner.computePreGraspTraj()
self.playManipPlan()
def adjustPreGrasp(self):
self.ikPlanner.updateGraspEndPose(enableSearch=False)
self.ikPlanner.computePreGraspAdjustment()
self.playManipPlan()
def grasp(self):
self.ikPlanner.computeGraspReach()
self.playManipPlan()
def retract(self):
self.ikPlanner.lockAffordanceToHand = True
self.ikPlanner.computeRetractTraj()
self.playManipPlan()
def stand(self):
self.ikPlanner.lockAffordanceToHand = True
startPose = self.getPlanningStartPose()
self.ikPlanner.computeStandPlan(startPose)
self.playManipPlan()
def extendArm(self):
self.ikPlanner.lockAffordanceToHand = True
self.ikPlanner.computeArmExtend()
self.playManipPlan()
def drop(self):
self.ikPlanner.lockAffordanceToHand = False
om.removeFromObjectModel(self.ikPlanner.affordance)
self.nominal()
def nominal(self):
startPose = self.getPlanningStartPose()
self.ikPlanner.computeNominalPlan(startPose)
self.playManipPlan()
def toggleAffordanceEdit(self):
aff = self.ikPlanner.findAffordance()
frame = self.ikPlanner.getAffordanceFrame()
edit = not frame.getProperty('Edit')
frame.setProperty('Edit', edit)
aff.setProperty('Alpha', 0.5 if edit else 1.0)
def getEstimatedRobotStatePose(self):
return self.sensorJointController.getPose('EST_ROBOT_STATE')
def getPlanningStartPose(self):
if self.planFromCurrentRobotState:
return self.getEstimatedRobotStatePose()
else:
assert False
def commit(self):
if self.visOnly:
self.ikPlanner.commitState()
self.clearPlan()
else:
self.manipPlanner.commitManipPlan(self.ikPlanner.lastManipPlan)
self.clearPlan()
def clearPlan(self):
self.ikPlanner.lastManipPlan = None
self.robotStateModel.setProperty('Alpha', 1.0)
self.playbackRobotModel.setProperty('Visible', False)
def useEndPose(self, index):
self.ikPlanner.useGraspEndPoseOption(index)
def sendHeightMode(self):
self.atlasDriver.sendPlanUsingBdiHeight(True)
def openHand(self):
self.handDriver.sendOpen()
def closeHand(self):
self.handDriver.sendClose()
def sendPelvisCrouch(self):
self.atlasDriver.sendPelvisHeightCommand(0.7)
def sendPelvisStand(self):
self.atlasDriver.sendPelvisHeightCommand(0.8)
def sendNeckPitchLookDown(self):
self.multisenseDriver.setNeckPitch(40)
def spinLidar(self):
self.multisenseDriver.setLidarRevolutionTime(10)
def sendNeckPitchLookForward(self):
self.multisenseDriver.setNeckPitch(15)
def waitForAtlasBehaviorAsync(self, behaviorName):
assert behaviorName in self.atlasDriver.getBehaviorMap().values()
while self.atlasDriver.getCurrentBehaviorName() != behaviorName:
yield
def printAsync(self, s):
yield
print s
def userPrompt(self, message):
if not self.userPromptEnabled:
return
yield
result = raw_input(message)
if result != 'y':
raise Exception('user abort.')
def delay(self, delayTimeInSeconds):
yield
t = SimpleTimer()
while t.elapsed() < delayTimeInSeconds:
yield
def pauseQueue(self):
raise AsyncTaskQueue.PauseException()
def waitForCleanLidarSweepAsync(self):
currentRevolution = self.multisenseDriver.displayedRevolution
desiredRevolution = currentRevolution + 2
while self.multisenseDriver.displayedRevolution < desiredRevolution:
yield
def autonomousExecute(self):
taskQueue = AsyncTaskQueue()
# require affordance at start
taskQueue.addTask(self.printAsync('get affordance'))
taskQueue.addTask(self.ikPlanner.getAffordanceFrame)
# stand, open hand, manip
taskQueue.addTask(self.printAsync('send behavior start commands'))
taskQueue.addTask(self.userPrompt('stand and open hand. continue? y/n: '))
taskQueue.addTask(self.atlasDriver.sendManipCommand)
taskQueue.addTask(self.openHand)
taskQueue.addTask(self.delay(3.0))
taskQueue.addTask(self.sendHeightMode)
taskQueue.addTask(self.atlasDriver.sendManipCommand)
# user prompt
taskQueue.addTask(self.userPrompt('sending neck pitch down. continue? y/n: '))
# set neck pitch
taskQueue.addTask(self.printAsync('neck pitch down'))
taskQueue.addTask(self.sendNeckPitchLookDown)
taskQueue.addTask(self.delay(1.0))
# user prompt
taskQueue.addTask(self.userPrompt('perception and fitting. continue? y/n: '))
# perception & fitting
#taskQueue.addTask(self.printAsync('waiting for clean lidar sweep'))
#taskQueue.addTask(self.waitForCleanLidarSweepAsync)
#taskQueue.addTask(self.printAsync('user fit affordance'))
#taskQueue.addTask(self.pauseQueue)
# compute grasp & stance
taskQueue.addTask(self.printAsync('grasp search'))
taskQueue.addTask(self.search)
# user select end pose
taskQueue.addTask(self.printAsync('user select end pose'))
#taskQueue.addTask(self.pauseQueue)
# compute pre grasp plan
taskQueue.addTask(self.preGrasp)
# user prompt
taskQueue.addTask(self.userPrompt('commit manip plan. continue? y/n: '))
# commit pre grasp plan
taskQueue.addTask(self.printAsync('commit pre grasp plan'))
taskQueue.addTask(self.commit)
taskQueue.addTask(self.delay(10.0))
# perception & fitting
taskQueue.addTask(self.printAsync('user fit affordance'))
taskQueue.addTask(self.toggleAffordanceEdit)
taskQueue.addTask(self.pauseQueue)
#taskQueue.addTask(self.printAsync('waiting for clean lidar sweep'))
#taskQueue.addTask(self.waitForCleanLidarSweepAsync)
#taskQueue.addTask(self.printAsync('refit affordance'))
#taskQueue.addTask(self.refitFunction)
# compute pre grasp plan
taskQueue.addTask(self.grasp)
# user prompt
taskQueue.addTask(self.userPrompt('commit manip plan. continue? y/n: '))
# commit pre grasp plan
taskQueue.addTask(self.printAsync('commit grasp plan'))
taskQueue.addTask(self.commit)
taskQueue.addTask(self.delay(10.0))
# user prompt
taskQueue.addTask(self.userPrompt('closing hand. continue? y/n: '))
# close hand
taskQueue.addTask(self.printAsync('close hand'))
taskQueue.addTask(self.closeHand)
taskQueue.addTask(self.delay(2.0))
taskQueue.addTask(self.closeHand)
# compute retract plan
taskQueue.addTask(self.retract)
# user prompt
taskQueue.addTask(self.userPrompt('commit manip plan. continue? y/n: '))
# commit pre grasp plan
taskQueue.addTask(self.printAsync('commit retract plan'))
taskQueue.addTask(self.commit)
taskQueue.addTask(self.delay(0.1))
taskQueue.addTask(self.closeHand)
taskQueue.addTask(self.delay(0.1))
taskQueue.addTask(self.closeHand)
taskQueue.addTask(self.delay(0.2))
taskQueue.addTask(self.closeHand)
taskQueue.addTask(self.delay(0.2))
taskQueue.addTask(self.closeHand)
taskQueue.addTask(self.delay(0.2))
taskQueue.addTask(self.closeHand)
taskQueue.addTask(self.delay(0.2))
taskQueue.addTask(self.closeHand)
taskQueue.addTask(self.delay(1.0))
taskQueue.addTask(self.closeHand)
taskQueue.addTask(self.delay(1.0))
taskQueue.addTask(self.closeHand)
taskQueue.addTask(self.delay(1.0))
taskQueue.addTask(self.closeHand)
taskQueue.addTask(self.delay(6.0))
# compute extend arm plan
taskQueue.addTask(self.extendArm)
# user prompt
taskQueue.addTask(self.userPrompt('commit manip plan. continue? y/n: '))
# commit pre grasp plan
taskQueue.addTask(self.printAsync('commit extend arm plan'))
taskQueue.addTask(self.commit)
taskQueue.addTask(self.delay(10.0))
# user prompt
taskQueue.addTask(self.userPrompt('opening hand. continue? y/n: '))
# open hand
taskQueue.addTask(self.printAsync('open hand'))
taskQueue.addTask(self.openHand)
taskQueue.addTask(self.delay(2.0))
# compute nominal pose plan
taskQueue.addTask(self.drop)
# user prompt
taskQueue.addTask(self.userPrompt('commit manip plan. continue? y/n: '))
# commit pre grasp plan
taskQueue.addTask(self.printAsync('commit nominal pose plan'))
taskQueue.addTask(self.commit)
taskQueue.addTask(self.delay(10.0))
taskQueue.addTask(self.printAsync('done!'))
return taskQueue
|
{
"content_hash": "f015f007fc73b8e870bd109881a7056c",
"timestamp": "",
"source": "github",
"line_count": 923,
"max_line_length": 175,
"avg_line_length": 35.070422535211264,
"alnum_prop": 0.6647513129440841,
"repo_name": "gizatt/director",
"id": "31003390b18a2e5ed73f53bc6b4ed1b60fe47cb1",
"size": "32370",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/python/ddapp/debrisdemo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "119719"
},
{
"name": "C++",
"bytes": "386403"
},
{
"name": "CMake",
"bytes": "48084"
},
{
"name": "GLSL",
"bytes": "15443"
},
{
"name": "MATLAB",
"bytes": "144018"
},
{
"name": "Makefile",
"bytes": "4876"
},
{
"name": "Python",
"bytes": "1994096"
},
{
"name": "Shell",
"bytes": "1337"
}
],
"symlink_target": ""
}
|
"""Serializers for channels_fields"""
import copy
import logging
from typing import List
from django.contrib.auth import get_user_model
from django.db import transaction
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from channels.serializers.channels import ChannelAppearanceMixin
from channels.serializers.validators import validate_email, validate_username
from channels_fields.api import add_user_role, is_moderator
from channels_fields.constants import FIELD_ROLE_MODERATORS
from channels_fields.models import FieldChannel, FieldList, Subfield
from course_catalog.constants import PrivacyLevel
from course_catalog.models import UserList
from course_catalog.serializers import UserListSerializer
from open_discussions.serializers import WriteableSerializerMethodField
from profiles.models import Profile
User = get_user_model()
log = logging.getLogger(__name__)
class SubfieldSerializer(serializers.ModelSerializer):
"""Serializer for Subfields"""
parent_field = serializers.SlugRelatedField(
many=False, read_only=True, slug_field="name"
)
field_channel = serializers.SlugRelatedField(
many=False, read_only=True, slug_field="name"
)
class Meta:
model = Subfield
fields = ("parent_field", "field_channel", "position")
class FieldChannelSerializer(ChannelAppearanceMixin, serializers.ModelSerializer):
"""Serializer for FieldChannel"""
lists = serializers.SerializerMethodField()
featured_list = UserListSerializer(many=False, read_only=True)
subfields = SubfieldSerializer(many=True, read_only=True)
is_moderator = serializers.SerializerMethodField()
def get_is_moderator(self, instance):
"""Return true if user is a moderator for the channel"""
request = self.context.get("request")
if request and is_moderator(request.user, instance.name):
return True
return False
def get_lists(self, instance):
"""Returns the field's list of UserLists"""
return [
UserListSerializer(field_list.field_list).data
for field_list in instance.lists.all().order_by("position")
]
class Meta:
model = FieldChannel
fields = (
"name",
"title",
"about",
"public_description",
"subfields",
"featured_list",
"lists",
"about",
"avatar",
"avatar_medium",
"avatar_small",
"banner",
"widget_list",
"updated_on",
"created_on",
"id",
"ga_tracking_id",
"is_moderator",
)
read_only_fields = fields
class FieldChannelCreateSerializer(serializers.ModelSerializer):
"""Write serializer for FieldChannel"""
featured_list = serializers.PrimaryKeyRelatedField(
many=False,
allow_null=True,
allow_empty=True,
required=False,
queryset=UserList.objects.filter(privacy_level=PrivacyLevel.public.value),
)
lists = WriteableSerializerMethodField()
subfields = WriteableSerializerMethodField()
class Meta:
model = FieldChannel
fields = (
"name",
"title",
"public_description",
"subfields",
"featured_list",
"lists",
"about",
)
def validate_lists(self, lists: List[int]):
"""Validator for lists"""
if len(lists) > 0:
try:
valid_list_ids = set(
UserList.objects.filter(
id__in=lists, privacy_level=PrivacyLevel.public.value
).values_list("id", flat=True)
)
except (ValueError, TypeError):
raise ValidationError("List ids must be integers")
missing = set(lists).difference(valid_list_ids)
if missing:
raise ValidationError(f"Invalid list ids: {missing}")
return {"lists": lists}
def get_lists(self, instance):
"""Returns the field's list of UserLists"""
return [
UserListSerializer(field_list.field_list).data
for field_list in instance.lists.all()
.prefetch_related("field_list", "field_channel")
.order_by("position")
]
def validate_subfields(self, subfields: List[str]):
"""Validator for subfields"""
if len(subfields) > 0:
try:
valid_subfield_names = set(
FieldChannel.objects.filter(name__in=subfields).values_list(
"name", flat=True
)
)
missing = set(subfields).difference(valid_subfield_names)
if missing:
raise ValidationError(f"Invalid subfield names: {missing}")
except (ValueError, TypeError):
raise ValidationError("Subfields must be strings")
return {"subfields": subfields}
def get_subfields(self, instance):
"""Returns the list of topics"""
return [
SubfieldSerializer(subfield).data
for subfield in instance.subfields.all()
.prefetch_related("field_channel")
.order_by("position")
]
def upsert_field_lists(self, instance, validated_data):
"""Update or create field lists for a new or updated field channel"""
if "lists" not in validated_data:
return
field_lists = validated_data.pop("lists")
new_lists = set()
former_lists = list(instance.lists.values_list("id", flat=True))
for (idx, list_pk) in enumerate(field_lists):
userlist = UserList.objects.filter(pk=list_pk).first()
if userlist:
field_list, _ = FieldList.objects.update_or_create(
field_channel=instance,
field_list=userlist,
defaults={"position": idx},
)
new_lists.add(field_list)
removed_lists = list(set(former_lists) - {list.id for list in new_lists})
with transaction.atomic():
instance.lists.set(new_lists)
instance.lists.filter(id__in=removed_lists).delete()
def upsert_subfields(self, instance, validated_data):
"""Update or create subfields for a new or updated field channel"""
if "subfields" not in validated_data:
return
subfields = validated_data.pop("subfields")
new_subfields = set()
former_subfields = list(
instance.subfields.values_list("field_channel__name", flat=True)
)
for (idx, field_name) in enumerate(subfields):
field_channel = FieldChannel.objects.filter(name=field_name).first()
if field_channel and field_channel.pk != instance.pk:
subfield, _ = Subfield.objects.update_or_create(
parent_channel=instance,
field_channel=field_channel,
defaults={"position": idx},
)
new_subfields.add(subfield)
removed_subfields = list(
set(former_subfields)
- {subfield.field_channel.name for subfield in new_subfields}
)
with transaction.atomic():
instance.subfields.set(new_subfields)
instance.subfields.filter(
field_channel__name__in=removed_subfields
).delete()
def create(self, validated_data):
base_field_data = copy.deepcopy(validated_data)
for key in ("subfields", "lists"):
base_field_data.pop(key, None)
with transaction.atomic():
field_channel = super().create(base_field_data)
self.upsert_field_lists(field_channel, validated_data)
self.upsert_subfields(field_channel, validated_data)
return field_channel
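# Illustrative usage sketch (standard DRF flow; the field values below are
# hypothetical, not taken from this project):
#   serializer = FieldChannelCreateSerializer(
#       data={"name": "physics", "title": "Physics", "subfields": [], "lists": []}
#   )
#   serializer.is_valid(raise_exception=True)
#   field_channel = serializer.save()  # dispatches to create() above, inside a transaction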
class FieldChannelWriteSerializer(FieldChannelCreateSerializer, ChannelAppearanceMixin):
"""Similar to FieldChannelCreateSerializer, with read-only name"""
class Meta:
model = FieldChannel
fields = (
"name",
"title",
"public_description",
"subfields",
"featured_list",
"lists",
"about",
"avatar",
"banner",
)
read_only_fields = ("name",)
def update(self, instance, validated_data):
"""Update an existing field channel"""
self.upsert_field_lists(instance, validated_data)
self.upsert_subfields(instance, validated_data)
avatar = validated_data.pop("avatar", None)
if avatar:
instance.avatar.save(
f"field_channel_avatar_{instance.name}.jpg", avatar, save=False
)
instance.save(update_fields=["avatar"])
banner = validated_data.pop("banner", None)
if banner:
instance.banner.save(
f"field_channel_banner_{instance.name}.jpg", banner, save=False
)
instance.save(update_fields=["banner"])
return super().update(instance, validated_data)
class FieldModeratorSerializer(serializers.Serializer):
"""Serializer for moderators"""
moderator_name = WriteableSerializerMethodField()
email = WriteableSerializerMethodField()
full_name = serializers.SerializerMethodField()
def get_moderator_name(self, instance):
"""Returns the name for the moderator"""
return instance.username
def get_email(self, instance):
"""Get the email from the associated user"""
return (
User.objects.filter(username=instance.username)
.values_list("email", flat=True)
.first()
)
def get_full_name(self, instance):
"""Get the full name of the associated user"""
return (
Profile.objects.filter(user__username=instance.username)
.values_list("name", flat=True)
.first()
)
def validate_moderator_name(self, value):
"""Validate moderator name"""
return {"moderator_name": validate_username(value)}
def validate_email(self, value):
"""Validate email"""
return {"email": validate_email(value)}
def create(self, validated_data):
field_name = self.context["view"].kwargs["field_name"]
moderator_name = validated_data.get("moderator_name")
email = validated_data.get("email")
if email and moderator_name:
raise ValueError("Only one of moderator_name, email should be specified")
if moderator_name:
username = moderator_name
elif email:
username = User.objects.get(email__iexact=email).username
else:
raise ValueError("Missing moderator_name or email")
user = User.objects.get(username=username)
add_user_role(
FieldChannel.objects.get(name=field_name), FIELD_ROLE_MODERATORS, user
)
return user
|
{
"content_hash": "136f3979d19eaab832b53643a6f0a4f0",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 88,
"avg_line_length": 35.08150470219436,
"alnum_prop": 0.5983379501385041,
"repo_name": "mitodl/open-discussions",
"id": "a0312313a991219c760cce2c8559e32cd13ddb99",
"size": "11191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "channels_fields/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "78316"
},
{
"name": "JavaScript",
"bytes": "1704037"
},
{
"name": "Procfile",
"bytes": "675"
},
{
"name": "Python",
"bytes": "2264549"
},
{
"name": "SCSS",
"bytes": "133442"
},
{
"name": "Shell",
"bytes": "11787"
},
{
"name": "TypeScript",
"bytes": "307134"
}
],
"symlink_target": ""
}
|
from pyzabbix import ZabbixAPI
import sys
username="wentian"
password="65107318"
zapi = ZabbixAPI("http://zabbix.ustack.com")
zapi.login(username, password)
print "Connected To Zabbix API Version %s" % zapi.api_version()
hostgroup_name = sys.argv[1]
status = int(sys.argv[2])
def get_hostgroup_id(hostgroup_name):
search = {"name": hostgroup_name}
hostgroups = zapi.hostgroup.get(search=search)
if len(hostgroups) > 0:
return hostgroups[0]['groupid']
def get_hosts(groupid):
groupids = [groupid]
hosts = zapi.host.get(groupids=groupids)
return hosts
def update_machine_set_status(hosts, status=0):
result_hosts = []
for host in hosts:
result_hosts.append({"hostid": host["hostid"]})
zapi.host.massupdate(hosts=result_hosts, status=status)
if __name__ == "__main__":
hosts = get_hosts(get_hostgroup_id(hostgroup_name))
update_machine_set_status(hosts, status)
|
{
"content_hash": "e6abf85719366631f4a0ed55f179ebb2",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 64,
"avg_line_length": 27.96969696969697,
"alnum_prop": 0.6890574214517876,
"repo_name": "zhaozhilong1993/demon",
"id": "caa79dbfed081a807510debd41511326ef86661b",
"size": "942",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "roles/software/files/reset_zabbix_hosts.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "480"
},
{
"name": "HTML",
"bytes": "162226"
},
{
"name": "Pascal",
"bytes": "3882"
},
{
"name": "Puppet",
"bytes": "616997"
},
{
"name": "Python",
"bytes": "102605"
},
{
"name": "Ruby",
"bytes": "2126480"
},
{
"name": "Shell",
"bytes": "15608"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from nose.tools import ok_, eq_
from ..forms import ReportForm
from ..models import Report
class ReportFormTest(TestCase):
def tearDown(self):
Report.objects.all().delete()
def test_invalid_data(self):
data = {'url': 'random', 'description': 'Invalid'}
form = ReportForm(data)
eq_(form.is_valid(), False)
def test_invalid_domain(self):
data = {'url': 'http://invalid.mozillapopcorn.org/inappropriate/',
'description': 'This is outrageous!'}
form = ReportForm(data)
eq_(form.is_valid(), False)
def test_valid_data(self):
data = {'url': 'http://test.mozillapopcorn.org/inappropriate/',
'description': 'This is outrageous!'}
form = ReportForm(data)
ok_(form.is_valid())
|
{
"content_hash": "4be261509cd0770213543137876516a4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 74,
"avg_line_length": 28.82758620689655,
"alnum_prop": 0.6088516746411483,
"repo_name": "mozilla/popcorn_maker",
"id": "b37f772e294d11ab14dd77b621bc8e5a7bce6683",
"size": "836",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "popcorn_gallery/reports/tests/forms_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "3779620"
},
{
"name": "Puppet",
"bytes": "11668"
},
{
"name": "Python",
"bytes": "5113791"
},
{
"name": "Ruby",
"bytes": "1970"
},
{
"name": "Shell",
"bytes": "2419"
}
],
"symlink_target": ""
}
|
from __future__ import (unicode_literals, division, absolute_import, print_function)
REMOVE_THIS_KEY = object()
def mergeargs(argvalue, remove=False):
if not argvalue:
return None
r = {}
for subval in argvalue:
mergedicts(r, dict([subval]), remove=remove)
return r
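# Illustrative example (key/value pairs are hypothetical): each (key, value) pair
# is merged into the result dict in order.
#   mergeargs([('colorscheme', 'solarized'), ('ext', {'vim': {'theme': 'default'}})])
#   # -> {'colorscheme': 'solarized', 'ext': {'vim': {'theme': 'default'}}}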
def _clear_special_values(d):
'''Remove REMOVE_THIS_KEY values from dictionary
'''
l = [d]
while l:
i = l.pop()
pops = []
for k, v in i.items():
if v is REMOVE_THIS_KEY:
pops.append(k)
elif isinstance(v, dict):
l.append(v)
for k in pops:
i.pop(k)
def mergedicts(d1, d2, remove=True):
'''Recursively merge two dictionaries
First dictionary is modified in-place.
'''
_setmerged(d1, d2)
for k in d2:
if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], dict):
mergedicts(d1[k], d2[k], remove)
elif remove and d2[k] is REMOVE_THIS_KEY:
d1.pop(k, None)
else:
if remove and isinstance(d2[k], dict):
_clear_special_values(d2[k])
d1[k] = d2[k]
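# Illustrative example of the in-place merge, including REMOVE_THIS_KEY handling
# (values are hypothetical):
#   d1 = {'a': {'x': 1, 'y': 2}, 'b': 3}
#   d2 = {'a': {'y': REMOVE_THIS_KEY, 'z': 4}, 'c': 5}
#   mergedicts(d1, d2)
#   # d1 is now {'a': {'x': 1, 'z': 4}, 'b': 3, 'c': 5}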
def mergedefaults(d1, d2):
'''Recursively merge two dictionaries, keeping existing values
First dictionary is modified in-place.
'''
for k in d2:
if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], dict):
mergedefaults(d1[k], d2[k])
else:
d1.setdefault(k, d2[k])
def _setmerged(d1, d2):
if hasattr(d1, 'setmerged'):
d1.setmerged(d2)
def mergedicts_copy(d1, d2):
'''Recursively merge two dictionaries.
Dictionaries are not modified. Copying happens only if necessary. Assumes
that first dictionary supports .copy() method.
'''
ret = d1.copy()
_setmerged(ret, d2)
for k in d2:
if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], dict):
ret[k] = mergedicts_copy(d1[k], d2[k])
else:
ret[k] = d2[k]
return ret
def updated(d, *args, **kwargs):
'''Copy dictionary and update it with provided arguments
'''
d = d.copy()
d.update(*args, **kwargs)
return d
|
{
"content_hash": "818aa6de04f0a99d2fb9b9f683f6fdd9",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 84,
"avg_line_length": 21.988505747126435,
"alnum_prop": 0.6576058546785154,
"repo_name": "S0lll0s/powerline",
"id": "c06ab30ccd24b16b2daec45ed08aaa0a97ef2a59",
"size": "1943",
"binary": false,
"copies": "18",
"ref": "refs/heads/develop",
"path": "powerline/lib/dict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3781"
},
{
"name": "Lua",
"bytes": "400"
},
{
"name": "Python",
"bytes": "785212"
},
{
"name": "Shell",
"bytes": "58319"
},
{
"name": "VimL",
"bytes": "17731"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division)
__metaclass__ = type
import pwd
import os
import pytest
from ansible import constants
from ansible.module_utils.six import StringIO
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_text
@pytest.fixture
def cfgparser():
CFGDATA = StringIO("""
[defaults]
defaults_one = 'data_defaults_one'
[level1]
level1_one = 'data_level1_one'
""")
p = configparser.ConfigParser()
p.readfp(CFGDATA)
return p
@pytest.fixture
def user():
user = {}
user['uid'] = os.geteuid()
pwd_entry = pwd.getpwuid(user['uid'])
user['username'] = pwd_entry.pw_name
user['home'] = pwd_entry.pw_dir
return user
@pytest.fixture
def cfg_file():
data = '/ansible/test/cfg/path'
old_cfg_file = constants.CONFIG_FILE
constants.CONFIG_FILE = os.path.join(data, 'ansible.cfg')
yield data
constants.CONFIG_FILE = old_cfg_file
@pytest.fixture
def null_cfg_file():
old_cfg_file = constants.CONFIG_FILE
del constants.CONFIG_FILE
yield
constants.CONFIG_FILE = old_cfg_file
@pytest.fixture
def cwd():
data = '/ansible/test/cwd/'
old_cwd = os.getcwd
os.getcwd = lambda: data
old_cwdu = None
if hasattr(os, 'getcwdu'):
old_cwdu = os.getcwdu
os.getcwdu = lambda: to_text(data)
yield data
os.getcwd = old_cwd
if hasattr(os, 'getcwdu'):
os.getcwdu = old_cwdu
class TestMkBoolean:
def test_bools(self):
assert constants.mk_boolean(True) is True
assert constants.mk_boolean(False) is False
def test_none(self):
assert constants.mk_boolean(None) is False
def test_numbers(self):
assert constants.mk_boolean(1) is True
assert constants.mk_boolean(0) is False
assert constants.mk_boolean(0.0) is False
# Current mk_boolean doesn't consider these to be true values
# def test_other_numbers(self):
# assert constants.mk_boolean(2) is True
# assert constants.mk_boolean(-1) is True
# assert constants.mk_boolean(0.1) is True
def test_strings(self):
assert constants.mk_boolean("true") is True
assert constants.mk_boolean("TRUE") is True
assert constants.mk_boolean("t") is True
assert constants.mk_boolean("yes") is True
assert constants.mk_boolean("y") is True
assert constants.mk_boolean("on") is True
|
{
"content_hash": "7f66e979750e8760824d103d387c0412",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 61,
"avg_line_length": 23.70873786407767,
"alnum_prop": 0.6601146601146601,
"repo_name": "thaim/ansible",
"id": "fa9902e77539aed51cc54307c89e89b00662608d",
"size": "3203",
"binary": false,
"copies": "180",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/test_constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from django.test.client import Client
from django.urls import reverse
from django.test import TestCase
from nose.tools import *
from django.contrib.auth import get_user_model
User = get_user_model()
from amon.apps.settings.models import data_retention_model
class TestDataRetention(TestCase):
def setUp(self):
self.c = Client()
self.user = User.objects.create_user(password='qwerty', email='foo@test.com')
self.c.login(username='foo@test.com', password='qwerty')
def tearDown(self):
self.c.logout()
User.objects.all().delete()
def test_data_retention_form(self):
url = reverse('settings_data')
response = self.c.post(url, {'check_every': 60, 'keep_data': 30})
result = data_retention_model.get_one()
assert result['check_every'] == 60
assert result['keep_data'] == 30
response = self.c.post(url, {'check_every': 300, 'keep_data': 60})
result = data_retention_model.get_one()
assert result['check_every'] == 300
assert result['keep_data'] == 60
|
{
"content_hash": "44437953b4756240b491d89eabfffb4e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 85,
"avg_line_length": 24.444444444444443,
"alnum_prop": 0.6345454545454545,
"repo_name": "martinrusev/amonone",
"id": "4f30f9e7f2d7fcd6599ef5b82a4604f5c2a68299",
"size": "1100",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "amon/apps/settings/tests/forms_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77950"
},
{
"name": "JavaScript",
"bytes": "28811"
},
{
"name": "Python",
"bytes": "180983"
},
{
"name": "Ruby",
"bytes": "131"
},
{
"name": "Shell",
"bytes": "5652"
}
],
"symlink_target": ""
}
|
import argparse
from fabric.api import *
from fabric.contrib.files import exists
from dlab.meta_lib import *
import os
from dlab.fab import *
parser = argparse.ArgumentParser()
parser.add_argument('--cluster_name', type=str, default='')
parser.add_argument('--spark_version', type=str, default='')
parser.add_argument('--hadoop_version', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--spark_master', type=str, default='')
parser.add_argument('--keyfile', type=str, default='')
parser.add_argument('--notebook_ip', type=str, default='')
parser.add_argument('--datalake_enabled', type=str, default='false')
args = parser.parse_args()
def configure_notebook(keyfile, hoststring):
templates_dir = '/root/templates/'
scripts_dir = '/root/scripts/'
run('mkdir -p /tmp/{}/'.format(args.cluster_name))
if os.environ['notebook_multiple_clusters'] == 'true':
put(templates_dir + 'dataengine_interpreter_livy.json', '/tmp/{}/dataengine_interpreter.json'.format(args.cluster_name))
else:
put(templates_dir + 'dataengine_interpreter_spark.json', '/tmp/{}/dataengine_interpreter.json'.format(args.cluster_name))
put(templates_dir + 'notebook_spark-defaults_local.conf', '/tmp/{}/notebook_spark-defaults_local.conf'.format(args.cluster_name))
spark_master_ip = args.spark_master.split('//')[1].split(':')[0]
spark_memory = get_spark_memory(True, args.os_user, spark_master_ip, keyfile)
run('echo "spark.executor.memory {0}m" >> /tmp/{1}/notebook_spark-defaults_local.conf'.format(spark_memory, args.cluster_name))
if not exists('/usr/local/bin/zeppelin_dataengine_create_configs.py'):
put(scripts_dir + 'zeppelin_dataengine_create_configs.py', '/usr/local/bin/zeppelin_dataengine_create_configs.py', use_sudo=True)
sudo('chmod 755 /usr/local/bin/zeppelin_dataengine_create_configs.py')
if not exists('/usr/lib/python2.7/dlab/'):
sudo('mkdir -p /usr/lib/python2.7/dlab/')
put('/usr/lib/python2.7/dlab/*', '/usr/lib/python2.7/dlab/', use_sudo=True)
sudo('chmod a+x /usr/lib/python2.7/dlab/*')
if exists('/usr/lib64'):
sudo('ln -fs /usr/lib/python2.7/dlab /usr/lib64/python2.7/dlab')
if __name__ == "__main__":
env.hosts = "{}".format(args.notebook_ip)
env.user = args.os_user
env.key_filename = "{}".format(args.keyfile)
env.host_string = env.user + "@" + env.hosts
try:
region = os.environ['aws_region']
except:
region = ''
configure_notebook(args.keyfile, env.host_string)
livy_version = os.environ['notebook_livy_version']
r_enabled = os.environ['notebook_r_enabled']
sudo("/usr/bin/python /usr/local/bin/zeppelin_dataengine_create_configs.py "
"--cluster_name {} --spark_version {} --hadoop_version {} --os_user {} --spark_master {} --keyfile {} \
--notebook_ip {} --livy_version {} --multiple_clusters {} --region {} --datalake_enabled {} --r_enabled {}".
format(args.cluster_name, args.spark_version, args.hadoop_version, args.os_user, args.spark_master,
args.keyfile, args.notebook_ip, livy_version, os.environ['notebook_multiple_clusters'], region,
args.datalake_enabled, r_enabled))
|
{
"content_hash": "3035778644bea14d6c48f24926b7bd16",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 137,
"avg_line_length": 54.22950819672131,
"alnum_prop": 0.6689842805320435,
"repo_name": "epam/DLab",
"id": "9e55ca1738551d7bd97750c0efad95639f55cac3",
"size": "4077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infrastructure-provisioning/src/general/scripts/os/zeppelin_install_dataengine_kernels.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "81633"
},
{
"name": "HTML",
"bytes": "110323"
},
{
"name": "Java",
"bytes": "2473499"
},
{
"name": "Jupyter Notebook",
"bytes": "80955"
},
{
"name": "Python",
"bytes": "1861086"
},
{
"name": "R",
"bytes": "4894"
},
{
"name": "Ruby",
"bytes": "62731"
},
{
"name": "Shell",
"bytes": "18826"
},
{
"name": "TypeScript",
"bytes": "363308"
}
],
"symlink_target": ""
}
|
from django.db import models
class Ledjure(models.Model):
''' A ledjure of transactions'''
shoky = models.DecimalField(max_digits=6, decimal_places=2, null=True, blank=True)  # Yeah, not more than 4 digits!
shoky_description = models.TextField(blank=True, null=True)  # You better have a good explanation
necessity = models.DecimalField(max_digits=7, decimal_places=2, null=True, blank=True, default=0.0)  # 5 digits for now!
necessity_description = models.TextField(max_length=140, null=True, blank=True)  # It's important I can understand, explain less, Twitter style
time_stamp = models.DateField(null=True, blank=True)
|
{
"content_hash": "5835f06cdb45ccbb5a09e95f00921887",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 149,
"avg_line_length": 56.083333333333336,
"alnum_prop": 0.7087667161961367,
"repo_name": "NavaneethBhat/lekkachara",
"id": "c0dfee6975d6270112f520ee8c774e5ea398bf99",
"size": "673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracker/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8761"
}
],
"symlink_target": ""
}
|
"""Xgboost pyspark integration submodule for helper functions."""
import inspect
import json
import logging
import sys
from threading import Thread
from typing import Any, Callable, Dict, List, Set, Type
import pyspark
from pyspark import BarrierTaskContext, SparkContext
from pyspark.sql.session import SparkSession
from xgboost.tracker import RabitTracker
from xgboost import collective
def get_class_name(cls: Type) -> str:
"""Return the class name."""
return f"{cls.__module__}.{cls.__name__}"
def _get_default_params_from_func(
func: Callable, unsupported_set: Set[str]
) -> Dict[str, Any]:
"""Returns a dictionary of parameters and their default value of function fn. Only
the parameters with a default value will be included.
"""
sig = inspect.signature(func)
filtered_params_dict = {}
for parameter in sig.parameters.values():
# Remove parameters without a default value and those in the unsupported_set
if (
parameter.default is not parameter.empty
and parameter.name not in unsupported_set
):
filtered_params_dict[parameter.name] = parameter.default
return filtered_params_dict
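# Illustrative example (the function below is hypothetical, not part of xgboost):
#   def fit(X, y, max_depth=6, learning_rate=0.3, n_jobs=None): ...
#   _get_default_params_from_func(fit, unsupported_set={"n_jobs"})
#   # -> {"max_depth": 6, "learning_rate": 0.3}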
class CommunicatorContext:
"""A context controlling collective communicator initialization and finalization.
This isn't specifically necessary (see Part 3), but it is more understandable
coding-wise.
"""
def __init__(self, context: BarrierTaskContext, **args: Any) -> None:
self.args = args
self.args["DMLC_TASK_ID"] = str(context.partitionId())
def __enter__(self) -> None:
collective.init(**self.args)
def __exit__(self, *args: Any) -> None:
collective.finalize()
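# Illustrative usage sketch inside a barrier task; `rabit_args` stands for the
# tracker environment produced by _get_rabit_args (the name is hypothetical):
#   context = BarrierTaskContext.get()
#   with CommunicatorContext(context, **rabit_args):
#       ...  # collective.* calls are valid inside this block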
def _start_tracker(context: BarrierTaskContext, n_workers: int) -> Dict[str, Any]:
"""Start Rabit tracker with n_workers"""
env: Dict[str, Any] = {"DMLC_NUM_WORKER": n_workers}
host = _get_host_ip(context)
rabit_context = RabitTracker(host_ip=host, n_workers=n_workers)
env.update(rabit_context.worker_envs())
rabit_context.start(n_workers)
thread = Thread(target=rabit_context.join)
thread.daemon = True
thread.start()
return env
def _get_rabit_args(context: BarrierTaskContext, n_workers: int) -> Dict[str, Any]:
"""Get rabit context arguments to send to each worker."""
env = _start_tracker(context, n_workers)
return env
def _get_host_ip(context: BarrierTaskContext) -> str:
"""Gets the hostIP for Spark. This essentially gets the IP of the first worker."""
task_ip_list = [info.address.split(":")[0] for info in context.getTaskInfos()]
return task_ip_list[0]
def _get_args_from_message_list(messages: List[str]) -> Dict[str, Any]:
"""A function to send/recieve messages in barrier context mode"""
output = ""
for message in messages:
if message != "":
output = message
break
return json.loads(output)
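# Illustrative example (message contents are hypothetical): the first non-empty
# message is parsed as JSON and returned.
#   _get_args_from_message_list(["", '{"DMLC_TRACKER_URI": "10.0.0.1"}'])
#   # -> {"DMLC_TRACKER_URI": "10.0.0.1"}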
def _get_spark_session() -> SparkSession:
"""Get or create spark session. Note: This function can only be invoked from driver
side.
"""
if pyspark.TaskContext.get() is not None:
# This is a safety check.
raise RuntimeError(
"_get_spark_session should not be invoked from executor side."
)
return SparkSession.builder.getOrCreate()
def get_logger(name: str, level: str = "INFO") -> logging.Logger:
"""Gets a logger by name, or creates and configures it for the first time."""
logger = logging.getLogger(name)
logger.setLevel(level)
# If the logger is already configured, skip configuring it again
if not logger.handlers and not logging.getLogger().handlers:
handler = logging.StreamHandler(sys.stderr)
logger.addHandler(handler)
return logger
def _get_max_num_concurrent_tasks(spark_context: SparkContext) -> int:
"""Gets the current max number of concurrent tasks."""
# pylint: disable=protected-access
# spark 3.1 and above has a different API for fetching max concurrent tasks
if spark_context._jsc.sc().version() >= "3.1":
return spark_context._jsc.sc().maxNumConcurrentTasks(
spark_context._jsc.sc().resourceProfileManager().resourceProfileFromId(0)
)
return spark_context._jsc.sc().maxNumConcurrentTasks()
def _is_local(spark_context: SparkContext) -> bool:
"""Whether it is Spark local mode"""
# pylint: disable=protected-access
return spark_context._jsc.sc().isLocal()
def _get_gpu_id(task_context: BarrierTaskContext) -> int:
"""Get the gpu id from the task resources"""
if task_context is None:
# This is a safety check.
raise RuntimeError("_get_gpu_id should not be invoked from driver side.")
resources = task_context.resources()
if "gpu" not in resources:
raise RuntimeError(
"Couldn't get the gpu id, Please check the GPU resource configuration"
)
# return the first gpu id.
return int(resources["gpu"].addresses[0].strip())
|
{
"content_hash": "e8db937da023cf403eadb12dec9451f7",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 87,
"avg_line_length": 34.342465753424655,
"alnum_prop": 0.6747108097327483,
"repo_name": "dmlc/xgboost",
"id": "36705459ae4f6f4f308b0c92e2028916b8c0fe07",
"size": "5014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-package/xgboost/spark/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1383"
},
{
"name": "C",
"bytes": "23067"
},
{
"name": "C++",
"bytes": "2182522"
},
{
"name": "CMake",
"bytes": "52394"
},
{
"name": "CSS",
"bytes": "3812"
},
{
"name": "Cuda",
"bytes": "855374"
},
{
"name": "Dockerfile",
"bytes": "2364"
},
{
"name": "Groovy",
"bytes": "1251"
},
{
"name": "Java",
"bytes": "206549"
},
{
"name": "M4",
"bytes": "2131"
},
{
"name": "Makefile",
"bytes": "8179"
},
{
"name": "PowerShell",
"bytes": "4308"
},
{
"name": "Python",
"bytes": "1189411"
},
{
"name": "R",
"bytes": "342898"
},
{
"name": "Scala",
"bytes": "471040"
},
{
"name": "Shell",
"bytes": "45815"
},
{
"name": "TeX",
"bytes": "913"
}
],
"symlink_target": ""
}
|
"""Dump memory that does not belong to any symbol."""
import sys
from elftools.elf.elffile import ELFFile # type: ignore
import memdf.collect
import memdf.report
import memdf.select
import memdf.util.config  # provides memdf.util.config.CONFIG used below
from memdf import Config, ConfigDescription
CONFIG: ConfigDescription = {
**memdf.util.config.CONFIG,
**memdf.select.SECTION_CONFIG,
**memdf.select.REGION_CONFIG,
**memdf.report.REPORT_LIMIT_CONFIG,
**memdf.report.OUTPUT_FILE_CONFIG
}
def hexdump(data, start, length, address=0):
while length > 0:
iaddress = address & ~0xF
h = ''
s = ''
for i in range(0, 16):
if length == 0 or (iaddress + i < address):
h += ' --'
s += ' '
else:
b = data[start]
start += 1
length -= 1
address += 1
h += f' {b:02X}'
c = chr(b)
if c.isascii() and c.isprintable():
s += c
else:
s += '.'
yield f'{iaddress:08X}: {h} {s}'
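# Illustrative usage sketch (added; values are hypothetical): dump five bytes
# of a buffer as if they lived at address 0x1002.  Cells of the 16-byte row
# outside the requested range render as ' --' and non-printable bytes as '.'.
#   for line in hexdump(b'ABC\x00\x7f', 0, 5, address=0x1002):
#       print(line)   # e.g. "00001000:  -- -- 41 42 43 00 7F ...   ABC.."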
def main(argv):
status = 0
try:
config = Config().init(CONFIG)
config.argparse.add_argument('inputs', metavar='FILE', nargs='+')
config.parse(argv)
config['collect.method'] = 'elftools'
config['args.tag_inputs'] = True
dfs = memdf.collect.collect_files(config)
elf = {}
for filename in config['args.inputs']:
elf[filename] = {
'elffile': ELFFile(open(filename, 'rb')),
'section': {},
'data': {},
'limit': {},
}
with memdf.report.open_output(config) as fp:
for i in dfs['gap'].itertuples():
e = elf[i.input]
if i.section in e['section']:
section = e['section'][i.section]
data = e['data'][i.section]
limit = e['limit'][i.section]
else:
section = e['elffile'].get_section_by_name(i.section)
data = section.data()
limit = memdf.select.get_limit(
config, 'section', i.section)
e['section'][i.section] = section
e['data'][i.section] = data
e['limit'][i.section] = limit
if limit and i.size < limit:
continue
offset = i.address - section['sh_addr']
assert section['sh_size'] == len(data)
print('\n{:08X} length {} in section {} of {}'.format(
i.address, i.size, i.section, i.input), file=fp)
                for line in hexdump(data, offset, i.size, i.address):
                    print(line, file=fp)
except Exception as exception:
raise exception
return status
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
{
"content_hash": "56984e29358c05b9d5b03791683c7c90",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 73,
"avg_line_length": 30.729166666666668,
"alnum_prop": 0.4711864406779661,
"repo_name": "project-chip/connectedhomeip",
"id": "21db90650144d45b4bab054ef86599a6e3f294b8",
"size": "3561",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/tools/memory/gaps.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1759301"
},
{
"name": "C++",
"bytes": "19104548"
},
{
"name": "CMake",
"bytes": "140510"
},
{
"name": "Dockerfile",
"bytes": "50353"
},
{
"name": "Emacs Lisp",
"bytes": "1042"
},
{
"name": "Java",
"bytes": "167719"
},
{
"name": "JavaScript",
"bytes": "2106"
},
{
"name": "Jinja",
"bytes": "22322"
},
{
"name": "Objective-C",
"bytes": "930838"
},
{
"name": "Objective-C++",
"bytes": "435348"
},
{
"name": "Python",
"bytes": "1931007"
},
{
"name": "Shell",
"bytes": "195843"
},
{
"name": "Tcl",
"bytes": "311"
},
{
"name": "ZAP",
"bytes": "584219"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.auth.backends import ModelBackend
class ShibbolethUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``OpenIdUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, shib_meta):
"""
        Authentication is performed by the OpenId Connect login procedure;
        the username returned by that process is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
        if not shib_meta:
            return None
user = None
username = self.clean_username(shib_meta['eppn'])
UserModel = get_user_model()
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = UserModel.objects.get_or_create(**{
UserModel.USERNAME_FIELD: username,
})
if created:
user = self.configure_user(user, shib_meta)
else:
try:
user = UserModel.objects.get_by_natural_key(username)
except UserModel.DoesNotExist:
pass
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
return username
def configure_user(self, user, shib_meta):
"""
Configures a user after creation and returns the updated user.
By default, returns the user unmodified.
"""
        user.first_name = shib_meta['givenName'].split(";")[0]
        user.last_name = shib_meta['sn'].split(";")[0]
        user.email = shib_meta['email'].split(";")[0]
user.save()
return user
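# Illustrative sketch (added; values are hypothetical): the middleware is
# expected to hand over the attributes released by the identity provider as a
# plain dict, multi-valued attributes joined with ';'.
#   shib_meta = {
#       'eppn': 'jdoe@example.org',
#       'givenName': 'John;Johnny',
#       'sn': 'Doe',
#       'email': 'jdoe@example.org',
#   }
#   user = ShibbolethUserBackend().authenticate(shib_meta)
#   # fetches or creates the user 'jdoe@example.org' and copies the first
#   # value of each multi-valued attribute onto first_name/last_name/email.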
|
{
"content_hash": "3858c87f5f0fdab42f78309ce2cef712",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 39.71830985915493,
"alnum_prop": 0.6365248226950354,
"repo_name": "biancini/Django-AccountLinking",
"id": "155d3e2154e507e17646f8cb3dac2408d34df23d",
"size": "2820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linking_service/shib_django/backends.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6592"
},
{
"name": "Python",
"bytes": "87257"
}
],
"symlink_target": ""
}
|
import base64
import re
import marshmallow
from nose.tools import assert_equal, assert_in, assert_true, raises
from . import BaseTestCase
from tuxedo_mask import models, views
from tuxedo_mask.views import users_view
class TestContainsAtLeast:
def test(self):
value = 'foo'
validator = users_view.ContainsAtLeast(choices=['f'])
assert_equal(validator(value=value), value)
@raises(marshmallow.exceptions.ValidationError)
def test_raises_validation_error(self):
value = 'foo'
validator = users_view.ContainsAtLeast(choices=['x'])
validator(value=value)
def test_formats_error_message(self):
value = 'foo'
choices = ['f']
        error_pattern = r'input (.+) choices (.+) count (\d+)'
error = 'input {input} choices {choices} count {count}'
validator = users_view.ContainsAtLeast(choices=choices, error=error)
error_message = validator._format_error(value=value)
result = re.match(pattern=error_pattern, string=error_message)
assert_true(result)
assert_equal(result.groups()[0], value)
assert_in(choices[0], result.groups()[1])
assert_equal(result.groups()[2], '1')
class TestUsersView(BaseTestCase):
@property
def _Model(self):
return models.Users
@property
def _View(self):
return views.UsersView
@property
def fields(self):
return ['username', 'password']
@property
def values(self):
encoded_password = (
base64.b64encode('Foo45678'.encode('utf-8')).decode('utf-8'))
return ['foo', encoded_password]
def help_validate(self, field, value, keyword):
value = base64.b64encode(value.encode('utf-8')).decode('utf-8')
super().help_validate(field=field, value=value, keyword=keyword)
def test_username_minimum_length(self):
self.help_validate(field='username',
value=str(),
keyword='length')
def test_username_maximum_length(self):
self.help_validate(field='username',
value='x' * 50,
keyword='length')
def test_password_minimum_length(self):
self.help_validate(field='password',
value='Foo4567',
keyword='length')
def test_password_contains_lowercase_characters(self):
self.help_validate(field='password',
value='FOO45678',
keyword='lowercase')
def test_password_contains_uppercase_characters(self):
self.help_validate(field='password',
value='foo45678',
keyword='uppercase')
def test_password_contains_numeric_characters(self):
self.help_validate(field='password',
value='Fooooooo',
keyword='numeric')
def test_decode_password_does_not_mutate_inplace(self):
password = base64.b64encode('foo'.encode('utf-8'))
data = {'password': password}
self._View().decode_password(data=data)
assert_equal(data['password'], password)
|
{
"content_hash": "03273995f2316f812512b056962c3c68",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 76,
"avg_line_length": 32.18,
"alnum_prop": 0.5922933499067744,
"repo_name": "dnguyen0304/tuxedo-mask",
"id": "cd1e5f239ff18bd86dc30b36df454357eebbda16",
"size": "3243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tuxedo_mask/views/tests/test_users_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52980"
},
{
"name": "Shell",
"bytes": "6402"
}
],
"symlink_target": ""
}
|
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from .. import exc as sa_exc, inspect
from .. import util, log, event
from ..sql import util as sql_util, visitors
from .. import sql
from . import (
attributes, interfaces, exc as orm_exc, loading,
unitofwork, util as orm_util
)
from .state import InstanceState
from .util import _none_set
from . import properties
from .interfaces import (
LoaderStrategy, StrategizedProperty
)
from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE
from .session import _state_session
import itertools
def _register_attribute(
strategy, mapper, useobject,
compare_function=None,
typecallable=None,
uselist=False,
callable_=None,
proxy_property=None,
active_history=False,
impl_class=None,
**kw
):
prop = strategy.parent_property
attribute_ext = list(util.to_list(prop.extension, default=[]))
listen_hooks = []
if useobject and prop.single_parent:
listen_hooks.append(single_parent_validator)
if prop.key in prop.parent.validators:
fn, opts = prop.parent.validators[prop.key]
listen_hooks.append(
lambda desc, prop: orm_util._validator_events(
desc,
prop.key, fn, **opts)
)
if useobject:
listen_hooks.append(unitofwork.track_cascade_events)
# need to assemble backref listeners
# after the singleparentvalidator, mapper validator
backref = kw.pop('backref', None)
if backref:
listen_hooks.append(
lambda desc, prop: attributes.backref_listeners(
desc,
backref,
uselist
)
)
for m in mapper.self_and_descendants:
if prop is m._props.get(prop.key):
desc = attributes.register_attribute_impl(
m.class_,
prop.key,
parent_token=prop,
uselist=uselist,
compare_function=compare_function,
useobject=useobject,
extension=attribute_ext,
trackparent=useobject and (
prop.single_parent
or prop.direction is interfaces.ONETOMANY),
typecallable=typecallable,
callable_=callable_,
active_history=active_history,
impl_class=impl_class,
send_modified_events=not useobject or not prop.viewonly,
doc=prop.doc,
**kw
)
for hook in listen_hooks:
hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
"""Represent the a non-instrumented MapperProperty.
The polymorphic_on argument of mapper() often results in this,
if the argument is against the with_polymorphic selectable.
"""
__slots__ = 'columns',
def __init__(self, parent):
super(UninstrumentedColumnLoader, self).__init__(parent)
self.columns = self.parent_property.columns
def setup_query(
self, context, entity, path, loadopt, adapter,
column_collection=None, **kwargs):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
def create_row_processor(
self, context, path, loadopt,
mapper, result, adapter, populators):
pass
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
__slots__ = 'columns', 'is_composite'
def __init__(self, parent):
super(ColumnLoader, self).__init__(parent)
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, 'composite_class')
def setup_query(
self, context, entity, path, loadopt,
adapter, column_collection, memoized_populators, **kwargs):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
fetch = self.columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def init_class_attribute(self, mapper):
self.is_class_level = True
coltype = self.columns[0].type
# TODO: check all columns ? check for foreign key as well?
active_history = self.parent_property.active_history or \
self.columns[0].primary_key or \
mapper.version_id_col in set(self.columns)
_register_attribute(
self, mapper, useobject=False,
compare_function=coltype.compare_values,
active_history=active_history
)
def create_row_processor(
self, context, path,
loadopt, mapper, result, adapter, populators):
# look through list of columns represented here
# to see which, if any, is present in the row.
for col in self.columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
class DeferredColumnLoader(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
__slots__ = 'columns', 'group'
def __init__(self, parent):
super(DeferredColumnLoader, self).__init__(parent)
if hasattr(self.parent_property, 'composite_class'):
raise NotImplementedError("Deferred loading for composite "
"types not implemented yet")
self.columns = self.parent_property.columns
self.group = self.parent_property.group
def create_row_processor(
self, context, path, loadopt,
mapper, result, adapter, populators):
# this path currently does not check the result
# for the column; this is because in most cases we are
# working just with the setup_query() directive which does
# not support this, and the behavior here should be consistent.
if not self.is_class_level:
set_deferred_for_local_state = \
self.parent_property._deferred_column_loader
populators["new"].append((self.key, set_deferred_for_local_state))
else:
populators["expire"].append((self.key, False))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self, mapper, useobject=False,
compare_function=self.columns[0].type.compare_values,
callable_=self._load_for_state,
expire_missing=False
)
def setup_query(
self, context, entity, path, loadopt,
adapter, column_collection, memoized_populators,
only_load_props=None, **kw):
if (
(
loadopt and
'undefer_pks' in loadopt.local_opts and
set(self.columns).intersection(
self.parent._should_undefer_in_wildcard)
)
or
(
loadopt and
self.group and
loadopt.local_opts.get('undefer_group', False) == self.group
)
or
(
only_load_props and self.key in only_load_props
)
):
self.parent_property._get_strategy_by_cls(ColumnLoader).\
setup_query(context, entity,
path, loadopt, adapter,
column_collection, memoized_populators, **kw)
elif self.is_class_level:
memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED
else:
memoized_populators[self.parent_property] = _DEFER_FOR_STATE
def _load_for_state(self, state, passive):
if not state.key:
return attributes.ATTR_EMPTY
if not passive & attributes.SQL_OK:
return attributes.PASSIVE_NO_RESULT
localparent = state.manager.mapper
if self.group:
toload = [
p.key for p in
localparent.iterate_properties
if isinstance(p, StrategizedProperty) and
isinstance(p.strategy, DeferredColumnLoader) and
p.group == self.group
]
else:
toload = [self.key]
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
session = _state_session(state)
if session is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"deferred load operation of attribute '%s' cannot proceed" %
(orm_util.state_str(state), self.key)
)
query = session.query(localparent)
if loading.load_on_ident(
query, state.key,
only_load_props=group, refresh_state=state) is None:
raise orm_exc.ObjectDeletedError(state)
return attributes.ATTR_WAS_SET
class LoadDeferredColumns(object):
"""serializable loader object used by DeferredColumnLoader"""
def __init__(self, key):
self.key = key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
localparent = state.manager.mapper
prop = localparent._props[key]
strategy = prop._strategies[DeferredColumnLoader]
return strategy._load_for_state(state, passive)
class AbstractRelationshipLoader(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
__slots__ = 'mapper', 'target', 'uselist'
def __init__(self, parent):
super(AbstractRelationshipLoader, self).__init__(parent)
self.mapper = self.parent_property.mapper
self.target = self.parent_property.target
self.uselist = self.parent_property.uselist
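# Orientation note (added, not part of the original module): the strategies
# below are selected by the ``lazy`` argument of ``relationship()``; a rough
# mapping, with a hypothetical ``Child`` class:
#   relationship(Child, lazy='select')    # -> LazyLoader (the default)
#   relationship(Child, lazy='joined')    # -> JoinedLoader
#   relationship(Child, lazy='subquery')  # -> SubqueryLoader
#   relationship(Child, lazy='noload')    # -> NoLoader
#   relationship(Child, lazy='immediate') # -> ImmediateLoader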
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="noload")
@properties.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=None".
"""
__slots__ = ()
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self, mapper,
useobject=True,
uselist=self.parent_property.uselist,
typecallable=self.parent_property.collection_class,
)
def create_row_processor(
self, context, path, loadopt, mapper,
result, adapter, populators):
def invoke_no_load(state, dict_, row):
state._initialize(self.key)
populators["new"].append((self.key, invoke_no_load))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy=True)
@properties.RelationshipProperty.strategy_for(lazy="select")
class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=True", that is loads when first accessed.
"""
__slots__ = (
'_lazywhere', '_rev_lazywhere', 'use_get', '_bind_to_col',
'_equated_columns', '_rev_bind_to_col', '_rev_equated_columns',
'_simple_lazy_clause')
def __init__(self, parent):
super(LazyLoader, self).__init__(parent)
join_condition = self.parent_property._join_condition
self._lazywhere, \
self._bind_to_col, \
self._equated_columns = join_condition.create_lazy_clause()
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns = join_condition.create_lazy_clause(
reverse_direction=True)
self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
# determine if our "lazywhere" clause is the same as the mapper's
# get() clause. then we can just use mapper.get()
self.use_get = not self.uselist and \
self.mapper._get_clause[0].compare(
self._lazywhere,
use_proxies=True,
equivalents=self.mapper._equivalent_columns
)
if self.use_get:
for col in list(self._equated_columns):
if col in self.mapper._equivalent_columns:
for c in self.mapper._equivalent_columns[col]:
self._equated_columns[c] = self._equated_columns[col]
self.logger.info("%s will use query.get() to "
"optimize instance loads", self)
def init_class_attribute(self, mapper):
self.is_class_level = True
active_history = (
self.parent_property.active_history or
self.parent_property.direction is not interfaces.MANYTOONE or
not self.use_get
)
# MANYTOONE currently only needs the
# "old" value for delete-orphan
# cascades. the required _SingleParentValidator
# will enable active_history
# in that case. otherwise we don't need the
# "old" value during backref operations.
_register_attribute(
self,
mapper,
useobject=True,
callable_=self._load_for_state,
uselist=self.parent_property.uselist,
backref=self.parent_property.back_populates,
typecallable=self.parent_property.collection_class,
active_history=active_history
)
def _memoized_attr__simple_lazy_clause(self):
criterion, bind_to_col = (
self._lazywhere,
self._bind_to_col
)
params = []
def visit_bindparam(bindparam):
bindparam.unique = False
if bindparam._identifying_key in bind_to_col:
params.append((
bindparam.key, bind_to_col[bindparam._identifying_key],
None))
else:
params.append((bindparam.key, None, bindparam.value))
criterion = visitors.cloned_traverse(
criterion, {}, {'bindparam': visit_bindparam}
)
return criterion, params
def _generate_lazy_clause(self, state, passive):
criterion, param_keys = self._simple_lazy_clause
if state is None:
return sql_util.adapt_criterion_to_null(
criterion, [key for key, ident, value in param_keys])
mapper = self.parent_property.parent
o = state.obj() # strong ref
dict_ = attributes.instance_dict(o)
if passive & attributes.INIT_OK:
passive ^= attributes.INIT_OK
params = {}
for key, ident, value in param_keys:
if ident is not None:
if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
value = mapper._get_committed_state_attr_by_column(
state, dict_, ident, passive)
else:
value = mapper._get_state_attr_by_column(
state, dict_, ident, passive)
params[key] = value
return criterion, params
def _load_for_state(self, state, passive):
if not state.key and (
(
not self.parent_property.load_on_pending
and not state._load_pending
)
or not state.session_id
):
return attributes.ATTR_EMPTY
pending = not state.key
ident_key = None
if (
(not passive & attributes.SQL_OK and not self.use_get)
or
(not passive & attributes.NON_PERSISTENT_OK and pending)
):
return attributes.PASSIVE_NO_RESULT
session = _state_session(state)
if not session:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"lazy load operation of attribute '%s' cannot proceed" %
(orm_util.state_str(state), self.key)
)
# if we have a simple primary key load, check the
# identity map without generating a Query at all
if self.use_get:
ident = self._get_ident_for_use_get(
session,
state,
passive
)
if attributes.PASSIVE_NO_RESULT in ident:
return attributes.PASSIVE_NO_RESULT
elif attributes.NEVER_SET in ident:
return attributes.NEVER_SET
if _none_set.issuperset(ident):
return None
ident_key = self.mapper.identity_key_from_primary_key(ident)
instance = loading.get_from_identity(session, ident_key, passive)
if instance is not None:
return instance
elif not passive & attributes.SQL_OK or \
not passive & attributes.RELATED_OBJECT_OK:
return attributes.PASSIVE_NO_RESULT
return self._emit_lazyload(session, state, ident_key, passive)
def _get_ident_for_use_get(self, session, state, passive):
instance_mapper = state.manager.mapper
if passive & attributes.LOAD_AGAINST_COMMITTED:
get_attr = instance_mapper._get_committed_state_attr_by_column
else:
get_attr = instance_mapper._get_state_attr_by_column
dict_ = state.dict
return [
get_attr(
state,
dict_,
self._equated_columns[pk],
passive=passive)
for pk in self.mapper.primary_key
]
@util.dependencies("sqlalchemy.orm.strategy_options")
def _emit_lazyload(
self, strategy_options, session, state, ident_key, passive):
q = session.query(self.mapper)._adapt_all_clauses()
if self.parent_property.secondary is not None:
q = q.select_from(self.mapper, self.parent_property.secondary)
q = q._with_invoke_all_eagers(False)
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
q = q.autoflush(False)
if state.load_path:
q = q._with_current_path(state.load_path[self.parent_property])
if state.load_options:
q = q._conditional_options(*state.load_options)
if self.use_get:
return loading.load_on_ident(q, ident_key)
if self.parent_property.order_by:
q = q.order_by(*util.to_list(self.parent_property.order_by))
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if rev.direction is interfaces.MANYTOONE and \
rev._use_get and \
not isinstance(rev.strategy, LazyLoader):
q = q.options(
strategy_options.Load(rev.parent).lazyload(rev.key))
lazy_clause, params = self._generate_lazy_clause(
state, passive=passive)
if pending:
if util.has_intersection(
orm_util._none_set, params.values()):
return None
elif util.has_intersection(orm_util._never_set, params.values()):
return None
q = q.filter(lazy_clause).params(params)
result = q.all()
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property)
return result[0]
else:
return None
def create_row_processor(
self, context, path, loadopt,
mapper, result, adapter, populators):
key = self.key
if not self.is_class_level:
# we are not the primary manager for this attribute
# on this class - set up a
# per-instance lazyloader, which will override the
# class-level behavior.
# this currently only happens when using a
# "lazyload" option on a "no load"
# attribute - "eager" attributes always have a
# class-level lazyloader installed.
set_lazy_callable = InstanceState._instance_level_callable_processor(
mapper.class_manager,
LoadLazyAttribute(key, self._strategy_keys[0]), key)
populators["new"].append((self.key, set_lazy_callable))
elif context.populate_existing or mapper.always_refresh:
def reset_for_lazy_callable(state, dict_, row):
# we are the primary manager for this attribute on
# this class - reset its
# per-instance attribute state, so that the class-level
# lazy loader is
# executed when next referenced on this instance.
# this is needed in
# populate_existing() types of scenarios to reset
# any existing state.
state._reset(dict_, key)
populators["new"].append((self.key, reset_for_lazy_callable))
class LoadLazyAttribute(object):
"""serializable loader object used by LazyLoader"""
def __init__(self, key, strategy_key=(('lazy', 'select'),)):
self.key = key
self.strategy_key = strategy_key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
instance_mapper = state.manager.mapper
prop = instance_mapper._props[key]
strategy = prop._strategies[self.strategy_key]
return strategy._load_for_state(state, passive)
@properties.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(AbstractRelationshipLoader):
__slots__ = ()
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy_by_cls(LazyLoader).\
init_class_attribute(mapper)
def setup_query(
self, context, entity,
path, loadopt, adapter, column_collection=None,
parentmapper=None, **kwargs):
pass
def create_row_processor(
self, context, path, loadopt,
mapper, result, adapter, populators):
def load_immediate(state, dict_, row):
state.get_impl(self.key).get(state, dict_)
populators["delayed"].append((self.key, load_immediate))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(AbstractRelationshipLoader):
__slots__ = 'join_depth',
def __init__(self, parent):
super(SubqueryLoader, self).__init__(parent)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy_by_cls(LazyLoader).\
init_class_attribute(mapper)
def setup_query(
self, context, entity,
path, loadopt, adapter,
column_collection=None,
parentmapper=None, **kwargs):
if not context.query._enable_eagerloads:
return
elif context.query._yield_per:
context.query._no_yield_per("subquery")
path = path[self.parent_property]
# build up a path indicating the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_info = path.get(
context.attributes,
"path_with_polymorphic", None)
if with_poly_info is not None:
effective_entity = with_poly_info.entity
else:
effective_entity = self.mapper
subq_path = context.attributes.get(
('subquery_path', None),
orm_util.PathRegistry.root)
subq_path = subq_path + path
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif subq_path.contains_mapper(self.mapper):
return
leftmost_mapper, leftmost_attr, leftmost_relationship = \
self._get_leftmost(subq_path)
orig_query = context.attributes.get(
("orig_query", SubqueryLoader),
context.query)
# generate a new Query from the original, then
# produce a subquery from it.
left_alias = self._generate_from_original_query(
orig_query, leftmost_mapper,
leftmost_attr, leftmost_relationship,
entity.entity_zero
)
# generate another Query that will join the
# left alias to the target relationships.
# basically doing a longhand
# "from_self()". (from_self() itself not quite industrial
# strength enough for all contingencies...but very close)
q = orig_query.session.query(effective_entity)
q._attributes = {
("orig_query", SubqueryLoader): orig_query,
('subquery_path', None): subq_path
}
q = q._set_enable_single_crit(False)
to_join, local_attr, parent_alias = \
self._prep_for_joins(left_alias, subq_path)
q = q.order_by(*local_attr)
q = q.add_columns(*local_attr)
q = self._apply_joins(
q, to_join, left_alias,
parent_alias, effective_entity)
q = self._setup_options(q, subq_path, orig_query, effective_entity)
q = self._setup_outermost_orderby(q)
# add new query to attributes to be picked up
# by create_row_processor
path.set(context.attributes, "subquery", q)
def _get_leftmost(self, subq_path):
subq_path = subq_path.path
subq_mapper = orm_util._class_to_mapper(subq_path[0])
# determine attributes of the leftmost mapper
if self.parent.isa(subq_mapper) and \
self.parent_property is subq_path[1]:
leftmost_mapper, leftmost_prop = \
self.parent, self.parent_property
else:
leftmost_mapper, leftmost_prop = \
subq_mapper, \
subq_path[1]
leftmost_cols = leftmost_prop.local_columns
leftmost_attr = [
getattr(
subq_path[0].entity,
leftmost_mapper._columntoproperty[c].key)
for c in leftmost_cols
]
return leftmost_mapper, leftmost_attr, leftmost_prop
def _generate_from_original_query(
self,
orig_query, leftmost_mapper,
leftmost_attr, leftmost_relationship, orig_entity
):
# reformat the original query
# to look only for significant columns
q = orig_query._clone().correlate(None)
# set a real "from" if not present, as this is more
# accurate than just going off of the column expression
if not q._from_obj and orig_entity.mapper.isa(leftmost_mapper):
q._set_select_from([orig_entity], False)
target_cols = q._adapt_col_list(leftmost_attr)
# select from the identity columns of the outer
q._set_entities(target_cols)
distinct_target_key = leftmost_relationship.distinct_target_key
if distinct_target_key is True:
q._distinct = True
elif distinct_target_key is None:
# if target_cols refer to a non-primary key or only
# part of a composite primary key, set the q as distinct
for t in set(c.table for c in target_cols):
if not set(target_cols).issuperset(t.primary_key):
q._distinct = True
break
if q._order_by is False:
q._order_by = leftmost_mapper.order_by
# don't need ORDER BY if no limit/offset
if q._limit is None and q._offset is None:
q._order_by = None
# the original query now becomes a subquery
# which we'll join onto.
embed_q = q.with_labels().subquery()
left_alias = orm_util.AliasedClass(
leftmost_mapper, embed_q,
use_mapper_path=True)
return left_alias
def _prep_for_joins(self, left_alias, subq_path):
# figure out what's being joined. a.k.a. the fun part
to_join = []
pairs = list(subq_path.pairs())
for i, (mapper, prop) in enumerate(pairs):
if i > 0:
# look at the previous mapper in the chain -
# if it is as or more specific than this prop's
# mapper, use that instead.
# note we have an assumption here that
# the non-first element is always going to be a mapper,
# not an AliasedClass
prev_mapper = pairs[i - 1][1].mapper
to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
else:
to_append = mapper
to_join.append((to_append, prop.key))
# determine the immediate parent class we are joining from,
# which needs to be aliased.
if len(to_join) > 1:
info = inspect(to_join[-1][0])
if len(to_join) < 2:
# in the case of a one level eager load, this is the
# leftmost "left_alias".
parent_alias = left_alias
elif info.mapper.isa(self.parent):
# In the case of multiple levels, retrieve
# it from subq_path[-2]. This is the same as self.parent
# in the vast majority of cases, and [ticket:2014]
# illustrates a case where sub_path[-2] is a subclass
# of self.parent
parent_alias = orm_util.AliasedClass(
to_join[-1][0],
use_mapper_path=True)
else:
# if of_type() were used leading to this relationship,
# self.parent is more specific than subq_path[-2]
parent_alias = orm_util.AliasedClass(
self.parent,
use_mapper_path=True)
local_cols = self.parent_property.local_columns
local_attr = [
getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
return to_join, local_attr, parent_alias
def _apply_joins(
self, q, to_join, left_alias, parent_alias,
effective_entity):
for i, (mapper, key) in enumerate(to_join):
# we need to use query.join() as opposed to
# orm.join() here because of the
# rich behavior it brings when dealing with
# "with_polymorphic" mappers. "aliased"
# and "from_joinpoint" take care of most of
# the chaining and aliasing for us.
first = i == 0
middle = i < len(to_join) - 1
second_to_last = i == len(to_join) - 2
last = i == len(to_join) - 1
if first:
attr = getattr(left_alias, key)
if last and effective_entity is not self.mapper:
attr = attr.of_type(effective_entity)
else:
if last and effective_entity is not self.mapper:
attr = getattr(parent_alias, key).\
of_type(effective_entity)
else:
attr = getattr(mapper.entity, key)
if second_to_last:
q = q.join(parent_alias, attr, from_joinpoint=True)
else:
q = q.join(attr, aliased=middle, from_joinpoint=True)
return q
def _setup_options(self, q, subq_path, orig_query, effective_entity):
# propagate loader options etc. to the new query.
# these will fire relative to subq_path.
q = q._with_current_path(subq_path)
q = q._conditional_options(*orig_query._with_options)
if orig_query._populate_existing:
q._populate_existing = orig_query._populate_existing
return q
def _setup_outermost_orderby(self, q):
if self.parent_property.order_by:
# if there's an ORDER BY, alias it the same
# way joinedloader does, but we have to pull out
# the "eagerjoin" from the query.
# this really only picks up the "secondary" table
# right now.
eagerjoin = q._from_obj[0]
eager_order_by = \
eagerjoin._target_adapter.\
copy_and_process(
util.to_list(
self.parent_property.order_by
)
)
q = q.order_by(*eager_order_by)
return q
class _SubqCollections(object):
"""Given a :class:`.Query` used to emit the "subquery load",
provide a load interface that executes the query at the
first moment a value is needed.
"""
_data = None
def __init__(self, subq):
self.subq = subq
def get(self, key, default):
if self._data is None:
self._load()
return self._data.get(key, default)
def _load(self):
self._data = dict(
(k, [vv[0] for vv in v])
for k, v in itertools.groupby(
self.subq,
lambda x: x[1:]
)
)
def loader(self, state, dict_, row):
if self._data is None:
self._load()
def create_row_processor(
self, context, path, loadopt,
mapper, result, adapter, populators):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." %
self)
path = path[self.parent_property]
subq = path.get(context.attributes, 'subquery')
if subq is None:
return
assert subq.session is context.session, (
"Subquery session doesn't refer to that of "
"our context. Are there broken context caching "
"schemes being used?"
)
local_cols = self.parent_property.local_columns
# cache the loaded collections in the context
# so that inheriting mappers don't re-load when they
# call upon create_row_processor again
collections = path.get(context.attributes, "collections")
if collections is None:
collections = self._SubqCollections(subq)
path.set(context.attributes, 'collections', collections)
if adapter:
local_cols = [adapter.columns[c] for c in local_cols]
if self.uselist:
self._create_collection_loader(
context, collections, local_cols, populators)
else:
self._create_scalar_loader(
context, collections, local_cols, populators)
def _create_collection_loader(
self, context, collections, local_cols, populators):
def load_collection_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]),
()
)
state.get_impl(self.key).\
set_committed_value(state, dict_, collection)
populators["new"].append((self.key, load_collection_from_subq))
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
def _create_scalar_loader(
self, context, collections, local_cols, populators):
def load_scalar_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]),
(None,)
)
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self)
scalar = collection[0]
state.get_impl(self.key).\
set_committed_value(state, dict_, scalar)
populators["new"].append((self.key, load_scalar_from_subq))
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="joined")
@properties.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
using joined eager loading.
"""
__slots__ = 'join_depth',
def __init__(self, parent):
super(JoinedLoader, self).__init__(parent)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy_by_cls(LazyLoader).init_class_attribute(mapper)
def setup_query(
self, context, entity, path, loadopt, adapter,
column_collection=None, parentmapper=None,
chained_from_outerjoin=False,
**kwargs):
"""Add a left outer join to the statement that's being constructed."""
if not context.query._enable_eagerloads:
return
elif context.query._yield_per and self.uselist:
context.query._no_yield_per("joined collection")
path = path[self.parent_property]
with_polymorphic = None
user_defined_adapter = self._init_user_defined_eager_proc(
loadopt, context) if loadopt else False
if user_defined_adapter is not False:
clauses, adapter, add_to_collection = \
self._setup_query_on_user_defined_adapter(
context, entity, path, adapter,
user_defined_adapter
)
else:
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif path.contains_mapper(self.mapper):
return
clauses, adapter, add_to_collection, chained_from_outerjoin = \
self._generate_row_adapter(
context, entity, path, loadopt, adapter,
column_collection, parentmapper, chained_from_outerjoin
)
with_poly_info = path.get(
context.attributes,
"path_with_polymorphic",
None
)
if with_poly_info is not None:
with_polymorphic = with_poly_info.with_polymorphic_mappers
else:
with_polymorphic = None
path = path[self.mapper]
loading._setup_entity_query(
context, self.mapper, entity,
path, clauses, add_to_collection,
with_polymorphic=with_polymorphic,
parentmapper=self.mapper,
chained_from_outerjoin=chained_from_outerjoin)
if with_poly_info is not None and \
None in set(context.secondary_columns):
raise sa_exc.InvalidRequestError(
"Detected unaliased columns when generating joined "
"load. Make sure to use aliased=True or flat=True "
"when using joined loading with with_polymorphic()."
)
def _init_user_defined_eager_proc(self, loadopt, context):
# check if the opt applies at all
if "eager_from_alias" not in loadopt.local_opts:
# nope
return False
path = loadopt.path.parent
# the option applies. check if the "user_defined_eager_row_processor"
# has been built up.
adapter = path.get(
context.attributes,
"user_defined_eager_row_processor", False)
if adapter is not False:
# just return it
return adapter
# otherwise figure it out.
alias = loadopt.local_opts["eager_from_alias"]
root_mapper, prop = path[-2:]
#from .mapper import Mapper
#from .interfaces import MapperProperty
#assert isinstance(root_mapper, Mapper)
#assert isinstance(prop, MapperProperty)
if alias is not None:
if isinstance(alias, str):
alias = prop.target.alias(alias)
adapter = sql_util.ColumnAdapter(
alias,
equivalents=prop.mapper._equivalent_columns)
else:
if path.contains(context.attributes, "path_with_polymorphic"):
with_poly_info = path.get(
context.attributes,
"path_with_polymorphic")
adapter = orm_util.ORMAdapter(
with_poly_info.entity,
equivalents=prop.mapper._equivalent_columns)
else:
adapter = context.query._polymorphic_adapters.get(
prop.mapper, None)
path.set(
context.attributes,
"user_defined_eager_row_processor",
adapter)
return adapter
def _setup_query_on_user_defined_adapter(
self, context, entity,
path, adapter, user_defined_adapter):
# apply some more wrapping to the "user defined adapter"
# if we are setting up the query for SQL render.
adapter = entity._get_entity_clauses(context.query, context)
if adapter and user_defined_adapter:
user_defined_adapter = user_defined_adapter.wrap(adapter)
path.set(
context.attributes, "user_defined_eager_row_processor",
user_defined_adapter)
elif adapter:
user_defined_adapter = adapter
path.set(
context.attributes, "user_defined_eager_row_processor",
user_defined_adapter)
add_to_collection = context.primary_columns
return user_defined_adapter, adapter, add_to_collection
def _generate_row_adapter(
self,
context, entity, path, loadopt, adapter,
column_collection, parentmapper, chained_from_outerjoin):
with_poly_info = path.get(
context.attributes,
"path_with_polymorphic",
None
)
if with_poly_info:
to_adapt = with_poly_info.entity
else:
to_adapt = orm_util.AliasedClass(
self.mapper,
flat=True,
use_mapper_path=True)
clauses = orm_util.ORMAdapter(
to_adapt,
equivalents=self.mapper._equivalent_columns,
adapt_required=True, allow_label_resolve=False,
anonymize_labels=True)
assert clauses.aliased_class is not None
if self.parent_property.uselist:
context.multi_row_eager_loaders = True
innerjoin = (
loadopt.local_opts.get(
'innerjoin', self.parent_property.innerjoin)
if loadopt is not None
else self.parent_property.innerjoin
)
if not innerjoin:
# if this is an outer join, all non-nested eager joins from
# this path must also be outer joins
chained_from_outerjoin = True
context.create_eager_joins.append(
(
self._create_eager_join, context,
entity, path, adapter,
parentmapper, clauses, innerjoin, chained_from_outerjoin
)
)
add_to_collection = context.secondary_columns
path.set(context.attributes, "eager_row_processor", clauses)
return clauses, adapter, add_to_collection, chained_from_outerjoin
def _create_eager_join(
self, context, entity,
path, adapter, parentmapper,
clauses, innerjoin, chained_from_outerjoin):
if parentmapper is None:
localparent = entity.mapper
else:
localparent = parentmapper
# whether or not the Query will wrap the selectable in a subquery,
# and then attach eager load joins to that (i.e., in the case of
# LIMIT/OFFSET etc.)
should_nest_selectable = context.multi_row_eager_loaders and \
context.query._should_nest_selectable
entity_key = None
if entity not in context.eager_joins and \
not should_nest_selectable and \
context.from_clause:
index, clause = sql_util.find_join_source(
context.from_clause, entity.selectable)
if clause is not None:
# join to an existing FROM clause on the query.
# key it to its list index in the eager_joins dict.
# Query._compile_context will adapt as needed and
# append to the FROM clause of the select().
entity_key, default_towrap = index, clause
if entity_key is None:
entity_key, default_towrap = entity, entity.selectable
towrap = context.eager_joins.setdefault(entity_key, default_towrap)
if adapter:
if getattr(adapter, 'aliased_class', None):
onclause = getattr(
adapter.aliased_class, self.key,
self.parent_property)
else:
onclause = getattr(
orm_util.AliasedClass(
self.parent,
adapter.selectable,
use_mapper_path=True
),
self.key, self.parent_property
)
else:
onclause = self.parent_property
assert clauses.aliased_class is not None
attach_on_outside = (
not chained_from_outerjoin or
not innerjoin or innerjoin == 'unnested')
if attach_on_outside:
# this is the "classic" eager join case.
eagerjoin = orm_util._ORMJoin(
towrap,
clauses.aliased_class,
onclause,
isouter=not innerjoin or (
chained_from_outerjoin and isinstance(towrap, sql.Join)
), _left_memo=self.parent, _right_memo=self.mapper
)
else:
# all other cases are innerjoin=='nested' approach
eagerjoin = self._splice_nested_inner_join(
path, towrap, clauses, onclause)
context.eager_joins[entity_key] = eagerjoin
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = entity.selectable
if self.parent_property.secondary is None and \
not parentmapper:
# for parentclause that is the non-eager end of the join,
# ensure all the parent cols in the primaryjoin are actually
# in the
# columns clause (i.e. are not deferred), so that aliasing applied
# by the Query propagates those columns outward.
# This has the effect
# of "undefering" those columns.
for col in sql_util._find_columns(
self.parent_property.primaryjoin):
if localparent.mapped_table.c.contains_column(col):
if adapter:
col = adapter.columns[col]
context.primary_columns.append(col)
if self.parent_property.order_by:
context.eager_order_by += eagerjoin._target_adapter.\
copy_and_process(
util.to_list(
self.parent_property.order_by
)
)
def _splice_nested_inner_join(
self, path, join_obj, clauses, onclause, splicing=False):
if splicing is False:
# first call is always handed a join object
# from the outside
assert isinstance(join_obj, orm_util._ORMJoin)
elif isinstance(join_obj, sql.selectable.FromGrouping):
return self._splice_nested_inner_join(
path, join_obj.element, clauses, onclause, splicing
)
elif not isinstance(join_obj, orm_util._ORMJoin):
if path[-2] is splicing:
return orm_util._ORMJoin(
join_obj, clauses.aliased_class,
onclause, isouter=False,
_left_memo=splicing,
_right_memo=path[-1].mapper
)
else:
# only here if splicing == True
return None
target_join = self._splice_nested_inner_join(
path, join_obj.right, clauses,
onclause, join_obj._right_memo)
if target_join is None:
right_splice = False
target_join = self._splice_nested_inner_join(
path, join_obj.left, clauses,
onclause, join_obj._left_memo)
if target_join is None:
# should only return None when recursively called,
# e.g. splicing==True
assert splicing is not False, \
"assertion failed attempting to produce joined eager loads"
return None
else:
right_splice = True
if right_splice:
# for a right splice, attempt to flatten out
# a JOIN b JOIN c JOIN .. to avoid needless
# parenthesis nesting
if not join_obj.isouter and not target_join.isouter:
eagerjoin = join_obj._splice_into_center(target_join)
else:
eagerjoin = orm_util._ORMJoin(
join_obj.left, target_join,
join_obj.onclause, isouter=join_obj.isouter,
_left_memo=join_obj._left_memo)
else:
eagerjoin = orm_util._ORMJoin(
target_join, join_obj.right,
join_obj.onclause, isouter=join_obj.isouter,
_right_memo=join_obj._right_memo)
eagerjoin._target_adapter = target_join._target_adapter
return eagerjoin
def _create_eager_adapter(self, context, result, adapter, path, loadopt):
user_defined_adapter = self._init_user_defined_eager_proc(
loadopt, context) if loadopt else False
if user_defined_adapter is not False:
decorator = user_defined_adapter
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if context.adapter and decorator:
decorator = decorator.wrap(context.adapter)
elif context.adapter:
decorator = context.adapter
else:
decorator = path.get(context.attributes, "eager_row_processor")
if decorator is None:
return False
if self.mapper._result_has_identity_key(result, decorator):
return decorator
else:
# no identity key - don't return a row
# processor, will cause a degrade to lazy
return False
def create_row_processor(
self, context, path, loadopt, mapper,
result, adapter, populators):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." %
self
)
our_path = path[self.parent_property]
eager_adapter = self._create_eager_adapter(
context,
result,
adapter, our_path, loadopt)
if eager_adapter is not False:
key = self.key
_instance = loading._instance_processor(
self.mapper,
context,
result,
our_path[self.mapper],
eager_adapter)
if not self.uselist:
self._create_scalar_loader(context, key, _instance, populators)
else:
self._create_collection_loader(
context, key, _instance, populators)
else:
self.parent_property._get_strategy_by_cls(LazyLoader).\
create_row_processor(
context, path, loadopt,
mapper, result, adapter, populators)
def _create_collection_loader(self, context, key, _instance, populators):
def load_collection_from_joined_new_row(state, dict_, row):
collection = attributes.init_state_collection(
state, dict_, key)
result_list = util.UniqueAppender(collection,
'append_without_event')
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_existing_row(state, dict_, row):
if (state, key) in context.attributes:
result_list = context.attributes[(state, key)]
else:
# appender_key can be absent from context.attributes
# with isnew=False when self-referential eager loading
# is used; the same instance may be present in two
# distinct sets of result columns
collection = attributes.init_state_collection(
state, dict_, key)
result_list = util.UniqueAppender(
collection,
'append_without_event')
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append((self.key, load_collection_from_joined_new_row))
populators["existing"].append(
(self.key, load_collection_from_joined_existing_row))
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_collection_from_joined_exec))
def _create_scalar_loader(self, context, key, _instance, populators):
def load_scalar_from_joined_new_row(state, dict_, row):
# set a scalar object instance directly on the parent
# object, bypassing InstrumentedAttribute event handlers.
dict_[key] = _instance(row)
def load_scalar_from_joined_existing_row(state, dict_, row):
# call _instance on the row, even though the object has
# been created, so that we further descend into properties
existing = _instance(row)
if existing is not None \
and key in dict_ \
and existing is not dict_[key]:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self)
def load_scalar_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append((self.key, load_scalar_from_joined_new_row))
populators["existing"].append(
(self.key, load_scalar_from_joined_existing_row))
if context.invoke_all_eagers:
populators["eager"].append((self.key, load_scalar_from_joined_exec))
def single_parent_validator(desc, prop):
def _do_check(state, value, oldvalue, initiator):
if value is not None and initiator.key == prop.key:
hasparent = initiator.hasparent(attributes.instance_state(value))
if hasparent and oldvalue is not value:
raise sa_exc.InvalidRequestError(
"Instance %s is already associated with an instance "
"of %s via its %s attribute, and is only allowed a "
"single parent." %
(orm_util.instance_str(value), state.class_, prop)
)
return value
def append(state, value, initiator):
return _do_check(state, value, None, initiator)
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
event.listen(
desc, 'append', append, raw=True, retval=True,
active_history=True)
event.listen(
desc, 'set', set_, raw=True, retval=True,
active_history=True)
|
{
"content_hash": "c9172e73056421a0068715c7b0b0db93",
"timestamp": "",
"source": "github",
"line_count": 1598,
"max_line_length": 81,
"avg_line_length": 36.084480600750936,
"alnum_prop": 0.5667065535958934,
"repo_name": "bootandy/sqlalchemy",
"id": "b9ef5808bb9de5a01e346a8739fe31d9f822ad97",
"size": "57901",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/orm/strategies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46062"
},
{
"name": "Python",
"bytes": "8858611"
}
],
"symlink_target": ""
}
|
"""
Collection of test scripts for datumio
"""
|
{
"content_hash": "e2277c18f7b1c2eb5c6f835f504dc1b0",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 38,
"avg_line_length": 15.666666666666666,
"alnum_prop": 0.7021276595744681,
"repo_name": "longubu/datumio",
"id": "3c5fe08fad615020039d0b7e030af0fea0473e85",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65129"
}
],
"symlink_target": ""
}
|
"""Implementation of DPLL algorithm
Further improvements: eliminate calls to pl_true, implement branching rules,
efficient unit propagation.
References:
- http://en.wikipedia.org/wiki/DPLL_algorithm
- http://bioinformatics.louisville.edu/ouyang/MingOuyangThesis.pdf
"""
from sympy.core import Symbol
from sympy import Predicate
from sympy.logic.boolalg import Or, Not, conjuncts, disjuncts, to_cnf, \
to_int_repr
from sympy.logic.inference import pl_true, literal_symbol
def dpll_satisfiable(expr):
"""
Check satisfiability of a propositional sentence.
It returns a model rather than True when it succeeds
>>> from sympy import symbols
>>> from sympy.abc import A, B
>>> from sympy.logic.algorithms.dpll import dpll_satisfiable
>>> dpll_satisfiable(A & ~B)
{A: True, B: False}
>>> dpll_satisfiable(A & ~A)
False
"""
symbols = list(expr.atoms(Symbol, Predicate))
symbols_int_repr = set(range(1, len(symbols) + 1))
clauses = conjuncts(to_cnf(expr))
clauses_int_repr = to_int_repr(clauses, symbols)
result = dpll_int_repr(clauses_int_repr, symbols_int_repr, {})
if not result:
return result
output = {}
for key in result:
output.update({symbols[key-1]: result[key]})
return output
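# Added note on the integer representation used below: symbols are numbered
# 1..n in the order of ``symbols``, a negated literal becomes the negative
# integer, and each clause becomes a set, so with symbols = [A, B] the clause
# A | ~B is represented as set([1, -2]).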
def dpll(clauses, symbols, model):
"""
Compute satisfiability in a partial model.
Clauses is an array of conjuncts.
>>> from sympy.abc import A, B, D
>>> from sympy.logic.algorithms.dpll import dpll
>>> dpll([A, B, D], [A, B], {D: False})
False
"""
# compute DP kernel
P, value = find_unit_clause(clauses, model)
while P:
model.update({P: value})
symbols.remove(P)
        if not value:
            P = ~P
clauses = unit_propagate(clauses, P)
P, value = find_unit_clause(clauses, model)
P, value = find_pure_symbol(symbols, clauses)
while P:
model.update({P: value})
symbols.remove(P)
        if not value:
            P = ~P
clauses = unit_propagate(clauses, P)
P, value = find_pure_symbol(symbols, clauses)
# end DP kernel
unknown_clauses = []
for c in clauses:
val = pl_true(c, model)
        if val is False:
return False
        if val is not True:
unknown_clauses.append(c)
if not unknown_clauses:
return model
    if not clauses:
        return model
P = symbols.pop()
model_copy = model.copy()
model.update({P: True})
model_copy.update({P: False})
symbols_copy = symbols[:]
return (dpll(unit_propagate(unknown_clauses, P), symbols, model) or
dpll(unit_propagate(unknown_clauses, Not(P)), symbols_copy, model_copy))
def dpll_int_repr(clauses, symbols, model):
"""
Compute satisfiability in a partial model.
Arguments are expected to be in integer representation
>>> from sympy.logic.algorithms.dpll import dpll_int_repr
>>> dpll_int_repr([set([1]), set([2]), set([3])], set([1, 2]), {3: False})
False
"""
# compute DP kernel
P, value = find_unit_clause_int_repr(clauses, model)
while P:
model.update({P: value})
symbols.remove(P)
if not value:
P = -P
clauses = unit_propagate_int_repr(clauses, P)
P, value = find_unit_clause_int_repr(clauses, model)
P, value = find_pure_symbol_int_repr(symbols, clauses)
while P:
model.update({P: value})
symbols.remove(P)
if not value:
P = -P
clauses = unit_propagate_int_repr(clauses, P)
P, value = find_pure_symbol_int_repr(symbols, clauses)
# end DP kernel
unknown_clauses = []
for c in clauses:
val = pl_true_int_repr(c, model)
if val is False:
return False
if val is not True:
unknown_clauses.append(c)
if not unknown_clauses:
return model
P = symbols.pop()
model_copy = model.copy()
model.update({P: True})
model_copy.update({P: False})
symbols_copy = symbols.copy()
return (dpll_int_repr(unit_propagate_int_repr(unknown_clauses, P), symbols, model) or
dpll_int_repr(unit_propagate_int_repr(unknown_clauses, -P), symbols_copy, model_copy))
### helper methods for DPLL
def pl_true_int_repr(clause, model={}):
"""
Lightweight version of pl_true.
    Argument clause represents the set of args of an Or clause. This is used
    inside dpll_int_repr; it is not meant to be used directly.
>>> from sympy.logic.algorithms.dpll import pl_true_int_repr
>>> pl_true_int_repr(set([1, 2]), {1: False})
>>> pl_true_int_repr(set([1, 2]), {1: False, 2: False})
False
"""
result = False
for lit in clause:
if lit < 0:
p = model.get(-lit)
if p is not None:
p = not p
else:
p = model.get(lit)
if p is True:
return True
elif p is None:
result = None
return result
def unit_propagate(clauses, symbol):
"""
Returns an equivalent set of clauses
If a set of clauses contains the unit clause l, the other clauses are
simplified by the application of the two following rules:
1. every clause containing l is removed
2. in every clause that contains ~l this literal is deleted
Arguments are expected to be in CNF.
>>> from sympy import symbols
>>> from sympy.abc import A, B, D
>>> from sympy.logic.algorithms.dpll import unit_propagate
>>> unit_propagate([A | B, D | ~B, B], B)
[D, B]
"""
output = []
for c in clauses:
if c.func != Or:
output.append(c)
continue
for arg in c.args:
if arg == ~symbol:
output.append(Or(*[x for x in c.args if x != ~symbol]))
break
if arg == symbol:
break
else:
output.append(c)
return output
def unit_propagate_int_repr(clauses, s):
"""
Same as unit_propagate, but arguments are expected to be in integer
representation
>>> from sympy.logic.algorithms.dpll import unit_propagate_int_repr
>>> unit_propagate_int_repr([set([1, 2]), set([3, -2]), set([2])], 2)
[set([3])]
"""
negated = set([-s])
return [clause - negated for clause in clauses if s not in clause]
def find_pure_symbol(symbols, unknown_clauses):
"""
Find a symbol and its value if it appears only as a positive literal
(or only as a negative) in clauses.
>>> from sympy import symbols
>>> from sympy.abc import A, B, D
>>> from sympy.logic.algorithms.dpll import find_pure_symbol
>>> find_pure_symbol([A, B, D], [A|~B,~B|~D,D|A])
(A, True)
"""
for sym in symbols:
found_pos, found_neg = False, False
for c in unknown_clauses:
if not found_pos and sym in disjuncts(c): found_pos = True
if not found_neg and Not(sym) in disjuncts(c): found_neg = True
if found_pos != found_neg: return sym, found_pos
return None, None
def find_pure_symbol_int_repr(symbols, unknown_clauses):
"""
Same as find_pure_symbol, but arguments are expected
to be in integer representation
>>> from sympy.logic.algorithms.dpll import find_pure_symbol_int_repr
>>> find_pure_symbol_int_repr(set([1,2,3]), [set([1, -2]), set([-2, -3]), set([3, 1])])
(1, True)
"""
all_symbols = set()
for c in unknown_clauses:
all_symbols.update(c)
found_pos = all_symbols.intersection(symbols)
found_neg = all_symbols.intersection([-s for s in symbols])
for p in found_pos:
if -p not in found_neg:
return p, True
for p in found_neg:
if -p not in found_pos:
return -p, False
return None, None
def find_unit_clause(clauses, model):
"""
A unit clause has only 1 variable that is not bound in the model.
>>> from sympy import symbols
>>> from sympy.abc import A, B, D
>>> from sympy.logic.algorithms.dpll import find_unit_clause
>>> find_unit_clause([A | B | D, B | ~D, A | ~B], {A:True})
(B, False)
"""
for clause in clauses:
num_not_in_model = 0
for literal in disjuncts(clause):
sym = literal_symbol(literal)
if sym not in model:
num_not_in_model += 1
P, value = sym, not (literal.func is Not)
if num_not_in_model == 1:
return P, value
return None, None
def find_unit_clause_int_repr(clauses, model):
"""
Same as find_unit_clause, but arguments are expected to be in
integer representation.
>>> from sympy.logic.algorithms.dpll import find_unit_clause_int_repr
>>> find_unit_clause_int_repr([set([1, 2, 3]), set([2, -3]), set([1, -2])], {1: True})
(2, False)
"""
bound = set(model) | set(-sym for sym in model)
for clause in clauses:
unbound = clause - bound
if len(unbound) == 1:
p = unbound.pop()
if p < 0:
return -p, False
else:
return p, True
return None, None
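# Illustrative sketch (added commentary, not part of the original module):
# the *_int_repr helpers work on an integer encoding in which the i-th
# symbol of the ``symbols`` list is mapped to the integer i + 1 and a
# negative integer stands for the negated literal. The pipeline used by
# dpll_satisfiable above can therefore be reproduced by hand:
#
#   from sympy.abc import A, B
#   symbols = [A, B]
#   clauses = conjuncts(to_cnf(A & ~B))          # e.g. [A, ~B]
#   clauses_int = to_int_repr(clauses, symbols)  # e.g. [set([1]), set([-2])]
#   dpll_int_repr(clauses_int, set([1, 2]), {})  # -> {1: True, 2: False}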
|
{
"content_hash": "d06ee55b0310814e5eb9b49d37781e62",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 98,
"avg_line_length": 31.455172413793104,
"alnum_prop": 0.5959219469414602,
"repo_name": "pernici/sympy",
"id": "a769b6ba0cef063f230e6406446cfc462eeeb4dd",
"size": "9122",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sympy/logic/algorithms/dpll.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6531741"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "TeX",
"bytes": "8"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from classes.om import Density
class Seismic(Density):
tid = 'seismic'
_TID_FRIENDLY_NAME = 'Seismic'
_SHOWN_ATTRIBUTES = [
('_oid', 'Object Id')
]
def __init__(self, data, **attributes):
super().__init__(data, **attributes)
def _get_max_dimensions(self):
return 5
|
{
"content_hash": "475b3bd9bb1f10191d534285a4ed5d24",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 44,
"avg_line_length": 19.875,
"alnum_prop": 0.5691823899371069,
"repo_name": "giruenf/GRIPy",
"id": "402b0206858017aa817e6f71aec14356719b64ac",
"size": "318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classes/om/seismic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1593931"
}
],
"symlink_target": ""
}
|
import re
from django.core.exceptions import ValidationError
from django.core.validators import BaseValidator
from django.core.validators import MinLengthValidator
from django.core.validators import RegexValidator
from django.utils.translation import ugettext_lazy as _
from django.conf import settings as django_settings
from .exceptions import MissingDictionaryError
from .exceptions import MissingNamesError
settings = django_settings.STRONGER_PASSWORD
class HippaValidator(BaseValidator):
code = 'hippa_error'
message = _('Your password is not strong enough.')
def __init__(self, *args, **kwargs):
self._min_length = kwargs.get(
'length', settings.get('length', 6)
)
self._min_int_length = kwargs.get(
'number', settings.get('number', 1)
)
self._min_spec_length = kwargs.get(
'special',
settings.get('special', 1)
)
self._dictionary = kwargs.get(
'dictionary',
settings.get('dictionary', None)
)
self._names = kwargs.get(
'names', None
)
super(HippaValidator, self).__init__(
limit_value=None
)
def __call__(self, value):
self._errors = []
self._validate_min_length(value)
self._validate_min_int_length(value)
self._validate_contains_special_char(value)
if self._dictionary:
self._validate_dictionary(value)
if self._names:
self._validate_names(value)
if self._errors:
raise ValidationError(
self._errors
)
def _validate_min_length(self, value):
try:
LengthValidator(self._min_length)(value)
except ValidationError, e:
self._handle_exception(
LengthValidator(self._min_length).message % self._min_length,
e.code
)
def _validate_min_int_length(self, value):
try:
ContainsNumberValidator()(value)
except ValidationError, e:
self._handle_exception(
ContainsNumberValidator.message % self._min_int_length,
e.code
)
def _validate_contains_special_char(self, value):
try:
ContainsSpecialCharValidator()(value)
except ValidationError, e:
self._handle_exception(
ContainsSpecialCharValidator.message % self._min_spec_length,
e.code
)
def _validate_dictionary(self, value):
try:
DictionaryValidator(dictionary=self._dictionary)(value)
except ValidationError, e:
self._handle_exception(
DictionaryValidator.message,
e.code
)
def _validate_names(self, value):
try:
NameValidator(names=self._names)(value)
except ValidationError, e:
self._handle_exception(
NameValidator.message,
e.code
)
def _handle_exception(self, message, code):
if not len(self._errors):
self._append_error(self.message, self.code)
self._append_error(message, code)
def _append_error(self, message, code):
self._errors.append(
ValidationError(
message=message,
code=code
)
)
class LengthValidator(MinLengthValidator):
message = _('Must contain at least %s characters.')
limit_value = 0
code = 'min_length'
def __call__(self, value):
try:
MinLengthValidator(self.limit_value)(value)
except ValidationError:
raise ValidationError(
self.message % self.limit_value,
self.code
)
class ContainsNumberValidator(RegexValidator):
message = _('Must contain %s or more numbers.')
regex = re.compile(
r'[0-9]+',
flags=re.IGNORECASE
)
class ContainsSpecialCharValidator(RegexValidator):
message = _('Must contain %s or more special characters.')
code = 'special_chars'
regex = re.compile(
r'[$&*]+',
flags=re.IGNORECASE
)
class DictionaryValidator(RegexValidator):
message = _('Must not contain common words.')
code = 'dictionary_word'
def __init__(
self, dictionary=None # path, list or set
):
        if isinstance(dictionary, basestring):
            # Load one word per line; skip blank lines so the generated
            # regex does not end up with an empty alternative.
            f = open(dictionary)
            dictionary = f.read()
            f.close()
            dictionary = frozenset(
                [word for word in dictionary.split('\n') if word]
            )
        # Accept any non-string collection of words (the file-loading branch
        # above produces a frozenset).
        if not isinstance(dictionary, (list, set, frozenset)):
            raise MissingDictionaryError()
self.regex = re.compile(
r'.*(%s).*' % '|'.join(dictionary),
flags=re.IGNORECASE
)
super(DictionaryValidator, self).__init__(
regex=self.regex,
message=self.message,
code=self.code,
inverse_match=True
)
class NameValidator(DictionaryValidator):
message = _('Must not contain your username or real name.')
code = 'name_validator'
def __init__(self, **kwargs):
dictionary = kwargs.pop('names', None)
kwargs.update({'dictionary': dictionary})
if not dictionary or not isinstance(dictionary, list):
raise MissingNamesError()
super(NameValidator, self).__init__(**kwargs)
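# Illustrative sketch (added commentary, not part of the original module):
# HippaValidator reads its thresholds from django settings, so a hypothetical
# configuration and usage could look like:
#
#   # settings.py
#   STRONGER_PASSWORD = {
#       'length': 8,                            # minimum number of characters
#       'number': 1,                            # at least this many digits
#       'special': 1,                           # special characters ($, & or *)
#       'dictionary': ['password', 'letmein'],  # common words to reject
#   }
#
#   # forms.py
#   password = forms.CharField(widget=forms.PasswordInput,
#                              validators=[HippaValidator()])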
|
{
"content_hash": "b2a3cc243536bd9293a9139b523b06f1",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 77,
"avg_line_length": 28.645833333333332,
"alnum_prop": 0.5729090909090909,
"repo_name": "eldest-daughter/django-strongerpassword",
"id": "9f59a65c123dba25bdb9a011067e2dc013d9b0f8",
"size": "5500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "strongerpassword/validators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12384"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import time
from proton.reactor import Reactor
# Not every event goes to the reactor's event handler. If we have a
# separate handler for something like a scheduled task, then those
# events aren't logged by the logger associated with the reactor's
# handler. Sometimes this is useful if you don't want to see them, but
# sometimes you want the global picture.
class Logger:
def on_unhandled(self, name, event):
print("LOG:", name, event)
class Task:
def on_timer_task(self, event):
print("Mission accomplished!")
class Program:
def on_reactor_init(self, event):
print("Hello, World!")
event.reactor.schedule(0, Task())
def on_reactor_final(self, event):
print("Goodbye, World!")
r = Reactor(Program())
# In addition to having a regular handler, the reactor also has a
# global handler that sees every event. By adding the Logger to the
# global handler instead of the regular handler, we can log every
# single event that occurs in the system regardless of whether or not
# there are specific handlers associated with the objects that are the
# target of those events.
r.global_handler.add(Logger())
r.run()
|
{
"content_hash": "8b821dc15765d69b0e86f029780233e6",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 31.102564102564102,
"alnum_prop": 0.720527617477329,
"repo_name": "Karm/qpid-proton",
"id": "3cbe11c21cba0fcb603e6d1f509e6db814162899",
"size": "2021",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "examples/python/reactor/global-logger.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1234930"
},
{
"name": "C++",
"bytes": "312016"
},
{
"name": "CMake",
"bytes": "89386"
},
{
"name": "Go",
"bytes": "192743"
},
{
"name": "Groff",
"bytes": "420"
},
{
"name": "HTML",
"bytes": "8169"
},
{
"name": "Java",
"bytes": "1790509"
},
{
"name": "JavaScript",
"bytes": "244212"
},
{
"name": "PHP",
"bytes": "31076"
},
{
"name": "Perl",
"bytes": "100876"
},
{
"name": "Perl6",
"bytes": "878"
},
{
"name": "Python",
"bytes": "628676"
},
{
"name": "Ruby",
"bytes": "335237"
},
{
"name": "Shell",
"bytes": "11599"
}
],
"symlink_target": ""
}
|
"""Tests for Java Cache IDX file parser."""
import unittest
from plaso.parsers import java_idx
from tests.parsers import test_lib
class IDXTest(test_lib.ParserTestCase):
"""Tests for Java Cache IDX file parser."""
def testParse602(self):
"""Tests the Parse function on a version 602 IDX file."""
parser = java_idx.JavaIDXParser()
storage_writer = self._ParseFile(['java_602.idx'], parser)
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 1)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 2)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
expected_event_values = {
'data_type': 'java:download:idx',
'downloaded_time': '2010-05-05T03:52:31+00:00',
'modification_time': '2010-05-05T01:34:19.720+00:00',
'idx_version': 602,
'url': 'http://www.gxxxxx.com/a/java/xxz.jar'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 0)
self.CheckEventData(event_data, expected_event_values)
def testParse605(self):
"""Tests the Parse function on a version 605 IDX file."""
parser = java_idx.JavaIDXParser()
storage_writer = self._ParseFile(['java.idx'], parser)
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 1)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 2)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
expected_event_values = {
'data_type': 'java:download:idx',
'downloaded_time': '2013-01-13T16:22:01+00:00',
'modification_time': '2001-07-26T05:00:00.000+00:00',
'idx_version': 605,
'ip_address': '10.7.119.10',
'url': (
'http://xxxxc146d3.gxhjxxwsf.xx:82/forum/dare.php?'
'hsh=6&key=b30xxxx1c597xxxx15d593d3f0xxx1ab')}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 0)
self.CheckEventData(event_data, expected_event_values)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "dded2ac508a6315ba0acd22be2353e11",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 34.64102564102564,
"alnum_prop": 0.6839378238341969,
"repo_name": "joachimmetz/plaso",
"id": "966b57be123ae0d9a13934368ffd35cb5c5ecddb",
"size": "2749",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/parsers/java_idx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4301"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1305"
},
{
"name": "Python",
"bytes": "5345755"
},
{
"name": "Shell",
"bytes": "27279"
},
{
"name": "YARA",
"bytes": "507"
}
],
"symlink_target": ""
}
|
"""Setup/installation tests for this package."""
from dmc.sitecontent.testing import IntegrationTestCase
from plone import api
class TestInstall(IntegrationTestCase):
"""Test installation of dmc.sitecontent into Plone."""
def setUp(self):
"""Custom shared utility setup for tests."""
self.portal = self.layer['portal']
self.installer = api.portal.get_tool('portal_quickinstaller')
def test_product_installed(self):
"""Test if dmc.sitecontent is installed with portal_quickinstaller."""
self.assertTrue(self.installer.isProductInstalled('dmc.sitecontent'))
def test_uninstall(self):
"""Test if dmc.sitecontent is cleanly uninstalled."""
self.installer.uninstallProducts(['dmc.sitecontent'])
self.assertFalse(self.installer.isProductInstalled('dmc.sitecontent'))
# browserlayer.xml
def test_browserlayer(self):
"""Test that IdmcSitecontentLayer is registered."""
from dmc.sitecontent.interfaces import IdmcSitecontentLayer
from plone.browserlayer import utils
self.failUnless(IdmcSitecontentLayer in utils.registered_layers())
|
{
"content_hash": "e92c0b590eb97b24fe85c6c64a5d4b09",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 39.724137931034484,
"alnum_prop": 0.7152777777777778,
"repo_name": "a25kk/dmc",
"id": "bf2ff43dd61877eca5a46a5eec45cf7ee8e54454",
"size": "1176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dmc.sitecontent/dmc/sitecontent/tests/test_setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "76786"
},
{
"name": "HTML",
"bytes": "309563"
},
{
"name": "JavaScript",
"bytes": "28591"
},
{
"name": "Makefile",
"bytes": "2549"
},
{
"name": "Python",
"bytes": "26659"
},
{
"name": "Shell",
"bytes": "223"
}
],
"symlink_target": ""
}
|
from oslo_serialization import jsonutils
from nova.db import api as db
from nova.objects import base
from nova.objects import fields
@base.NovaObjectRegistry.register
class ResourceMetadata(base.NovaObject):
# Version 1.0: Initial version
VERSION = "1.0"
    # This is the parent object of specific resources.
    # It is used as an object field of Resource,
    # that is to say Resource.metadata.
def __eq__(self, other):
return base.all_things_equal(self, other)
def __ne__(self, other):
return not (self == other)
@base.NovaObjectRegistry.register
class Resource(base.NovaObject):
# Version 1.0: Initial version
VERSION = "1.0"
fields = {
# UUID of resource provider
'provider_uuid': fields.UUIDField(),
# resource class of the Resource
'resource_class': fields.ResourceClassField(),
        # identifier is used to identify the resource; its format is up to
        # the virt drivers: for mdev it will be a UUID, for vpmem it is the
        # backend namespace name
'identifier': fields.StringField(),
# metadata is used to contain virt driver specific resource info
'metadata': fields.ObjectField('ResourceMetadata', subclasses=True),
}
def __eq__(self, other):
return base.all_things_equal(self, other)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
metadata = self.metadata if 'metadata' in self else None
return hash((self.provider_uuid, self.resource_class,
self.identifier, metadata))
@base.NovaObjectRegistry.register
class ResourceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
VERSION = "1.0"
fields = {
'objects': fields.ListOfObjectsField('Resource'),
}
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_extra = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['resources'])
if not db_extra or db_extra['resources'] is None:
return None
primitive = jsonutils.loads(db_extra['resources'])
resources = cls.obj_from_primitive(primitive)
return resources
@base.NovaObjectRegistry.register
class LibvirtVPMEMDevice(ResourceMetadata):
# Version 1.0: Initial version
VERSION = "1.0"
fields = {
# This is configured in file, used to generate resource class name
# CUSTOM_PMEM_NAMESPACE_$LABEL
'label': fields.StringField(),
# Backend pmem namespace's name
'name': fields.StringField(),
# Backend pmem namespace's size
'size': fields.IntegerField(),
# Backend device path
'devpath': fields.StringField(),
# Backend pmem namespace's alignment
'align': fields.IntegerField(),
}
def __hash__(self):
# Be sure all fields are set before using hash method
return hash((self.label, self.name, self.size,
self.devpath, self.align))
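# Illustrative sketch (added commentary, not part of the original module):
# a vpmem resource as described by the field comments above could be built
# like this, with every value below purely hypothetical:
#
#   meta = LibvirtVPMEMDevice(label='4GB', name='ns0.0', size=4292870144,
#                             devpath='/dev/dax0.0', align=2097152)
#   res = Resource(provider_uuid='a7a5fa99-8b5d-4b24-a55c-dc2d0f4ba1b0',
#                  resource_class='CUSTOM_PMEM_NAMESPACE_4GB',
#                  identifier='ns0.0',   # vpmem: backend namespace name
#                  metadata=meta)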
|
{
"content_hash": "3fdfeed8721a0dd1ba02853990095492",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 77,
"avg_line_length": 31.78125,
"alnum_prop": 0.6443788921665028,
"repo_name": "rahulunair/nova",
"id": "6ac97a57855e35f325f2e6e3b03953759277991e",
"size": "3657",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/objects/resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "22804450"
},
{
"name": "Shell",
"bytes": "41649"
},
{
"name": "Smarty",
"bytes": "472764"
}
],
"symlink_target": ""
}
|
from time import time
from twisted.python import log
class Session(object):
"""A period of time that a Vehicle is online."""
def __init__(self, start_timestamp):
self.start_timestamp = start_timestamp
self.rides = []
self.in_progress = True
self.end_timestamp = None
def end(self, end_timestamp):
self.in_progress = False
self.end_timestamp = end_timestamp
def duration(self):
if self.in_progress:
return int(time()) - self.start_timestamp
else:
            return self.end_timestamp - self.start_timestamp
def fields(self):
return { 'start_timestamp': self.start_timestamp,
'in_progress': self.in_progress,
'end_timestamp': self.end_timestamp,
'duration': self.duration(),
'rides': [r.fields() for r in self.rides] }
class Vehicle(object):
"""Represents a vehicle that may go on and offline. The vehicle may pick
up passengers while it is online."""
OFFLINE = 0
AVAILABLE = 1
TRANSIT = 2
def __init__(self, vehicle_id):
log.msg('A vehicle was created with id {!s}.'.format(vehicle_id))
self.vehicle_id = vehicle_id
self.sessions = []
self.status = Vehicle.OFFLINE
def all_rides(self):
        return [ride for session in self.sessions for ride in session.rides]
def new_session(self, start_timestamp):
self.sessions.append(Session(start_timestamp))
self.status = Vehicle.AVAILABLE
def end_session(self, end_timestamp):
if len(self.sessions) > 0:
self.sessions[-1].end(end_timestamp)
self.status = Vehicle.OFFLINE
def online(self):
return not self.status == Vehicle.OFFLINE
def pickup(self, ride):
if len(self.sessions) > 0:
self.sessions[-1].rides += [ride]
self.status = Vehicle.TRANSIT
def dropoff(self):
        self.status = Vehicle.AVAILABLE
def available(self):
return self.status == Vehicle.AVAILABLE
def fields(self):
return { 'id': self.vehicle_id,
'available': self.available(),
'online': self.online(),
'sessions': [s.fields() for s in self.sessions] }
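# Illustrative sketch (added commentary, not part of the original module):
# the intended lifecycle of a Vehicle, using hypothetical timestamps and a
# placeholder ride object that only needs a fields() method:
#
#   v = Vehicle('cab-42')
#   v.new_session(1000)      # vehicle comes online -> AVAILABLE
#   v.pickup(some_ride)      # passenger picked up  -> TRANSIT
#   v.dropoff()              #                      -> AVAILABLE
#   v.end_session(2000)      # vehicle goes offline -> OFFLINE
#   v.fields()               # JSON-serialisable summary of the vehicle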
|
{
"content_hash": "1ddd9e6ce54cf7bebe84a1ecb4bcccc0",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 76,
"avg_line_length": 30.756756756756758,
"alnum_prop": 0.5900702987697716,
"repo_name": "aj-michael/Hailstorm",
"id": "3f71ce25777223c3b4ff7476089f4e27651cf3e2",
"size": "2276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/vehicle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24287"
},
{
"name": "Shell",
"bytes": "495"
}
],
"symlink_target": ""
}
|
from base64 import urlsafe_b64encode
import pytest
from satosa.context import Context
from satosa.exception import SATOSAError, SATOSAConfigurationError
from satosa.internal_data import InternalRequest
from satosa.micro_services.custom_routing import DecideIfRequesterIsAllowed
TARGET_ENTITY = "entity1"
@pytest.fixture
def target_context(context):
entityid_bytes = TARGET_ENTITY.encode("utf-8")
entityid_b64_str = urlsafe_b64encode(entityid_bytes).decode("utf-8")
context.decorate(Context.KEY_TARGET_ENTITYID, entityid_b64_str)
return context
class TestDecideIfRequesterIsAllowed:
def create_decide_service(self, rules):
decide_service = DecideIfRequesterIsAllowed(config=dict(rules=rules), name="test_decide_service",
base_url="https://satosa.example.com")
decide_service.next = lambda ctx, data: data
return decide_service
def test_allow_one_requester(self, target_context):
rules = {
TARGET_ENTITY: {
"allow": ["test_requester"],
}
}
decide_service = self.create_decide_service(rules)
req = InternalRequest(None, "test_requester", None)
assert decide_service.process(target_context, req)
req.requester = "somebody else"
with pytest.raises(SATOSAError):
decide_service.process(target_context, req)
@pytest.mark.parametrize("requester", [
"test_requester",
"somebody else"
])
def test_allow_all_requesters(self, target_context, requester):
rules = {
TARGET_ENTITY: {
"allow": ["*"],
}
}
decide_service = self.create_decide_service(rules)
req = InternalRequest(None, requester, None)
assert decide_service.process(target_context, req)
def test_deny_one_requester(self, target_context):
rules = {
TARGET_ENTITY: {
"deny": ["test_requester"],
}
}
decide_service = self.create_decide_service(rules)
req = InternalRequest(None, "test_requester", None)
with pytest.raises(SATOSAError):
assert decide_service.process(target_context, req)
@pytest.mark.parametrize("requester", [
"test_requester",
"somebody else"
])
def test_deny_all_requesters(self, target_context, requester):
rules = {
TARGET_ENTITY: {
"deny": ["*"],
}
}
decide_service = self.create_decide_service(rules)
req = InternalRequest(None, requester, None)
with pytest.raises(SATOSAError):
decide_service.process(target_context, req)
def test_allow_takes_precedence_over_deny_all(self, target_context):
requester = "test_requester"
rules = {
TARGET_ENTITY: {
"allow": requester,
"deny": ["*"],
}
}
decide_service = self.create_decide_service(rules)
req = InternalRequest(None, requester, None)
assert decide_service.process(target_context, req)
req.requester = "somebody else"
with pytest.raises(SATOSAError):
decide_service.process(target_context, req)
def test_deny_takes_precedence_over_allow_all(self, target_context):
requester = "test_requester"
rules = {
TARGET_ENTITY: {
"allow": ["*"],
"deny": [requester],
}
}
decide_service = self.create_decide_service(rules)
req = InternalRequest(None, requester, None)
with pytest.raises(SATOSAError):
decide_service.process(target_context, req)
req = InternalRequest(None, "somebody else", None)
decide_service.process(target_context, req)
@pytest.mark.parametrize("requester", [
"*",
"test_requester"
])
def test_deny_all_and_allow_all_should_raise_exception(self, requester):
rules = {
TARGET_ENTITY: {
"allow": [requester],
"deny": [requester],
}
}
with pytest.raises(SATOSAConfigurationError):
self.create_decide_service(rules)
def test_defaults_to_allow_all_requesters_for_target_entity_without_specific_rules(self, target_context):
rules = {
"some other entity": {
"allow": ["foobar"]
}
}
decide_service = self.create_decide_service(rules)
req = InternalRequest(None, "test_requester", None)
assert decide_service.process(target_context, req)
def test_missing_target_entity_id_from_context(self, context):
target_entity = "entity1"
rules = {
target_entity: {
"deny": ["*"],
}
}
decide_service = self.create_decide_service(rules)
req = InternalRequest(None, "test_requester", None)
with pytest.raises(SATOSAError):
decide_service.process(context, req)
|
{
"content_hash": "b19fedb6756dcaa107eadbb3104813fa",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 109,
"avg_line_length": 32.30379746835443,
"alnum_prop": 0.5920846394984326,
"repo_name": "irtnog/SATOSA",
"id": "46b7c6b4a037ea9fc0b7f336ed861f88317e66a9",
"size": "5104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/satosa/micro_services/test_custom_routing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "581"
},
{
"name": "Python",
"bytes": "526345"
},
{
"name": "Shell",
"bytes": "1866"
}
],
"symlink_target": ""
}
|
try:
from pint import UnitRegistry
except ImportError:
raise ImportError("The pint package is not installed, please install this library to use quantity fields.")
# Common unit registry
registry = UnitRegistry()
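# Illustrative note (added commentary, not part of the original module):
# sharing a single UnitRegistry matters because pint quantities created from
# different registries cannot be combined. A hypothetical quantity field
# would therefore build its values from this registry, e.g.:
#
#   distance = 10 * registry.meter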
|
{
"content_hash": "8cb96a75fa0714ecc721d8bb96b84499",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 111,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.7737556561085973,
"repo_name": "tjmcewan/odin",
"id": "a1ce5f7ec0be1140efad6ea0e31ce19b5a43044e",
"size": "245",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "odin/contrib/pint/units.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--files",nargs="+")
parser.add_argument("--patfile", type=argparse.FileType("r"))
args = parser.parse_args()
import subprocess,os,os.path as osp
from control4.config import CTRL_ROOT
from control4.misc.test_utils import filelist_from_patterns
from control4.misc.console_utils import call_and_print,colorize
if args.files is None and args.patfile is None: args.patfile=open(osp.join(CTRL_ROOT,"maintenance/lintfiles.txt"),"r")
assert args.files is not None or args.patfile is not None
if args.files is not None:
filelist = args.files
elif args.patfile is not None:
filelist = filelist_from_patterns(args.patfile.readlines())
else:
raise Exception("unreachable")
rcfile = osp.join(CTRL_ROOT,"maintenance/pylintrc")
# lint = "python /Library/Python/2.7/site-packages/pylint/lint.py"
lint = "pylint"
if filelist is not None:
for fname in filelist:
result = call_and_print("%s -f colorized --rcfile %s -r n %s"%(lint, rcfile, fname),check=False)
else:
result = call_and_print("%s -f colorized --rcfile %s -r n *.py"%(lint,rcfile),check=False)
|
{
"content_hash": "f959b541777f7a8dadf6a9eb1d34f065",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 118,
"avg_line_length": 35.84375,
"alnum_prop": 0.7306015693112468,
"repo_name": "SFPD/rlreloaded",
"id": "c712dedafb20eb75922f9b8563d67ad8a4868e75",
"size": "1169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maintenance/run_pylint.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "753"
},
{
"name": "C++",
"bytes": "88527"
},
{
"name": "CMake",
"bytes": "33134"
},
{
"name": "Python",
"bytes": "478983"
},
{
"name": "Shell",
"bytes": "953"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db.models import Q
from django.core.exceptions import ValidationError
import re
APPEND_TO_SLUG = "-copy"
COPY_SLUG_REGEX = re.compile(r'^.*-copy(?:-(\d+)*)?$')
def is_valid_page_slug(page, parent, lang, slug, site, path=None):
"""Validates given slug depending on settings.
"""
from cms.models import Title
# Exclude the page with the publisher_state == page.PUBLISHER_STATE_DELETE
qs = Title.objects.filter(page__site=site)
if page.id:
qs = qs.exclude(
Q(page=page) |
Q(page=page.publisher_public) |
Q(page__publisher_state=page.PUBLISHER_STATE_DELETE)
)
if settings.i18n_installed:
qs = qs.filter(language=lang)
if not settings.CMS_FLAT_URLS:
if parent:
if parent.is_home():
qs = qs.filter(Q(page__parent=parent) |
Q(page__parent__isnull=True))
else:
qs = qs.filter(page__parent=parent)
else:
qs = qs.filter(page__parent__isnull=True)
if page.pk:
qs = qs.exclude(language=lang, page=page)
## Check for slugs
if qs.filter(slug=slug).count():
return False
## Check for path
if path and qs.filter(path=path).count():
return False
return True
def get_available_slug(title, new_slug=None):
"""Smart function generates slug for title if current title slug cannot be
used. Appends APPEND_TO_SLUG to slug and checks it again.
(Used in page copy function)
Returns: slug
"""
rewrite_slug = False
slug = new_slug or title.slug
# We need the full path for the title to check for conflicting urls
title.slug = slug
title.update_path()
path = title.path
# This checks for conflicting slugs/overwrite_url, for both published and unpublished pages
# This is a simpler check than in page_resolver.is_valid_url which
    # takes into account the actual page URL
if not is_valid_page_slug(title.page, title.page.parent, title.language, slug, title.page.site, path):
if title.has_url_overwrite and is_valid_page_slug(title.page, title.page.parent, title.language, slug, title.page.site, None):
# The title has an overwrite url so a slug change will not change the path and
# the validation fails only because the path already exists.
return slug
# add nice copy attribute, first is -copy, then -copy-2, -copy-3, ....
match = COPY_SLUG_REGEX.match(slug)
if match:
try:
next = int(match.groups()[0]) + 1
slug = "-".join(slug.split('-')[:-1]) + "-%d" % next
except TypeError:
slug = slug + "-2"
else:
slug = slug + APPEND_TO_SLUG
return get_available_slug(title, slug)
else:
return slug
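# Illustrative sketch (added commentary, not part of the original module):
# the "-copy" suffix logic above produces the following progression when the
# same title keeps colliding:
#
#   'about'        -> 'about-copy'      (APPEND_TO_SLUG added)
#   'about-copy'   -> 'about-copy-2'    (regex group is None -> TypeError branch)
#   'about-copy-2' -> 'about-copy-3'    (numeric suffix incremented)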
def check_title_slugs(page):
"""Checks page title slugs for duplicity if required, used after page move/
cut/paste.
"""
for title in page.title_set.all():
old_slug = title.slug
title.slug = get_available_slug(title)
if title.slug != old_slug:
title.save()
|
{
"content_hash": "98147c842f1bd05d4e6e39ed66e7779c",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 134,
"avg_line_length": 35.362637362637365,
"alnum_prop": 0.6093847110006215,
"repo_name": "pbs/django-cms",
"id": "4e2117873c06b71f1cfd7cead78ffc1cc3eefb2b",
"size": "3242",
"binary": false,
"copies": "1",
"ref": "refs/heads/support/2.3.x",
"path": "cms/utils/page.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "108603"
},
{
"name": "HTML",
"bytes": "289317"
},
{
"name": "JavaScript",
"bytes": "657946"
},
{
"name": "PHP",
"bytes": "4430"
},
{
"name": "Python",
"bytes": "2151038"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
import logging
import multiprocessing
import os
import pickle
import sys
# Add the root dir to the path
sys.path.append(os.path.abspath(os.path.join(sys.path[0],'../')))
# Custom libs
from lib.ids import IDS
from lib.printer import Printer
from lib.flags import *
flags['output_value'] = 'pager'
flags['sig'] = True
flags['sig_value'] = '5,7,8'
flags['violate'] = True
def load_dump():
dump_file = open(sys.argv[1], 'rb')
data = pickle.load(dump_file)
dump_file.close()
#src ip: list(data['all'].keys())[0]
#dst ip: list(data['all'][list(data['all'].keys())[0]]['targets'])[0]
data_length = len(data['all'][list(data['all'].keys())[0]]['targets'][list(data['all'][list(data['all'].keys())[0]]['targets'])[0]])
if data_length == 7 or data_length == 8 or data_length == 10:
flags['absolom'] = True
return data
def main():
if len(sys.argv) < 3:
print("Usage: {0} <result-dump> <tp, tn, fp, fn>".format(sys.argv[0]))
sys.exit()
data = load_dump()
ids = IDS()
ids.logger = logging.getLogger('IDS')
ids.extended = True
ids.flags = flags
ids.load_signature()
ids.data = data[sys.argv[2]]
if flags['threads'] == True:
threads = int(flags['threads_value'])
else:
threads = int(multiprocessing.cpu_count())
if threads < 1:
threads = 1
ids.threads = threads
ids.process_match()
ids.process_sort()
# Create a printing object
printer = Printer()
printer.logger = logging.getLogger('Printer')
printer.ids = ids
ids.process_sort()
printer.print_results()
if __name__ == "__main__":
main()
|
{
"content_hash": "3a505fce62e5d23a20501e0da1adb1b8",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 134,
"avg_line_length": 21.944444444444443,
"alnum_prop": 0.6310126582278481,
"repo_name": "ut-dacs/https-ids",
"id": "939eb057c5579537f787de809e859adc83e2b76e",
"size": "1703",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/rematch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "7095"
},
{
"name": "Python",
"bytes": "187118"
},
{
"name": "Shell",
"bytes": "6716"
}
],
"symlink_target": ""
}
|
"""
A standalone partial implementation of sbt
https://www.solutionsbytext.com/api-support/api-documentation/
"""
from .resources import RequireVBT, RequestVBT, Carrier, SendTemplateMessage, GetMessageStatus
__all__ = [
'RequireVBT', 'SendTemplateMessage', 'Carrier', 'GetMessageStatus', 'RequestVBT'
]
__version__ = '0.0.1'
|
{
"content_hash": "ee8a80da5590eb156f6fe50cf9ed3fc7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 93,
"avg_line_length": 25.615384615384617,
"alnum_prop": 0.7357357357357357,
"repo_name": "worthwhile/sbt",
"id": "bcff984dd3f9fe4b5322f3efa2cbb42d16824889",
"size": "333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sbt/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5009"
}
],
"symlink_target": ""
}
|
from genericpath import isfile
import os
from os.path import isdir
from yaiep.core.WorkingMemory import WorkingMemory
##
# Provides a way to give the user a friendlier interaction
# with the inference engine
#
class UIManager:
##
    # Asks the user for parameters needed to configure the working memory
    # @param wm: working memory to be populated with the entered parameters
#
@staticmethod
def get_input_from_user(wm, *params):
assert isinstance(wm, WorkingMemory)
is_template = params[2]
curr_value = None
if is_template:
slot_name = params[0]
fact = params[1]
templates_dict = wm.get_templates()
fact_name = fact.get_name()
if fact_name in templates_dict:
defined_template = templates_dict[fact_name]
curr_slot = defined_template.get_slot(slot_name)
curr_value = input('Insert value for \'{0}\' in {1} \n'
'(be careful to the following constraints - type({2}) / range ({3}) : '.\
format(slot_name, fact,
curr_slot.type if curr_slot.type else None,
curr_slot.range if curr_slot.range else None))
while not curr_slot.check_slot_value(curr_value):
print('Incorrect value specified...')
curr_value = input('Insert value for \'{0}\' in {1} \n'
'(be careful to the following constraints - type({2}) / range ({3}) : '.\
format(slot_name, fact,
curr_slot.type if curr_slot.type else None,
curr_slot.range if curr_slot.range else None))
else:
attr_id = params[0]
fact = params[1]
fact_attributes = fact.get_attributes()
len_attr = len(fact_attributes)
curr_value = []
for i in range(len_attr):
curr_value.append(input('Insert value for \'{0}\' in {1}->{2}: '.format(fact_attributes[i], fact.get_name(), fact.get_attributes())))
return curr_value
##
    # Asks the user whether to continue searching for solutions to the problem under analysis
#
@staticmethod
def continue_search():
value = input('Are you satisfied with this solution? (y/n): ')
if value == 'n':
return True
else:
return False
##
    # Asks the user which puzzle to load into memory
#
@staticmethod
def select_game():
GAMES_DIR = 'games'
DEFAULT_CONF_FILE = 'conf_file'
games = [x for x in os.listdir(GAMES_DIR) if
isdir(GAMES_DIR + "/" + x) and isfile(GAMES_DIR + "/" + x + "/" + DEFAULT_CONF_FILE)]
if len(games) < 1:
print("No games are present in the default directory (Do you have put them in the folder \'games\'?)")
return False
while True:
print("Available games:")
for i, game in enumerate(games):
print("\t" + str(i) + ") " + str(game))
try:
choosed = int(input('Select the puzzle that you want to solve: '))
print("You have chosen: " + str(games[choosed]))
print()
print("--------------------------------")
try:
return GAMES_DIR + os.sep + games[choosed] + os.sep, DEFAULT_CONF_FILE
except Exception as ex:
print('An error occurs :(')
print("--------------------------------")
print()
except (ValueError, TypeError):
return None, None
|
{
"content_hash": "a5bca17c488479e7ade021c17db93784",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 149,
"avg_line_length": 39.333333333333336,
"alnum_prop": 0.5087313816127376,
"repo_name": "aleSuglia/YAIEP",
"id": "6e70603e7c1524cf31559350fe8df26957f14a7c",
"size": "3895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yaiep/core/UIManager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "151155"
}
],
"symlink_target": ""
}
|
"""
This is just a stub to redirect a function to its new location. This module
and the containing package can be deleted after a few days.
"""
from metatools.deprecate import renamed_func
from sgfs.maya.workspace import open_parent_in_shotgun
# The only function that was exported previously.
run = renamed_func(open_parent_in_shotgun, 'run', __name__)
|
{
"content_hash": "cb16cd47de8f30f1aad5f709b023883d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 27.615384615384617,
"alnum_prop": 0.7604456824512534,
"repo_name": "westernx/sgfs",
"id": "0d28e83b5d2dfd76ce8a02d9235e2f1c86416121",
"size": "359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sgfs/mayatools/open_in_shotgun.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "236515"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
}
|
import copy
import hashlib
from bson.json_util import dumps
from datetime import datetime, timedelta
from eve.tests import TestBase
from eve.utils import parse_request, str_to_date, config, weak_date, \
date_to_str, querydef, document_etag, extract_key_values, \
debug_error_message, validate_filters
class TestUtils(TestBase):
""" collection, document and home_link methods (and resource_uri, which is
used by all of them) are tested in 'tests.methods' since we need an active
flaskapp context
"""
def setUp(self):
super(TestUtils, self).setUp()
self.dt_fmt = config.DATE_FORMAT
self.datestr = 'Tue, 18 Sep 2012 10:12:30 GMT'
self.valid = datetime.strptime(self.datestr, self.dt_fmt)
self.etag = '56eaadbbd9fa287e7270cf13a41083c94f52ab9b'
def test_parse_request_where(self):
self.app.config['DOMAIN'][self.known_resource]['allowed_filters'] = \
['ref']
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).where, None)
with self.app.test_request_context('/?where=hello'):
self.assertEqual(parse_request(self.known_resource).where, 'hello')
def test_parse_request_sort(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).sort, None)
with self.app.test_request_context('/?sort=hello'):
self.assertEqual(parse_request(self.known_resource).sort, 'hello')
def test_parse_request_page(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=2'):
self.assertEqual(parse_request(self.known_resource).page, 2)
with self.app.test_request_context('/?page=-1'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=0'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=1.1'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=string'):
self.assertEqual(parse_request(self.known_resource).page, 1)
def test_parse_request_max_results(self):
default = config.PAGINATION_DEFAULT
limit = config.PAGINATION_LIMIT
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=%d' % (limit + 1)):
self.assertEqual(parse_request(self.known_resource).max_results,
limit)
with self.app.test_request_context('/?max_results=2'):
self.assertEqual(parse_request(self.known_resource).max_results, 2)
with self.app.test_request_context('/?max_results=-1'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=0'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=1.1'):
self.assertEqual(parse_request(self.known_resource).max_results, 1)
with self.app.test_request_context('/?max_results=string'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
def test_parse_request_max_results_disabled_pagination(self):
self.app.config['DOMAIN'][self.known_resource]['pagination'] = False
default = 0
limit = config.PAGINATION_LIMIT
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=%d' % (limit + 1)):
self.assertEqual(parse_request(self.known_resource).max_results,
limit + 1)
with self.app.test_request_context('/?max_results=2'):
self.assertEqual(parse_request(self.known_resource).max_results, 2)
with self.app.test_request_context('/?max_results=-1'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=0'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=1.1'):
self.assertEqual(parse_request(self.known_resource).max_results, 1)
with self.app.test_request_context('/?max_results=string'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
def test_parse_request_if_modified_since(self):
ims = 'If-Modified-Since'
with self.app.test_request_context():
self.assertEqual(parse_request(
self.known_resource).if_modified_since, None)
with self.app.test_request_context(headers=None):
self.assertEqual(
parse_request(self.known_resource).if_modified_since, None)
with self.app.test_request_context(headers={ims: self.datestr}):
self.assertEqual(
parse_request(self.known_resource).if_modified_since,
self.valid + timedelta(seconds=1))
with self.app.test_request_context(headers={ims: 'not-a-date'}):
self.assertRaises(ValueError, parse_request, self.known_resource)
with self.app.test_request_context(
headers={ims:
self.datestr.replace('GMT', 'UTC')}):
self.assertRaises(ValueError, parse_request, self.known_resource)
self.assertRaises(ValueError, parse_request, self.known_resource)
def test_parse_request_if_none_match(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).if_none_match,
None)
with self.app.test_request_context(headers=None):
self.assertEqual(parse_request(self.known_resource).if_none_match,
None)
with self.app.test_request_context(headers={'If-None-Match':
self.etag}):
self.assertEqual(parse_request(self.known_resource).if_none_match,
self.etag)
def test_parse_request_if_match(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).if_match, None)
with self.app.test_request_context(headers=None):
self.assertEqual(parse_request(self.known_resource).if_match, None)
with self.app.test_request_context(headers={'If-Match': self.etag}):
self.assertEqual(parse_request(self.known_resource).if_match,
self.etag)
def test_weak_date(self):
with self.app.test_request_context():
self.app.config['DATE_FORMAT'] = '%Y-%m-%d'
self.assertEqual(weak_date(self.datestr), self.valid +
timedelta(seconds=1))
def test_str_to_date(self):
self.assertEqual(str_to_date(self.datestr), self.valid)
self.assertRaises(ValueError, str_to_date, 'not-a-date')
self.assertRaises(ValueError, str_to_date,
self.datestr.replace('GMT', 'UTC'))
def test_date_to_str(self):
self.assertEqual(date_to_str(self.valid), self.datestr)
def test_querydef(self):
self.assertEqual(querydef(max_results=10), '?max_results=10')
self.assertEqual(querydef(page=10), '?page=10')
self.assertEqual(querydef(where='wherepart'), '?where=wherepart')
self.assertEqual(querydef(sort='sortpart'), '?sort=sortpart')
self.assertEqual(querydef(where='wherepart', sort='sortpart'),
'?where=wherepart&sort=sortpart')
self.assertEqual(querydef(max_results=10, sort='sortpart'),
'?max_results=10&sort=sortpart')
def test_document_etag(self):
test = {'key1': 'value1', 'another': 'value2'}
challenge = dumps(test, sort_keys=True).encode('utf-8')
with self.app.test_request_context():
self.assertEqual(hashlib.sha1(challenge).hexdigest(),
document_etag(test))
def test_document_etag_ignore_fields(self):
test = {'key1': 'value1', 'key2': 'value2'}
ignore_fields = ["key2"]
test_without_ignore = {'key1': 'value1'}
challenge = dumps(test_without_ignore, sort_keys=True).encode('utf-8')
with self.app.test_request_context():
self.assertEqual(hashlib.sha1(challenge).hexdigest(),
document_etag(test, ignore_fields))
        # fields listed in ignore_fields do not have to be present in the document
test = {'key1': 'value1', 'key2': 'value2'}
ignore_fields = ["key3"]
test_without_ignore = {'key1': 'value1', 'key2': 'value2'}
challenge = dumps(test_without_ignore, sort_keys=True).encode('utf-8')
with self.app.test_request_context():
self.assertEqual(hashlib.sha1(challenge).hexdigest(),
document_etag(test, ignore_fields))
        # ignore nested fields using dot notation
test = {'key1': 'value1', 'dict': {'key2': 'value2', 'key3': 'value3'}}
ignore_fields = ['dict.key2']
test_without_ignore = {'key1': 'value1', 'dict': {'key3': 'value3'}}
challenge = dumps(test_without_ignore, sort_keys=True).encode('utf-8')
with self.app.test_request_context():
self.assertEqual(hashlib.sha1(challenge).hexdigest(),
document_etag(test, ignore_fields))
def test_extract_key_values(self):
test = {
'key1': 'value1',
'key2': {
'key1': 'value2',
'nested': {
'key1': 'value3'
}
}
}
self.assertEqual(list(extract_key_values('key1', test)),
['value1', 'value2', 'value3'])
def test_debug_error_message(self):
with self.app.test_request_context():
self.app.config['DEBUG'] = False
self.assertEqual(debug_error_message('An error message'), None)
self.app.config['DEBUG'] = True
self.assertEqual(debug_error_message('An error message'),
'An error message')
def test_validate_filters(self):
self.app.config['DOMAIN'][self.known_resource]['allowed_filters'] = []
with self.app.test_request_context():
self.assertTrue('key' in validate_filters(
{'key': 'val'},
self.known_resource))
self.assertTrue('key' in validate_filters(
{'key': ['val1', 'val2']},
self.known_resource))
self.assertTrue('key' in validate_filters(
{'key': {'$in': ['val1', 'val2']}},
self.known_resource))
self.assertTrue('key' in validate_filters(
{'$or': [{'key': 'val1'}, {'key': 'val2'}]},
self.known_resource))
self.assertTrue('$or' in validate_filters(
{'$or': 'val'},
self.known_resource))
self.assertTrue('$or' in validate_filters(
{'$or': {'key': 'val1'}},
self.known_resource))
self.assertTrue('$or' in validate_filters(
{'$or': ['val']},
self.known_resource))
self.app.config['DOMAIN'][self.known_resource]['allowed_filters'] = \
['key']
with self.app.test_request_context():
self.assertTrue(validate_filters(
{'key': 'val'},
self.known_resource) is None)
self.assertTrue(validate_filters(
{'key': ['val1', 'val2']},
self.known_resource) is None)
self.assertTrue(validate_filters(
{'key': {'$in': ['val1', 'val2']}},
self.known_resource) is None)
self.assertTrue(validate_filters(
{'$or': [{'key': 'val1'}, {'key': 'val2'}]},
self.known_resource) is None)
class DummyEvent(object):
"""
    Event handler that records the call parameters and asserts a check
Usage::
app = Eve()
app.on_my_event = DummyEvent(element_not_deleted)
In the test::
assert app.on_my_event.called[0] == expected_param_0
"""
def __init__(self, check, deepcopy=False):
"""
:param check: method checking the state of something during the event.
:type: check: callable returning bool
:param deepcopy: Do we need to store a copy of the argument calls? In
some events arguments are changed after the event, so keeping a
reference to the original object doesn't allow a test to check what
was passed. The default is False.
:type deepcopy: bool
"""
self.__called = None
self.__check = check
self.__deepcopy = deepcopy
def __call__(self, *args):
assert self.__check()
        # In some methods the arguments are changed after the event
if self.__deepcopy:
args = copy.deepcopy(args)
self.__called = args
@property
def called(self):
"""
The results of the call to the event.
:rtype: It returns None if the event hasn't been called or a tuple with
the positional arguments of the last call if called.
"""
return self.__called
|
{
"content_hash": "640d8f899213bc8a1ed4abe2011e13e5",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 79,
"avg_line_length": 46.118032786885244,
"alnum_prop": 0.5844589790985355,
"repo_name": "hustlzp/eve",
"id": "fc2a66a508ed4c148ee456b4108137092dbb760b",
"size": "14091",
"binary": false,
"copies": "16",
"ref": "refs/heads/develop",
"path": "eve/tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "685473"
}
],
"symlink_target": ""
}
|
import unittest2 as unittest
from nose.plugins.attrib import attr
from mock import MagicMock, patch
import sys
from jnpr.junos.console import Console
if sys.version < '3':
builtin_string = '__builtin__'
else:
builtin_string = 'builtins'
@attr('unit')
class TestSerial(unittest.TestCase):
@patch('jnpr.junos.transport.tty_serial.serial.Serial.open')
@patch('jnpr.junos.transport.tty_serial.serial.Serial.write')
@patch('jnpr.junos.transport.tty_serial.serial.Serial.flush')
@patch('jnpr.junos.transport.tty_serial.Serial.read_prompt')
@patch('jnpr.junos.transport.tty.tty_netconf.open')
def setUp(self, mock_nc_open, mock_read,
mock_flush, mock_write, mock_open):
self.dev = Console(port='USB/ttyUSB0', baud=9600, mode='Serial')
mock_read.side_effect = [
('login', 'login'), ('passwd', 'passwd'), ('shell', 'shell')]
self.dev.open()
@patch('jnpr.junos.transport.tty.sleep')
@patch('jnpr.junos.transport.tty.tty_netconf.close')
@patch('jnpr.junos.transport.tty_serial.Serial.read_prompt')
@patch('jnpr.junos.transport.tty_serial.Serial.write')
@patch('jnpr.junos.transport.tty_serial.Serial._tty_close')
def tearDown(self, mock_serial_close, mock_write, mock_read, mock_close,
mock_sleep):
# mock_read.side_effect = [('shell', 'shell'), ('login', 'login'),
mock_read.side_effect = [('shell', 'shell'), ('login', 'login'),
('cli', 'cli'), ]
self.dev.close()
def test_console_connected(self):
self.assertTrue(self.dev.connected)
def test_close_connection(self):
self.dev._tty._ser = MagicMock()
self.dev.close(skip_logout=True)
self.assertTrue(self.dev._tty._ser.close.called)
@patch('jnpr.junos.transport.tty_serial.serial.Serial.open')
def test_tty_serial_open_exception(self, mock_open):
dev = Console(port='USB/ttyUSB0', baud=9600, mode='Serial')
mock_open.side_effect = OSError
self.assertRaises(RuntimeError, dev.open)
def test_tty_serial_rawwrite(self):
self.dev._tty._ser = MagicMock()
self.dev._tty.rawwrite('test')
self.dev._tty._ser.write.assert_called_with('test')
def test_tty_serial_read(self):
self.dev._tty._ser = MagicMock()
self.dev._tty.read()
        self.assertTrue(self.dev._tty._ser.readline.called)
def test_tty_serial_read_prompt(self):
self.dev._tty._ser = MagicMock()
self.dev._tty.EXPECT_TIMEOUT = 0.1
self.dev._tty._ser.readline.side_effect = ['', 'test']
self.assertEqual(self.dev._tty.read_prompt()[0], None)
|
{
"content_hash": "e299c976dc79c4e969ff9055af8fb074",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 76,
"avg_line_length": 38.82608695652174,
"alnum_prop": 0.6364315042926465,
"repo_name": "fostasha/pynet_test",
"id": "8a1a5e1f9838530f3c455047f588517b7b39893b",
"size": "2679",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "py-junos-eznc/tests/unit/transport/test_serial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6465"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Pascal",
"bytes": "408"
},
{
"name": "Puppet",
"bytes": "2263"
},
{
"name": "Python",
"bytes": "258744"
},
{
"name": "Ruby",
"bytes": "4840"
},
{
"name": "Shell",
"bytes": "597"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from stackclient.views import RegisterView
urlpatterns = patterns('',
url(r'^register$', RegisterView.as_view(), name='register'),
)
|
{
"content_hash": "6c04d7ccf8db30bbadc29dc8e8c7848d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 83,
"avg_line_length": 25.25,
"alnum_prop": 0.6633663366336634,
"repo_name": "JamesMura/furry-meme-py",
"id": "7d543ca7f259e502c9201ede7cba8e7be40c9d91",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stackclient/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
from fabric.api import *
from fabric.tasks import execute
env.roledefs = {
'qemu': ['ec2-user@qemu-us1.cloudpebble.net', 'ec2-user@qemu-us2.cloudpebble.net'],
'ycmd': ['root@ycm2.cloudpebble.net', 'root@ycm3.cloudpebble.net', 'root@ycm4.cloudpebble.net'],
}
env.key_filename = ['~/.ssh/id_rsa', '~/Downloads/katharine-keypair.pem']
@task
@roles('qemu')
@parallel
def update_qemu_service():
with cd("cloudpebble-qemu-controller"):
run("git pull")
run("git submodule update --init --recursive")
with prefix(". .env/bin/activate"):
run("pip install -r requirements.txt")
sudo("restart cloudpebble-qemu")
@task
@roles('qemu')
@parallel
def update_qemu_sdk():
with cd('qemu'):
run("git pull")
run("make -j8")
with cd("qemu-tintin-images"):
run("git pull")
with cd("pypkjs"):
run("git pull")
run("git submodule update --init --recursive")
with prefix(". .env/bin/activate"):
run("pip install -r requirements.txt")
@task
@roles('qemu')
@parallel
def restart_qemu_service():
sudo("restart cloudpebble-qemu")
@task
@roles('ycmd')
@parallel
def update_ycmd_sdk(sdk_version):
with cd("/home/ycm"), settings(sudo_user="ycm", shell="/bin/bash -c"):
sudo("wget -nv -O sdk.tar.gz https://sdk.getpebble.com/download/%s?source=cloudpebble" % sdk_version)
sudo("tar -xf sdk.tar.gz")
sudo("rm -rf sdk3")
sudo("mv PebbleSDK-%s sdk3" % sdk_version)
@task
@roles('ycmd')
@parallel
def update_ycmd_service():
with cd("/home/ycm/proxy"), settings(sudo_user="ycm", shell="/bin/bash -c"):
sudo("git pull")
run("pip install --upgrade -r requirements.txt")
run("restart ycmd-proxy")
@task
@roles('ycmd')
@parallel
def restart_ycmd_service():
run("restart ycmd-proxy")
@task
def deploy_heroku():
local("git push heroku master")
@task
def restart_heroku():
local("heroku restart -a cloudpebble")
@task
def update_all_services():
execute(update_qemu_service)
execute(update_ycmd_service)
execute(deploy_heroku)
@task
def restart_everything():
execute(restart_qemu_service)
execute(restart_ycmd_service)
execute(restart_heroku)
@task
@runs_once
def update_qemu_images(sdk_version):
# Merge conflicts are no fun.
with lcd("~/projects/qemu-tintin-images"):
local("git pull")
with lcd("~/projects/tintin"):
local("git checkout v%s" % sdk_version)
with prefix(". .env/bin/activate"):
local("pypy ./waf configure --board=snowy_bb --qemu --release --sdkshell build qemu_image_spi qemu_image_micro")
local("cp build/qemu_* ~/projects/qemu-tintin-images/basalt/3.0/")
with lcd("~/projects/qemu-tintin-images"):
local("git commit -a -m 'Update to v%s'" % sdk_version)
local("git push")
@task
@runs_once
def update_cloudpebble_sdk(sdk_version):
local("sed -i.bak 's/download\/3.[a-z0-9-]*/download\/%s/' bin/post_compile bootstrap.sh" % sdk_version)
local("git add bin/post_compile bootstrap.sh")
local("git commit -m 'Update to v%s'" % sdk_version)
local("git push")
execute(deploy_heroku)
@task
def update_sdk(sdk_version):
execute(update_qemu_images, sdk_version)
execute(update_qemu_sdk)
execute(update_ycmd_sdk, sdk_version)
execute(update_cloudpebble_sdk, sdk_version)
@task
def update_all(sdk_version):
execute(update_qemu_images, sdk_version)
execute(update_qemu_sdk)
execute(update_qemu_service)
execute(update_ycmd_sdk, sdk_version)
execute(update_ycmd_service)
execute(update_cloudpebble_sdk, sdk_version)
|
{
"content_hash": "c1ec4f05474806f8486fd1638ff79ee3",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 124,
"avg_line_length": 26.02836879432624,
"alnum_prop": 0.6474114441416894,
"repo_name": "math-foo/cloudpebble",
"id": "3ea5069b138e3dcc8cf771dc9f30c714865526a9",
"size": "4153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "62120"
},
{
"name": "HTML",
"bytes": "94707"
},
{
"name": "JavaScript",
"bytes": "380016"
},
{
"name": "Python",
"bytes": "573889"
},
{
"name": "Shell",
"bytes": "7204"
}
],
"symlink_target": ""
}
|
import sys
import getpass
import cx_Oracle # the package used for accessing Oracle in Python
import login
import menu
import bookings
import user
import search
def connect(connection_url):
menu.clearScreen()
print("Connect to Airline Booking Database\n")
# get username
user = input("Oracle Username: ")
if not user:
user = getpass.getuser()
# get password
pw = getpass.getpass()
    # The URL we are connecting to
    conString = user + '/' + pw + connection_url
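    # For example (illustrative values, not from the original source): user
    # "scott", password "tiger" and connection_url "@localhost:1521/xe" would
    # produce the EZConnect-style string "scott/tiger@localhost:1521/xe",
    # which is the single connect-string form cx_Oracle.connect() accepts.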
print("\nConnecting...\n")
try:
# Establish a connection in Python
connection = cx_Oracle.connect(conString)
print("Connected!")
return connection
except cx_Oracle.DatabaseError as exc:
error, = exc.args
print(sys.stderr, "Oracle code:", error.code)
print(sys.stderr, "Oracle message:", error.message)
def process(option, connection, current_user):
if option == 0:
search.search_flights(connection,str(current_user.email))
if option == 1:
bookings.list(connection,str(current_user.email))
if option == 2:
login.logout(connection, str(current_user.email))
if option == 3:
user.record_dep(connection)
if option == 4:
user.record_arr(connection)
def cursor(connection = None):
return connection.cursor()
def close(connection = None, cursor = None):
# close the connection
if cursor != None:
cursor.close()
if connection !=None:
connection.close()
def read(query = None, cursor = None):
if query != None and cursor!= None:
# read from a table
cursor.execute(query)
# get all data and print it
rows = cursor.fetchall()
result = list()
for row in rows:
for x in row:
result.append(x)
return result
|
{
"content_hash": "b519df85dadbf1fa4b54bdefc0e8e862",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 67,
"avg_line_length": 24.075949367088608,
"alnum_prop": 0.6088328075709779,
"repo_name": "k----n/AirlineBooking",
"id": "604f2094a8d3f1e3a2106774930ba1adbd3fa3ed",
"size": "2501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "database.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "50056"
}
],
"symlink_target": ""
}
|
"""Convenience functions for opening GUIs."""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD-3-Clause
from ..utils import verbose, get_config, warn
@verbose
def coregistration(tabbed=False, split=True, width=None, inst=None,
subject=None, subjects_dir=None, guess_mri_subject=None,
height=None, head_opacity=None, head_high_res=None,
trans=None, scrollable=True, *,
orient_to_surface=True, scale_by_distance=True,
mark_inside=True, interaction=None, scale=None,
advanced_rendering=None, head_inside=True,
fullscreen=None, show=True, block=False, verbose=None):
"""Coregister an MRI with a subject's head shape.
The GUI can be launched through the command line interface:
.. code-block:: bash
$ mne coreg
or using a python interpreter as shown in :ref:`tut-source-alignment`.
Parameters
----------
tabbed : bool
Combine the data source panel and the coregistration panel into a
single panel with tabs.
split : bool
Split the main panels with a movable splitter (good for QT4 but
unnecessary for wx backend).
width : int | None
Specify the width for window (in logical pixels).
Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value
(which defaults to 800).
inst : None | str
Path to an instance file containing the digitizer data. Compatible for
Raw, Epochs, and Evoked files.
subject : None | str
Name of the mri subject.
%(subjects_dir)s
guess_mri_subject : bool
When selecting a new head shape file, guess the subject's name based
on the filename and change the MRI subject accordingly (default True).
height : int | None
Specify a height for window (in logical pixels).
        Default is None, which uses ``MNE_COREG_WINDOW_HEIGHT`` config value
        (which defaults to 600).
head_opacity : float | None
The opacity of the head surface in the range [0., 1.].
Default is None, which uses ``MNE_COREG_HEAD_OPACITY`` config value
        (which defaults to 0.8).
head_high_res : bool | None
Use a high resolution head surface.
Default is None, which uses ``MNE_COREG_HEAD_HIGH_RES`` config value
(which defaults to True).
trans : str | None
The transform file to use.
scrollable : bool
Make the coregistration panel vertically scrollable (default True).
orient_to_surface : bool | None
If True (default), orient EEG electrode and head shape points
to the head surface.
.. versionadded:: 0.16
scale_by_distance : bool | None
If True (default), scale the digitization points by their
distance from the scalp surface.
.. versionadded:: 0.16
mark_inside : bool | None
If True (default), mark points inside the head surface in a
different color.
.. versionadded:: 0.16
%(interaction_scene_none)s
Defaults to ``'terrain'``.
.. versionadded:: 0.16
.. versionchanged:: 1.0
Default interaction mode if ``None`` and no config setting found
changed from ``'trackball'`` to ``'terrain'``.
scale : float | None
The scaling for the scene.
.. versionadded:: 0.16
advanced_rendering : bool
Use advanced OpenGL rendering techniques (default True).
For some renderers (such as MESA software) this can cause rendering
bugs.
.. versionadded:: 0.18
head_inside : bool
If True (default), add opaque inner scalp head surface to help occlude
points behind the head.
.. versionadded:: 0.23
%(fullscreen)s
Default is None, which uses ``MNE_COREG_FULLSCREEN`` config value
(which defaults to False).
.. versionadded:: 1.1
show : bool
Show the GUI if True.
block : bool
Whether to halt program execution until the figure is closed.
%(verbose)s
Returns
-------
frame : instance of CoregistrationUI
The coregistration frame.
Notes
-----
Many parameters (e.g., ``head_opacity``) take None as a parameter,
which means that the default will be read from the MNE-Python
configuration file (which gets saved when exiting).
Step by step instructions for the coregistrations are shown below:
.. youtube:: uK4n5g6DBcg
"""
unsupported_params = {
'tabbed': (tabbed, False),
'split': (split, True),
'scrollable': (scrollable, True),
'head_inside': (head_inside, True),
'guess_mri_subject': guess_mri_subject,
'scale': scale,
'advanced_rendering': advanced_rendering,
}
for key, val in unsupported_params.items():
if isinstance(val, tuple):
to_raise = val[0] != val[1]
else:
to_raise = val is not None
if to_raise:
warn(f"The parameter {key} is not supported with"
" the pyvistaqt 3d backend. It will be ignored.")
config = get_config()
if guess_mri_subject is None:
guess_mri_subject = config.get(
'MNE_COREG_GUESS_MRI_SUBJECT', 'true') == 'true'
if head_high_res is None:
head_high_res = config.get('MNE_COREG_HEAD_HIGH_RES', 'true') == 'true'
if advanced_rendering is None:
advanced_rendering = \
config.get('MNE_COREG_ADVANCED_RENDERING', 'true') == 'true'
if head_opacity is None:
head_opacity = config.get('MNE_COREG_HEAD_OPACITY', 0.8)
if head_inside is None:
head_inside = \
config.get('MNE_COREG_HEAD_INSIDE', 'true').lower() == 'true'
if width is None:
width = config.get('MNE_COREG_WINDOW_WIDTH', 800)
if height is None:
height = config.get('MNE_COREG_WINDOW_HEIGHT', 600)
if subjects_dir is None:
if 'SUBJECTS_DIR' in config:
subjects_dir = config['SUBJECTS_DIR']
elif 'MNE_COREG_SUBJECTS_DIR' in config:
subjects_dir = config['MNE_COREG_SUBJECTS_DIR']
if orient_to_surface is None:
orient_to_surface = (config.get('MNE_COREG_ORIENT_TO_SURFACE', '') ==
'true')
if scale_by_distance is None:
scale_by_distance = (config.get('MNE_COREG_SCALE_BY_DISTANCE', '') ==
'true')
if interaction is None:
interaction = config.get('MNE_COREG_INTERACTION', 'terrain')
if mark_inside is None:
mark_inside = config.get('MNE_COREG_MARK_INSIDE', '') == 'true'
if scale is None:
scale = config.get('MNE_COREG_SCENE_SCALE', 0.16)
if fullscreen is None:
fullscreen = config.get('MNE_COREG_FULLSCREEN', '') == 'true'
head_opacity = float(head_opacity)
head_inside = bool(head_inside)
width = int(width)
height = int(height)
scale = float(scale)
from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING
from ._coreg import CoregistrationUI
if MNE_3D_BACKEND_TESTING:
show = block = False
return CoregistrationUI(
info_file=inst, subject=subject, subjects_dir=subjects_dir,
head_resolution=head_high_res, head_opacity=head_opacity,
orient_glyphs=orient_to_surface, scale_by_distance=scale_by_distance,
mark_inside=mark_inside, trans=trans, size=(width, height), show=show,
block=block, interaction=interaction, fullscreen=fullscreen,
verbose=verbose
)
@verbose
def locate_ieeg(info, trans, aligned_ct, subject=None, subjects_dir=None,
groups=None, show=True, block=False, verbose=None):
"""Locate intracranial electrode contacts.
Parameters
----------
%(info_not_none)s
%(trans_not_none)s
aligned_ct : path-like | nibabel.spatialimages.SpatialImage
The CT image that has been aligned to the Freesurfer T1. Path-like
inputs and nibabel image objects are supported.
%(subject)s
%(subjects_dir)s
groups : dict | None
A dictionary with channels as keys and their group index as values.
If None, the groups will be inferred by the channel names. Channel
names must have a format like ``LAMY 7`` where a string prefix
like ``LAMY`` precedes a numeric index like ``7``. If the channels
are formatted improperly, group plotting will work incorrectly.
Group assignments can be adjusted in the GUI.
show : bool
Show the GUI if True.
block : bool
Whether to halt program execution until the figure is closed.
%(verbose)s
Returns
-------
gui : instance of IntracranialElectrodeLocator
The graphical user interface (GUI) window.
"""
from ..viz.backends._utils import _qt_app_exec
from ._ieeg_locate_gui import IntracranialElectrodeLocator
from qtpy.QtWidgets import QApplication
# get application
app = QApplication.instance()
if app is None:
app = QApplication(["Intracranial Electrode Locator"])
gui = IntracranialElectrodeLocator(
info, trans, aligned_ct, subject=subject, subjects_dir=subjects_dir,
groups=groups, show=show, verbose=verbose)
if block:
_qt_app_exec(app)
return gui
class _GUIScraper(object):
"""Scrape GUI outputs."""
def __repr__(self):
return '<GUIScraper>'
def __call__(self, block, block_vars, gallery_conf):
from ._ieeg_locate_gui import IntracranialElectrodeLocator
from ._coreg import CoregistrationUI
from sphinx_gallery.scrapers import figure_rst
from qtpy import QtGui
for gui in block_vars['example_globals'].values():
if (isinstance(gui, (IntracranialElectrodeLocator,
CoregistrationUI)) and
not getattr(gui, '_scraped', False) and
gallery_conf['builder_name'] == 'html'):
gui._scraped = True # monkey-patch but it's easy enough
img_fname = next(block_vars['image_path_iterator'])
# TODO fix in window refactor
window = gui if hasattr(gui, 'grab') else gui._renderer._window
# window is QWindow
# https://doc.qt.io/qt-5/qwidget.html#grab
pixmap = window.grab()
if hasattr(gui, '_renderer'): # if no renderer, no need
# Now the tricky part: we need to get the 3D renderer,
# extract the image from it, and put it in the correct
# place in the pixmap. The easiest way to do this is
# actually to save the 3D image first, then load it
# using QPixmap and Qt geometry.
plotter = gui._renderer.plotter
plotter.screenshot(img_fname)
sub_pixmap = QtGui.QPixmap(img_fname)
# https://doc.qt.io/qt-5/qwidget.html#mapTo
# https://doc.qt.io/qt-5/qpainter.html#drawPixmap-1
QtGui.QPainter(pixmap).drawPixmap(
plotter.mapTo(window, plotter.rect().topLeft()),
sub_pixmap)
# https://doc.qt.io/qt-5/qpixmap.html#save
pixmap.save(img_fname)
try: # for compatibility with both GUIs, will be refactored
gui._renderer.close() # TODO should be triggered by close
except Exception:
pass
gui.close()
return figure_rst(
[img_fname], gallery_conf['src_dir'], 'GUI')
return ''
|
{
"content_hash": "b5dbbdcc4951ff7dc871be9e0c50616b",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 79,
"avg_line_length": 39.777027027027025,
"alnum_prop": 0.6065058603703074,
"repo_name": "pravsripad/mne-python",
"id": "569e01b396257ff5725033741baa8c58af0cafc6",
"size": "11774",
"binary": false,
"copies": "2",
"ref": "refs/heads/master_dev",
"path": "mne/gui/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "JavaScript",
"bytes": "8008"
},
{
"name": "Jinja",
"bytes": "13067"
},
{
"name": "Makefile",
"bytes": "4528"
},
{
"name": "Python",
"bytes": "10058139"
},
{
"name": "Sass",
"bytes": "257"
},
{
"name": "Shell",
"bytes": "20004"
}
],
"symlink_target": ""
}
|
import sys
import cyclone.auth
import cyclone.escape
import cyclone.web
from twisted.python import log
from twisted.internet import reactor
from pycket.session import SessionMixin
class Application(cyclone.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/auth/login", AuthHandler),
(r"/auth/logout", LogoutHandler),
]
settings = dict(
cookie_secret="32oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
debug=True,
login_url="/auth/login",
logout_url="/auth/logout",
)
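        # Note (added, not from the original source): the placeholder
        # cookie_secret above should be replaced with a real random value,
        # e.g. generated with:
        #     python -c "import base64, os; print(base64.b64encode(os.urandom(32)))"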
settings['pycket'] = {
'engine': 'redis',
'storage': {
'host': 'localhost',
'port': 6379,
'db_sessions': 10,
'db_notifications': 11
}
}
cyclone.web.Application.__init__(self, handlers, **settings)
class BaseHandler(cyclone.web.RequestHandler, SessionMixin):
def get_current_user(self):
user = self.session.get('user')
if not user:
return None
return user
class MainHandler(BaseHandler):
@cyclone.web.authenticated
def get(self):
name = cyclone.escape.xhtml_escape(self.current_user)
self.write("Hello, " + name)
self.write("<br><br><a href=\"/auth/logout\">Log out</a>")
class AuthHandler(BaseHandler, SessionMixin):
def get(self):
self.write('<form method="post">'
'Enter your username: <input name="username" type="text">'
'<button type="submit" class="btn">Login</button></form>')
def post(self):
username = self.get_argument('username')
if not username:
self.write('<form method="post">Enter your username: '
'<input name="username" type="text">'
'<button type="submit" class="btn">Login</button>'
'</form>')
else:
self.session.set('user', username)
self.redirect('/')
class LogoutHandler(BaseHandler, SessionMixin):
def get(self):
self.session.delete('user')
self.redirect("/")
def main():
log.startLogging(sys.stdout)
reactor.listenTCP(8888, Application(), interface="127.0.0.1")
reactor.run()
if __name__ == "__main__":
main()
|
{
"content_hash": "c612e2ec4238a9471ae744f042f50a26",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 77,
"avg_line_length": 27.344827586206897,
"alnum_prop": 0.5565363598150483,
"repo_name": "fiorix/cyclone",
"id": "cf9b7cd9915330f9c638d777d9f126d9e5b002b3",
"size": "3043",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "demos/pycket/pycketdemo.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2386"
},
{
"name": "HTML",
"bytes": "32384"
},
{
"name": "Makefile",
"bytes": "642"
},
{
"name": "Python",
"bytes": "518718"
},
{
"name": "Shell",
"bytes": "9517"
}
],
"symlink_target": ""
}
|
import graphene
from ...core.permissions import OrderPermissions, ProductPermissions
from ..core.enums import ReportingPeriod
from ..core.fields import FilterInputConnectionField, PrefetchingConnectionField
from ..decorators import permission_required
from ..descriptions import DESCRIPTIONS
from ..translations.mutations import (
AttributeTranslate,
AttributeValueTranslate,
CategoryTranslate,
CollectionTranslate,
ProductTranslate,
ProductVariantTranslate,
)
from .bulk_mutations.attributes import AttributeBulkDelete, AttributeValueBulkDelete
from .bulk_mutations.products import (
CategoryBulkDelete,
CollectionBulkDelete,
CollectionBulkPublish,
ProductBulkDelete,
ProductBulkPublish,
ProductImageBulkDelete,
ProductTypeBulkDelete,
ProductVariantBulkCreate,
ProductVariantBulkDelete,
)
from .enums import StockAvailability
from .filters import (
AttributeFilterInput,
CategoryFilterInput,
CollectionFilterInput,
ProductFilterInput,
ProductTypeFilterInput,
)
from .mutations.attributes import (
AttributeAssign,
AttributeClearMeta,
AttributeClearPrivateMeta,
AttributeCreate,
AttributeDelete,
AttributeReorderValues,
AttributeUnassign,
AttributeUpdate,
AttributeUpdateMeta,
AttributeUpdatePrivateMeta,
AttributeValueCreate,
AttributeValueDelete,
AttributeValueUpdate,
ProductTypeReorderAttributes,
)
from .mutations.digital_contents import (
DigitalContentCreate,
DigitalContentDelete,
DigitalContentUpdate,
DigitalContentUrlCreate,
)
from .mutations.products import (
CategoryClearMeta,
CategoryClearPrivateMeta,
CategoryCreate,
CategoryDelete,
CategoryUpdate,
CategoryUpdateMeta,
CategoryUpdatePrivateMeta,
CollectionAddProducts,
CollectionClearMeta,
CollectionClearPrivateMeta,
CollectionCreate,
CollectionDelete,
CollectionRemoveProducts,
CollectionReorderProducts,
CollectionUpdate,
CollectionUpdateMeta,
CollectionUpdatePrivateMeta,
ProductClearMeta,
ProductClearPrivateMeta,
ProductCreate,
ProductDelete,
ProductImageCreate,
ProductImageDelete,
ProductImageReorder,
ProductImageUpdate,
ProductTypeClearMeta,
ProductTypeClearPrivateMeta,
ProductTypeCreate,
ProductTypeDelete,
ProductTypeUpdate,
ProductTypeUpdateMeta,
ProductTypeUpdatePrivateMeta,
ProductUpdate,
ProductUpdateMeta,
ProductUpdatePrivateMeta,
ProductVariantClearMeta,
ProductVariantClearPrivateMeta,
ProductVariantCreate,
ProductVariantDelete,
ProductVariantUpdate,
ProductVariantUpdateMeta,
ProductVariantUpdatePrivateMeta,
VariantImageAssign,
VariantImageUnassign,
)
from .resolvers import (
resolve_attributes,
resolve_categories,
resolve_collections,
resolve_digital_contents,
resolve_product_types,
resolve_product_variants,
resolve_products,
resolve_report_product_sales,
)
from .scalars import AttributeScalar
from .sorters import (
AttributeSortingInput,
CategorySortingInput,
CollectionSortingInput,
ProductOrder,
ProductTypeSortingInput,
)
from .types import (
Attribute,
Category,
Collection,
DigitalContent,
Product,
ProductType,
ProductVariant,
)
class ProductQueries(graphene.ObjectType):
digital_content = graphene.Field(
DigitalContent,
description="Look up digital content by ID.",
id=graphene.Argument(
graphene.ID, description="ID of the digital content.", required=True
),
)
digital_contents = PrefetchingConnectionField(
DigitalContent, description="List of digital content."
)
attributes = FilterInputConnectionField(
Attribute,
description="List of the shop's attributes.",
query=graphene.String(description=DESCRIPTIONS["attributes"]),
in_category=graphene.Argument(
graphene.ID,
description=(
"Return attributes for products belonging to the given category. "
"DEPRECATED: Will be removed in Saleor 2.10, use the `filter` field "
"instead."
),
),
in_collection=graphene.Argument(
graphene.ID,
description=(
"Return attributes for products belonging to the given collection. "
"DEPRECATED: Will be removed in Saleor 2.10, use the `filter` "
"field instead."
),
),
filter=AttributeFilterInput(description="Filtering options for attributes."),
sort_by=AttributeSortingInput(description="Sorting options for attributes."),
)
attribute = graphene.Field(
Attribute,
id=graphene.Argument(
graphene.ID, description="ID of the attribute.", required=True
),
description="Look up an attribute by ID.",
)
categories = FilterInputConnectionField(
Category,
query=graphene.String(description=DESCRIPTIONS["category"]),
filter=CategoryFilterInput(description="Filtering options for categories."),
sort_by=CategorySortingInput(description="Sort categories."),
level=graphene.Argument(
graphene.Int,
description="Filter categories by the nesting level in the category tree.",
),
description="List of the shop's categories.",
)
category = graphene.Field(
Category,
id=graphene.Argument(
graphene.ID, required=True, description="ID of the category."
),
description="Look up a category by ID.",
)
collection = graphene.Field(
Collection,
id=graphene.Argument(
graphene.ID, description="ID of the collection.", required=True
),
description="Look up a collection by ID.",
)
collections = FilterInputConnectionField(
Collection,
filter=CollectionFilterInput(description="Filtering options for collections."),
sort_by=CollectionSortingInput(description="Sort collections."),
query=graphene.String(description=DESCRIPTIONS["collection"]),
description="List of the shop's collections.",
)
product = graphene.Field(
Product,
id=graphene.Argument(
graphene.ID, description="ID of the product.", required=True
),
description="Look up a product by ID.",
)
products = FilterInputConnectionField(
Product,
filter=ProductFilterInput(description="Filtering options for products."),
attributes=graphene.List(
AttributeScalar,
description=(
"Filter products by attributes. DEPRECATED: Will be removed in "
"Saleor 2.10, use the `filter` field instead."
),
),
categories=graphene.List(
graphene.ID,
description=(
"Filter products by category. DEPRECATED: Will be removed in "
"Saleor 2.10, use the `filter` field instead."
),
),
collections=graphene.List(
graphene.ID,
description=(
"Filter products by collections. DEPRECATED: Will be removed in "
"Saleor 2.10, use the `filter` field instead."
),
),
sort_by=ProductOrder(description="Sort products."),
stock_availability=graphene.Argument(
StockAvailability, description="Filter products by stock availability."
),
query=graphene.String(description=DESCRIPTIONS["product"]),
description="List of the shop's products.",
)
product_type = graphene.Field(
ProductType,
id=graphene.Argument(
graphene.ID, description="ID of the product type.", required=True
),
description="Look up a product type by ID.",
)
product_types = FilterInputConnectionField(
ProductType,
filter=ProductTypeFilterInput(
description="Filtering options for product types."
),
query=graphene.String(description=DESCRIPTIONS["product_type"]),
sort_by=ProductTypeSortingInput(description="Sort product types."),
description="List of the shop's product types.",
)
product_variant = graphene.Field(
ProductVariant,
id=graphene.Argument(
graphene.ID, description="ID of the product variant.", required=True
),
description="Look up a product variant by ID.",
)
product_variants = PrefetchingConnectionField(
ProductVariant,
ids=graphene.List(
graphene.ID, description="Filter product variants by given IDs."
),
description="List of product variants.",
)
report_product_sales = PrefetchingConnectionField(
ProductVariant,
period=graphene.Argument(
ReportingPeriod, required=True, description="Span of time."
),
description="List of top selling products.",
)
def resolve_attributes(self, info, **kwargs):
return resolve_attributes(info, **kwargs)
def resolve_attribute(self, info, id):
return graphene.Node.get_node_from_global_id(info, id, Attribute)
def resolve_categories(self, info, level=None, query=None, **kwargs):
return resolve_categories(info, level=level, query=query, **kwargs)
def resolve_category(self, info, id):
return graphene.Node.get_node_from_global_id(info, id, Category)
def resolve_collection(self, info, id):
return graphene.Node.get_node_from_global_id(info, id, Collection)
def resolve_collections(self, info, query=None, **kwargs):
return resolve_collections(info, query, **kwargs)
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_digital_content(self, info, id):
return graphene.Node.get_node_from_global_id(info, id, DigitalContent)
@permission_required(ProductPermissions.MANAGE_PRODUCTS)
def resolve_digital_contents(self, info, **_kwargs):
return resolve_digital_contents(info)
def resolve_product(self, info, id):
return graphene.Node.get_node_from_global_id(info, id, Product)
def resolve_products(self, info, **kwargs):
return resolve_products(info, **kwargs)
def resolve_product_type(self, info, id):
return graphene.Node.get_node_from_global_id(info, id, ProductType)
def resolve_product_types(self, info, query=None, **kwargs):
return resolve_product_types(info, query, **kwargs)
def resolve_product_variant(self, info, id):
return graphene.Node.get_node_from_global_id(info, id, ProductVariant)
def resolve_product_variants(self, info, ids=None, **_kwargs):
return resolve_product_variants(info, ids)
@permission_required(
[OrderPermissions.MANAGE_ORDERS, ProductPermissions.MANAGE_PRODUCTS]
)
def resolve_report_product_sales(self, *_args, period, **_kwargs):
return resolve_report_product_sales(period)
class ProductMutations(graphene.ObjectType):
attribute_create = AttributeCreate.Field()
attribute_delete = AttributeDelete.Field()
attribute_bulk_delete = AttributeBulkDelete.Field()
attribute_assign = AttributeAssign.Field()
attribute_unassign = AttributeUnassign.Field()
attribute_update = AttributeUpdate.Field()
attribute_translate = AttributeTranslate.Field()
attribute_update_metadata = AttributeUpdateMeta.Field()
attribute_clear_metadata = AttributeClearMeta.Field()
attribute_update_private_metadata = AttributeUpdatePrivateMeta.Field()
attribute_clear_private_metadata = AttributeClearPrivateMeta.Field()
attribute_value_create = AttributeValueCreate.Field()
attribute_value_delete = AttributeValueDelete.Field()
attribute_value_bulk_delete = AttributeValueBulkDelete.Field()
attribute_value_update = AttributeValueUpdate.Field()
attribute_value_translate = AttributeValueTranslate.Field()
attribute_reorder_values = AttributeReorderValues.Field()
category_create = CategoryCreate.Field()
category_delete = CategoryDelete.Field()
category_bulk_delete = CategoryBulkDelete.Field()
category_update = CategoryUpdate.Field()
category_translate = CategoryTranslate.Field()
category_update_metadata = CategoryUpdateMeta.Field()
category_clear_metadata = CategoryClearMeta.Field()
category_update_private_metadata = CategoryUpdatePrivateMeta.Field()
category_clear_private_metadata = CategoryClearPrivateMeta.Field()
collection_add_products = CollectionAddProducts.Field()
collection_create = CollectionCreate.Field()
collection_delete = CollectionDelete.Field()
collection_reorder_products = CollectionReorderProducts.Field()
collection_bulk_delete = CollectionBulkDelete.Field()
collection_bulk_publish = CollectionBulkPublish.Field()
collection_remove_products = CollectionRemoveProducts.Field()
collection_update = CollectionUpdate.Field()
collection_translate = CollectionTranslate.Field()
collection_update_metadata = CollectionUpdateMeta.Field()
collection_clear_metadata = CollectionClearMeta.Field()
collection_update_private_metadata = CollectionUpdatePrivateMeta.Field()
collection_clear_private_metadata = CollectionClearPrivateMeta.Field()
product_create = ProductCreate.Field()
product_delete = ProductDelete.Field()
product_bulk_delete = ProductBulkDelete.Field()
product_bulk_publish = ProductBulkPublish.Field()
product_update = ProductUpdate.Field()
product_translate = ProductTranslate.Field()
product_update_metadata = ProductUpdateMeta.Field()
product_clear_metadata = ProductClearMeta.Field()
product_update_private_metadata = ProductUpdatePrivateMeta.Field()
product_clear_private_metadata = ProductClearPrivateMeta.Field()
product_image_create = ProductImageCreate.Field()
product_image_delete = ProductImageDelete.Field()
product_image_bulk_delete = ProductImageBulkDelete.Field()
product_image_reorder = ProductImageReorder.Field()
product_image_update = ProductImageUpdate.Field()
product_type_create = ProductTypeCreate.Field()
product_type_delete = ProductTypeDelete.Field()
product_type_bulk_delete = ProductTypeBulkDelete.Field()
product_type_update = ProductTypeUpdate.Field()
product_type_reorder_attributes = ProductTypeReorderAttributes.Field()
product_type_update_metadata = ProductTypeUpdateMeta.Field()
product_type_clear_metadata = ProductTypeClearMeta.Field()
product_type_update_private_metadata = ProductTypeUpdatePrivateMeta.Field()
product_type_clear_private_metadata = ProductTypeClearPrivateMeta.Field()
digital_content_create = DigitalContentCreate.Field()
digital_content_delete = DigitalContentDelete.Field()
digital_content_update = DigitalContentUpdate.Field()
digital_content_url_create = DigitalContentUrlCreate.Field()
product_variant_create = ProductVariantCreate.Field()
product_variant_delete = ProductVariantDelete.Field()
product_variant_bulk_create = ProductVariantBulkCreate.Field()
product_variant_bulk_delete = ProductVariantBulkDelete.Field()
product_variant_update = ProductVariantUpdate.Field()
product_variant_translate = ProductVariantTranslate.Field()
product_variant_update_metadata = ProductVariantUpdateMeta.Field()
product_variant_clear_metadata = ProductVariantClearMeta.Field()
product_variant_update_private_metadata = ProductVariantUpdatePrivateMeta.Field()
product_variant_clear_private_metadata = ProductVariantClearPrivateMeta.Field()
variant_image_assign = VariantImageAssign.Field()
variant_image_unassign = VariantImageUnassign.Field()
|
{
"content_hash": "9bebb714ad3ca8467e61d6c444fe6a14",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 87,
"avg_line_length": 37.548463356974,
"alnum_prop": 0.7059119813637222,
"repo_name": "maferelo/saleor",
"id": "e9a2ea9d6bffa4401e0a7ca8cf3604285244a52b",
"size": "15883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/graphql/product/schema.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "64217"
},
{
"name": "HTML",
"bytes": "394723"
},
{
"name": "JavaScript",
"bytes": "61157"
},
{
"name": "Python",
"bytes": "585270"
}
],
"symlink_target": ""
}
|
import unittest
from nova import crypto
from nova import flags
from nova import log as logging
from nova import test
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.auth import fakeldap
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.auth_unittest')
class user_generator(object):
def __init__(self, manager, **user_state):
if 'name' not in user_state:
user_state['name'] = 'test1'
self.manager = manager
self.user = manager.create_user(**user_state)
def __enter__(self):
return self.user
def __exit__(self, value, type, trace):
self.manager.delete_user(self.user)
class project_generator(object):
def __init__(self, manager, **project_state):
if 'name' not in project_state:
project_state['name'] = 'testproj'
if 'manager_user' not in project_state:
project_state['manager_user'] = 'test1'
self.manager = manager
self.project = manager.create_project(**project_state)
def __enter__(self):
return self.project
def __exit__(self, value, type, trace):
self.manager.delete_project(self.project)
class user_and_project_generator(object):
def __init__(self, manager, user_state=None, project_state=None):
if not user_state:
user_state = {}
if not project_state:
project_state = {}
self.manager = manager
if 'name' not in user_state:
user_state['name'] = 'test1'
if 'name' not in project_state:
project_state['name'] = 'testproj'
if 'manager_user' not in project_state:
project_state['manager_user'] = 'test1'
self.user = manager.create_user(**user_state)
self.project = manager.create_project(**project_state)
def __enter__(self):
return (self.user, self.project)
def __exit__(self, value, type, trace):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
class _AuthManagerBaseTestCase(test.TestCase):
def setUp(self):
super(_AuthManagerBaseTestCase, self).setUp()
self.flags(auth_driver=self.auth_driver,
connection_type='fake')
self.manager = manager.AuthManager(new=True)
self.manager.mc.cache = {}
def test_create_and_find_user(self):
with user_generator(self.manager):
self.assert_(self.manager.get_user('test1'))
def test_create_and_find_with_properties(self):
with user_generator(self.manager, name="herbert", secret="classified",
access="private-party"):
u = self.manager.get_user('herbert')
self.assertEqual('herbert', u.id)
self.assertEqual('herbert', u.name)
self.assertEqual('classified', u.secret)
self.assertEqual('private-party', u.access)
def test_signature_is_valid(self):
with user_generator(self.manager, name='admin', secret='admin',
access='admin'):
with project_generator(self.manager, name="admin",
manager_user='admin'):
accesskey = 'admin:admin'
expected_result = (self.manager.get_user('admin'),
self.manager.get_project('admin'))
# captured sig and query string using boto 1.9b/euca2ools 1.2
sig = 'd67Wzd9Bwz8xid9QU+lzWXcF2Y3tRicYABPJgrqfrwM='
auth_params = {'AWSAccessKeyId': 'admin:admin',
'Action': 'DescribeAvailabilityZones',
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'Timestamp': '2011-04-22T11:29:29',
'Version': '2009-11-30'}
self.assertTrue(expected_result, self.manager.authenticate(
accesskey,
sig,
auth_params,
'GET',
'127.0.0.1:8773',
'/services/Cloud/'))
# captured sig and query string using RightAWS 1.10.0
sig = 'ECYLU6xdFG0ZqRVhQybPJQNJ5W4B9n8fGs6+/fuGD2c='
auth_params = {'AWSAccessKeyId': 'admin:admin',
'Action': 'DescribeAvailabilityZones',
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'Timestamp': '2011-04-22T11:29:49.000Z',
'Version': '2008-12-01'}
self.assertTrue(expected_result, self.manager.authenticate(
accesskey,
sig,
auth_params,
'GET',
'127.0.0.1',
'/services/Cloud'))
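        # Background on the captured signatures above (added explanation, not
        # from the original source): EC2 signature version 2 signs the string
        # "<method>\n<host>\n<path>\n<sorted, urlencoded query params>" with
        # HMAC-SHA256 using the secret key and base64-encodes the digest; the
        # authenticate() calls are expected to recompute and compare it.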
def test_can_get_credentials(self):
self.flags(use_deprecated_auth=True)
st = {'access': 'access', 'secret': 'secret'}
with user_and_project_generator(self.manager, user_state=st) as (u, p):
credentials = self.manager.get_environment_rc(u, p)
LOG.debug(credentials)
self.assertTrue('export EC2_ACCESS_KEY="access:testproj"\n'
in credentials)
self.assertTrue('export EC2_SECRET_KEY="secret"\n' in credentials)
def test_can_list_users(self):
with user_generator(self.manager):
with user_generator(self.manager, name="test2"):
users = self.manager.get_users()
self.assert_(filter(lambda u: u.id == 'test1', users))
self.assert_(filter(lambda u: u.id == 'test2', users))
self.assert_(not filter(lambda u: u.id == 'test3', users))
def test_can_add_and_remove_user_role(self):
with user_generator(self.manager):
self.assertFalse(self.manager.has_role('test1', 'itsec'))
self.manager.add_role('test1', 'itsec')
self.assertTrue(self.manager.has_role('test1', 'itsec'))
self.manager.remove_role('test1', 'itsec')
self.assertFalse(self.manager.has_role('test1', 'itsec'))
def test_can_create_and_get_project(self):
with user_and_project_generator(self.manager) as (u, p):
self.assert_(self.manager.get_user('test1'))
self.assert_(self.manager.get_user('test1'))
self.assert_(self.manager.get_project('testproj'))
def test_can_list_projects(self):
with user_and_project_generator(self.manager):
with project_generator(self.manager, name="testproj2"):
projects = self.manager.get_projects()
self.assert_(filter(lambda p: p.name == 'testproj', projects))
self.assert_(filter(lambda p: p.name == 'testproj2', projects))
self.assert_(not filter(lambda p: p.name == 'testproj3',
projects))
def test_can_create_and_get_project_with_attributes(self):
with user_generator(self.manager):
with project_generator(self.manager, description='A test project'):
project = self.manager.get_project('testproj')
self.assertEqual('A test project', project.description)
def test_can_create_project_with_manager(self):
with user_and_project_generator(self.manager) as (user, project):
self.assertEqual('test1', project.project_manager_id)
self.assertTrue(self.manager.is_project_manager(user, project))
def test_create_project_assigns_manager_to_members(self):
with user_and_project_generator(self.manager) as (user, project):
self.assertTrue(self.manager.is_project_member(user, project))
def test_no_extra_project_members(self):
with user_generator(self.manager, name='test2') as baduser:
with user_and_project_generator(self.manager) as (user, project):
self.assertFalse(self.manager.is_project_member(baduser,
project))
def test_no_extra_project_managers(self):
with user_generator(self.manager, name='test2') as baduser:
with user_and_project_generator(self.manager) as (user, project):
self.assertFalse(self.manager.is_project_manager(baduser,
project))
def test_can_add_user_to_project(self):
with user_generator(self.manager, name='test2') as user:
with user_and_project_generator(self.manager) as (_user, project):
self.manager.add_to_project(user, project)
project = self.manager.get_project('testproj')
self.assertTrue(self.manager.is_project_member(user, project))
def test_can_remove_user_from_project(self):
with user_generator(self.manager, name='test2') as user:
with user_and_project_generator(self.manager) as (_user, project):
self.manager.add_to_project(user, project)
project = self.manager.get_project('testproj')
self.assertTrue(self.manager.is_project_member(user, project))
self.manager.remove_from_project(user, project)
project = self.manager.get_project('testproj')
self.assertFalse(self.manager.is_project_member(user, project))
def test_can_add_remove_user_with_role(self):
with user_generator(self.manager, name='test2') as user:
with user_and_project_generator(self.manager) as (_user, project):
# NOTE(todd): after modifying users you must reload project
self.manager.add_to_project(user, project)
project = self.manager.get_project('testproj')
self.manager.add_role(user, 'developer', project)
self.assertTrue(self.manager.is_project_member(user, project))
self.manager.remove_from_project(user, project)
project = self.manager.get_project('testproj')
self.assertFalse(self.manager.has_role(user, 'developer',
project))
self.assertFalse(self.manager.is_project_member(user, project))
def test_adding_role_to_project_is_ignored_unless_added_to_user(self):
with user_and_project_generator(self.manager) as (user, project):
self.assertFalse(self.manager.has_role(user, 'sysadmin', project))
self.manager.add_role(user, 'sysadmin', project)
# NOTE(todd): it will still show up in get_user_roles(u, project)
self.assertFalse(self.manager.has_role(user, 'sysadmin', project))
self.manager.add_role(user, 'sysadmin')
self.assertTrue(self.manager.has_role(user, 'sysadmin', project))
def test_add_user_role_doesnt_infect_project_roles(self):
with user_and_project_generator(self.manager) as (user, project):
self.assertFalse(self.manager.has_role(user, 'sysadmin', project))
self.manager.add_role(user, 'sysadmin')
self.assertFalse(self.manager.has_role(user, 'sysadmin', project))
def test_can_list_user_roles(self):
with user_and_project_generator(self.manager) as (user, project):
self.manager.add_role(user, 'sysadmin')
roles = self.manager.get_user_roles(user)
self.assertTrue('sysadmin' in roles)
self.assertFalse('netadmin' in roles)
def test_can_list_project_roles(self):
with user_and_project_generator(self.manager) as (user, project):
self.manager.add_role(user, 'sysadmin')
self.manager.add_role(user, 'sysadmin', project)
self.manager.add_role(user, 'netadmin', project)
project_roles = self.manager.get_user_roles(user, project)
self.assertTrue('sysadmin' in project_roles)
self.assertTrue('netadmin' in project_roles)
            # has_role should be False when the user-level role is missing
self.assertFalse(self.manager.has_role(user, 'netadmin', project))
def test_can_remove_user_roles(self):
with user_and_project_generator(self.manager) as (user, project):
self.manager.add_role(user, 'sysadmin')
self.assertTrue(self.manager.has_role(user, 'sysadmin'))
self.manager.remove_role(user, 'sysadmin')
self.assertFalse(self.manager.has_role(user, 'sysadmin'))
def test_removing_user_role_hides_it_from_project(self):
with user_and_project_generator(self.manager) as (user, project):
self.manager.add_role(user, 'sysadmin')
self.manager.add_role(user, 'sysadmin', project)
self.assertTrue(self.manager.has_role(user, 'sysadmin', project))
self.manager.remove_role(user, 'sysadmin')
self.assertFalse(self.manager.has_role(user, 'sysadmin', project))
def test_can_remove_project_role_but_keep_user_role(self):
with user_and_project_generator(self.manager) as (user, project):
self.manager.add_role(user, 'sysadmin')
self.manager.add_role(user, 'sysadmin', project)
self.assertTrue(self.manager.has_role(user, 'sysadmin'))
self.manager.remove_role(user, 'sysadmin', project)
self.assertFalse(self.manager.has_role(user, 'sysadmin', project))
self.assertTrue(self.manager.has_role(user, 'sysadmin'))
def test_can_retrieve_project_by_user(self):
with user_and_project_generator(self.manager) as (user, project):
self.assertEqual(1, len(self.manager.get_projects('test1')))
def test_can_modify_project(self):
with user_and_project_generator(self.manager):
with user_generator(self.manager, name='test2'):
self.manager.modify_project('testproj', 'test2', 'new desc')
project = self.manager.get_project('testproj')
self.assertEqual('test2', project.project_manager_id)
self.assertEqual('new desc', project.description)
def test_modify_project_adds_new_manager(self):
with user_and_project_generator(self.manager):
with user_generator(self.manager, name='test2'):
self.manager.modify_project('testproj', 'test2', 'new desc')
project = self.manager.get_project('testproj')
self.assertTrue('test2' in project.member_ids)
def test_can_delete_project(self):
with user_generator(self.manager):
self.manager.create_project('testproj', 'test1')
self.assert_(self.manager.get_project('testproj'))
self.manager.delete_project('testproj')
projectlist = self.manager.get_projects()
self.assert_(not filter(lambda p: p.name == 'testproj',
projectlist))
def test_can_delete_user(self):
self.manager.create_user('test1')
self.assert_(self.manager.get_user('test1'))
self.manager.delete_user('test1')
userlist = self.manager.get_users()
self.assert_(not filter(lambda u: u.id == 'test1', userlist))
def test_can_modify_users(self):
with user_generator(self.manager):
self.manager.modify_user('test1', 'access', 'secret', True)
user = self.manager.get_user('test1')
self.assertEqual('access', user.access)
self.assertEqual('secret', user.secret)
self.assertTrue(user.is_admin())
class AuthManagerLdapTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
def test_reconnect_on_server_failure(self):
self.manager.get_users()
fakeldap.server_fail = True
try:
self.assertRaises(fakeldap.SERVER_DOWN, self.manager.get_users)
finally:
fakeldap.server_fail = False
self.manager.get_users()
class AuthManagerDbTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.dbdriver.DbDriver'
if __name__ == "__main__":
# TODO: Implement use_fake as an option
unittest.main()
|
{
"content_hash": "dc8319c0b152f82e9dae5803446eb76c",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 79,
"avg_line_length": 46.893982808022926,
"alnum_prop": 0.5939753146767689,
"repo_name": "KarimAllah/nova",
"id": "bdc7f3142b4a706734ccd5d0dc204a6957670af5",
"size": "17143",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/test_auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "5419134"
},
{
"name": "Shell",
"bytes": "24506"
}
],
"symlink_target": ""
}
|
'''
7D2D entities/map markers Tornado Server
by: Adam Dybczak (RaTilicus)
'''
import time
import tornado.web, tornado.ioloop
import motor
from bson.json_util import loads as json_decode, dumps as json_encode
from tornado import gen
from handlers import IndexHandler, RecipesHandler, AboutHandler, LoginHandler, LogoutHandler, MarkerHandler
from websocket import WebSocketPool, WebSocket
import telnetlib
from telnet_handler import TelnetHandler
from log import Log
if __name__ == '__main__':
print 'START'
db = motor.MotorClient().sdtd
log = Log.get_log(db)
telnet_handler = TelnetHandler(db, telnet_host='localhost', telnet_port=25025)
sockets = WebSocketPool(db=db, th=telnet_handler)
telnet_handler.set_sockets(sockets)
SETTINGS = {
't': int(time.time()),
'cookie_secret': "__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
"login_url": "/login",
#'xsrf_cookies': True,
'autoreload': True,
#'debug': True,
'db': db,
'sockets': sockets,
}
URLS = [
(r"^/$", IndexHandler),
(r"^/recipes/$", RecipesHandler),
(r"^/about/$", AboutHandler),
(r'/ws/', WebSocket),
(r"^/login/$", LoginHandler),
(r"^/logout/$", LogoutHandler),
(r'^/markers/$', MarkerHandler),
(r'^/markers/([a-z0-9]+)/$', MarkerHandler),
]
try:
# update settings based on secrets file (not to be shared with github)
import __secrets__ as secret
SETTINGS.update(secret.SETTINGS)
except:
pass
application = tornado.web.Application(URLS, **SETTINGS)
application.listen(8888)
tornado.ioloop.PeriodicCallback(telnet_handler.update, 1000).start()
tornado.ioloop.IOLoop.instance().start()
print 'END'
|
{
"content_hash": "d4296b4e1f940741518f1f0f7abcbb93",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 107,
"avg_line_length": 28.523809523809526,
"alnum_prop": 0.6338341680578742,
"repo_name": "ratilicus/sdtd",
"id": "5d9838a4ab274edf160aa8b0e4cbc3460aeaefdf",
"size": "1816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdtd-tornado.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1367"
},
{
"name": "HTML",
"bytes": "9426"
},
{
"name": "JavaScript",
"bytes": "21058"
},
{
"name": "Python",
"bytes": "53047"
}
],
"symlink_target": ""
}
|
import json
import os
import path
import shutil
import socket
import tempfile
import urllib2
from django.conf import settings
import mock
from nose.tools import eq_
from PIL import Image
import amo
import amo.tests
from addons.models import Addon
from amo.tests.test_helpers import get_image_path
from amo.urlresolvers import reverse
from amo.utils import ImageCheck
from devhub import tasks
from devhub.tests.test_views import BaseWebAppTest
from files.models import FileUpload
def test_resize_icon_shrink():
""" Image should be shrunk so that the longest side is 32px. """
resize_size = 32
final_size = (32, 12)
_uploader(resize_size, final_size)
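    # Worked check (added note, not in the original): the source image used by
    # _uploader() is 82x31, so scaling the longest side down to 32 gives
    # 31 * 32 / 82 ~= 12.1, i.e. the expected final size of (32, 12).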
def test_resize_icon_enlarge():
""" Image stays the same, since the new size is bigger than both sides. """
resize_size = 100
final_size = (82, 31)
_uploader(resize_size, final_size)
def test_resize_icon_same():
""" Image stays the same, since the new size is the same. """
resize_size = 82
final_size = (82, 31)
_uploader(resize_size, final_size)
def test_resize_icon_list():
""" Resize multiple images at once. """
resize_size = [32, 82, 100]
final_size = [(32, 12), (82, 31), (82, 31)]
_uploader(resize_size, final_size)
def _uploader(resize_size, final_size):
img = get_image_path('mozilla.png')
original_size = (82, 31)
src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png",
delete=False)
# resize_icon removes the original
shutil.copyfile(img, src.name)
src_image = Image.open(src.name)
eq_(src_image.size, original_size)
if isinstance(final_size, list):
for rsize, fsize in zip(resize_size, final_size):
dest_name = str(path.path(settings.ADDON_ICONS_PATH) / '1234')
tasks.resize_icon(src.name, dest_name, resize_size)
dest_image = Image.open("%s-%s.png" % (dest_name, rsize))
eq_(dest_image.size, fsize)
if os.path.exists(dest_image.filename):
os.remove(dest_image.filename)
assert not os.path.exists(dest_image.filename)
else:
dest = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png")
tasks.resize_icon(src.name, dest.name, resize_size)
dest_image = Image.open(dest.name)
eq_(dest_image.size, final_size)
assert not os.path.exists(src.name)
class TestValidator(amo.tests.TestCase):
def setUp(self):
self.upload = FileUpload.objects.create()
assert not self.upload.valid
def get_upload(self):
return FileUpload.objects.get(pk=self.upload.pk)
@mock.patch('devhub.tasks.run_validator')
def test_pass_validation(self, _mock):
_mock.return_value = '{"errors": 0}'
tasks.validator(self.upload.pk)
assert self.get_upload().valid
@mock.patch('devhub.tasks.run_validator')
def test_fail_validation(self, _mock):
_mock.return_value = '{"errors": 2}'
tasks.validator(self.upload.pk)
assert not self.get_upload().valid
@mock.patch('devhub.tasks.run_validator')
def test_validation_error(self, _mock):
_mock.side_effect = Exception
eq_(self.upload.task_error, None)
with self.assertRaises(Exception):
tasks.validator(self.upload.pk)
error = self.get_upload().task_error
assert error.startswith('Traceback (most recent call last)'), error
class TestFlagBinary(amo.tests.TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
self.addon = Addon.objects.get(pk=3615)
self.addon.update(binary=False)
@mock.patch('devhub.tasks.run_validator')
def test_flag_binary(self, _mock):
_mock.return_value = '{"metadata":{"contains_binary_extension": 1}}'
tasks.flag_binary([self.addon.pk])
eq_(Addon.objects.get(pk=self.addon.pk).binary, True)
@mock.patch('devhub.tasks.run_validator')
def test_flag_not_binary(self, _mock):
_mock.return_value = '{"metadata":{"contains_binary_extension": 0}}'
tasks.flag_binary([self.addon.pk])
eq_(Addon.objects.get(pk=self.addon.pk).binary, False)
@mock.patch('devhub.tasks.run_validator')
def test_flag_error(self, _mock):
_mock.side_effect = RuntimeError()
tasks.flag_binary([self.addon.pk])
eq_(Addon.objects.get(pk=self.addon.pk).binary, False)
class TestFetchManifest(amo.tests.TestCase):
def setUp(self):
self.upload = FileUpload.objects.create()
self.content_type = 'application/x-web-app-manifest+json'
patcher = mock.patch('devhub.tasks.urllib2.urlopen')
self.urlopen_mock = patcher.start()
self.addCleanup(patcher.stop)
@mock.patch('devhub.tasks.validator')
def test_success_add_file(self, validator_mock):
response_mock = mock.Mock()
response_mock.read.return_value = 'woo'
response_mock.headers = {'Content-Type': self.content_type}
self.urlopen_mock.return_value = response_mock
tasks.fetch_manifest('http://xx.com/manifest.json', self.upload.pk)
upload = FileUpload.objects.get(pk=self.upload.pk)
eq_(upload.name, 'http://xx.com/manifest.json')
eq_(open(upload.path).read(), 'woo')
@mock.patch('devhub.tasks.validator')
def test_success_call_validator(self, validator_mock):
response_mock = mock.Mock()
response_mock.read.return_value = 'woo'
ct = self.content_type + '; charset=utf-8'
response_mock.headers = {'Content-Type': ct}
self.urlopen_mock.return_value = response_mock
tasks.fetch_manifest('http://xx.com/manifest.json', self.upload.pk)
assert validator_mock.called
def check_validation(self, msg):
upload = FileUpload.objects.get(pk=self.upload.pk)
validation = json.loads(upload.validation)
eq_(validation['errors'], 1)
eq_(validation['success'], False)
eq_(len(validation['messages']), 1)
eq_(validation['messages'][0]['message'], msg)
def test_connection_error(self):
reason = socket.gaierror(8, 'nodename nor servname provided')
self.urlopen_mock.side_effect = urllib2.URLError(reason)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation('Could not contact host at "url".')
def test_url_timeout(self):
reason = socket.timeout('too slow')
self.urlopen_mock.side_effect = urllib2.URLError(reason)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation('Connection to "url" timed out.')
def test_other_url_error(self):
reason = Exception('Some other failure.')
self.urlopen_mock.side_effect = urllib2.URLError(reason)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation('Some other failure.')
def test_no_content_type(self):
response_mock = mock.Mock()
response_mock.read.return_value = 'woo'
response_mock.headers = {}
self.urlopen_mock.return_value = response_mock
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'Your manifest must be served with the HTTP header '
'"Content-Type: application/x-web-app-manifest+json".')
def test_bad_content_type(self):
response_mock = mock.Mock()
response_mock.read.return_value = 'woo'
response_mock.headers = {'Content-Type': 'x'}
self.urlopen_mock.return_value = response_mock
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation(
'Your manifest must be served with the HTTP header '
'"Content-Type: application/x-web-app-manifest+json". We saw "x".')
def test_response_too_large(self):
response_mock = mock.Mock()
content = 'x' * (settings.MAX_WEBAPP_UPLOAD_SIZE + 1)
response_mock.read.return_value = content
response_mock.headers = {'Content-Type': self.content_type}
self.urlopen_mock.return_value = response_mock
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation('Your manifest must be less than 2097152 bytes.')
def test_http_error(self):
self.urlopen_mock.side_effect = urllib2.HTTPError(
'url', 404, 'Not Found', [], None)
tasks.fetch_manifest('url', self.upload.pk)
self.check_validation('url responded with 404 (Not Found).')
class TestFetchIcon(BaseWebAppTest):
def setUp(self):
super(TestFetchIcon, self).setUp()
self.content_type = 'image/png'
self.apps_path = (path.path(settings.ROOT) / 'apps'
/ 'devhub' / 'tests' / 'addons')
def webapp_from_path(self, path):
self.upload = self.get_upload(abspath=path)
self.url = reverse('devhub.submit.2')
assert self.client.login(username='regular@mozilla.com',
password='password')
self.client.post(reverse('devhub.submit.1'))
return self.post_addon()
def test_no_icons(self):
path = self.apps_path / 'noicon.webapp'
iconless_app = self.webapp_from_path(path)
urllib2.urlopen = mock.Mock()
tasks.fetch_icon(iconless_app)
assert not urllib2.urlopen.called
def check_icons(self, webapp):
manifest = webapp.get_manifest_json()
biggest = max([int(size) for size in manifest['icons']])
icon_dir = webapp.get_icon_dir()
for size in amo.ADDON_ICON_SIZES:
if not size <= biggest:
continue
icon_path = os.path.join(icon_dir, '%s-%s.png'
% (str(webapp.id), size))
with open(icon_path, 'r') as img:
checker = ImageCheck(img)
assert checker.is_image()
eq_(checker.img.size, (size, size))
def test_data_uri(self):
app_path = self.apps_path / 'dataicon.webapp'
webapp = self.webapp_from_path(app_path)
tasks.fetch_icon(webapp)
eq_(webapp.icon_type, self.content_type)
self.check_icons(webapp)
def test_hosted_icon(self):
app_path = self.apps_path / 'mozball.webapp'
webapp = self.webapp_from_path(app_path)
img_path = self.apps_path / 'mozball-128.png'
with open(img_path, 'r') as content:
tasks.save_icon(webapp, content.read())
eq_(webapp.icon_type, self.content_type)
self.check_icons(webapp)
|
{
"content_hash": "ea42dae67b6d237405509d6cbbfc6c17",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 79,
"avg_line_length": 34.47712418300654,
"alnum_prop": 0.6283412322274882,
"repo_name": "jbalogh/zamboni",
"id": "478cc78d0ef3e22eb0839dd5a3ff5d794212e844",
"size": "10550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/devhub/tests/test_tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "JavaScript",
"bytes": "1553612"
},
{
"name": "Python",
"bytes": "2860649"
},
{
"name": "Shell",
"bytes": "8095"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import time
from pytibrv.Tibrv import *
import unittest
class TimerTest(unittest.TestCase, TibrvTimerCallback):
@classmethod
def setUpClass(cls):
status = Tibrv.open()
if status != TIBRV_OK:
raise TibrvError(status)
@classmethod
def tearDownClass(cls):
Tibrv.close()
def callback(self, event: TibrvTimer, msg, closure):
self.counter += 1
print(self.counter, datetime.now())
if self.counter >= 10:
status = event.destroy()
def test_create(self):
self.counter = 0
que = TibrvQueue()
que.create('TIMER TEST')
disp = TibrvDispatcher()
status = disp.create(que)
self.assertEqual(TIBRV_OK, status, TibrvStatus.text(status))
print('')
tm = TibrvTimer()
status = tm.create(que, self, 1.0)
self.assertEqual(TIBRV_OK, status, TibrvStatus.text(status))
# run for 11 seconds
self.timeout = time.time() + 11
while time.time() <= self.timeout:
time.sleep(0.5)
            # wait till callback() destroys itself when counter >= 10
            if tm.id() == 0:
                break
#print('SLEEP...')
status = disp.destroy()
self.assertEqual(TIBRV_OK, status, TibrvStatus.text(status))
self.assertEqual(10, self.counter)
# Timer had been destroyed in callback
status = tm.destroy()
self.assertEqual(TIBRV_INVALID_EVENT, status, TibrvStatus.text(status))
status = que.destroy()
self.assertEqual(TIBRV_OK, status, TibrvStatus.text(status))
print('TEST DONE')
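# --- A minimal one-shot timer sketch (not part of TimerTest above). It reuses
# only the pytibrv names already exercised by the test case: Tibrv, TibrvQueue,
# TibrvDispatcher, TibrvTimer and TibrvTimerCallback. Treat it as an
# illustrative pattern, not as authoritative API documentation. ---
class OneShotPrinter(TibrvTimerCallback):
    def callback(self, event, msg, closure):
        print('timer fired at', datetime.now())
        event.destroy()                     # one-shot: stop after the first tick
def run_one_shot_sketch():
    if Tibrv.open() != TIBRV_OK:            # initialise the Rendezvous machinery
        return
    que = TibrvQueue()
    que.create('SKETCH')
    disp = TibrvDispatcher()
    disp.create(que)                        # dispatcher drains the queue in the background
    tm = TibrvTimer()
    tm.create(que, OneShotPrinter(), 2.0)   # fire roughly 2 seconds from now
    time.sleep(3)                           # leave time for the callback to run
    disp.destroy()
    que.destroy()
    Tibrv.close()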
if __name__ == "__main__":
unittest.main(verbosity=2)
|
{
"content_hash": "2154c0b122040fc141a507966a0912fa",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 79,
"avg_line_length": 26.439393939393938,
"alnum_prop": 0.5908309455587393,
"repo_name": "arienchen/pytibrv",
"id": "50c666539a9f0732ba14f668f035806f1133cbe7",
"size": "1745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/test-timer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "382302"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from django.conf import settings
from .models import *
class TalkGroupAdmin(admin.ModelAdmin):
search_fields = ['alpha_tag', 'description', 'dec_id']
list_display = ('alpha_tag', 'description', 'dec_id', 'system')
save_on_top = True
class UnitAdmin(admin.ModelAdmin):
search_fields = ['description', 'dec_id' ]
list_display = ('description', 'dec_id', 'system' )
save_on_top = True
class TranmissionUnitInline(admin.TabularInline):
model = TranmissionUnit
extra = 0 # how many rows to show
class TransmissionAdmin(admin.ModelAdmin):
#inlines = (TranmissionUnitInline,)
raw_id_fields = ('talkgroup_info', 'units', 'source', 'system')
save_on_top = True
class SourceInline(admin.TabularInline):
model = Source
    readonly_fields = ('id',)
class SourceAdmin(admin.ModelAdmin):
list_display = ('id','description')
list_display_links = ('id','description')
#fields = ('id','description')
save_on_top = True
def get_readonly_fields(self, request, obj=None):
if obj: # editing an existing object
return self.readonly_fields + ('id',)
return self.readonly_fields
class ScanListAdminForm(forms.ModelForm):
talkgroups = forms.ModelMultipleChoiceField(
queryset=TalkGroupWithSystem.objects.all(),
required=False,
widget=FilteredSelectMultiple(
verbose_name = 'talkgroups',
is_stacked=False
)
)
class Meta:
model = ScanList
fields = "__all__"
def __init__(self, *args, **kwargs):
super(ScanListAdminForm, self).__init__(*args, **kwargs)
if self.instance and self.instance.pk:
self.fields['talkgroups'].initial = self.instance.talkgroups.all()
def save(self, commit=True):
scanlist = super(ScanListAdminForm, self).save(commit=False)
if commit:
scanlist.save()
if scanlist.pk:
scanlist.talkgroups.set(self.cleaned_data['talkgroups'])
self.save_m2m()
return scanlist
class ScanListAdmin(admin.ModelAdmin):
form = ScanListAdminForm
save_as = True
save_on_top = True
class ScanListRawAdmin(admin.ModelAdmin):
    autocomplete_fields = ('talkgroups',)
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
verbose_name_plural = 'profile'
class UserAdmin(BaseUserAdmin):
inlines = (ProfileInline, )
class TalkGroupAccessAdminForm(forms.ModelForm):
talkgroups = forms.ModelMultipleChoiceField(
queryset=TalkGroupWithSystem.objects.all(),
required=False,
widget=FilteredSelectMultiple(
verbose_name = 'talkgroups',
is_stacked=False
)
)
class Meta:
model = TalkGroupAccess
fields = "__all__"
def __init__(self, *args, **kwargs):
super(TalkGroupAccessAdminForm, self).__init__(*args, **kwargs)
if self.instance and self.instance.pk:
self.fields['talkgroups'].initial = self.instance.talkgroups.all()
def save(self, commit=True):
tglist = super(TalkGroupAccessAdminForm, self).save(commit=False)
if commit:
tglist.save()
if tglist.pk:
tglist.talkgroups.set(self.cleaned_data['talkgroups'])
self.save_m2m()
return tglist
class TalkGroupAccessAdmin(admin.ModelAdmin):
form = TalkGroupAccessAdminForm
list_display = ('name', 'default_group', 'default_new_talkgroups')
save_on_top = True
class TalkGroupAccessRawAdmin(admin.ModelAdmin):
    autocomplete_fields = ('talkgroups',)
class TranmissionUnitAdmin(admin.ModelAdmin):
raw_id_fields = ("transmission", "unit")
save_on_top = True
class IncidentAdmin(admin.ModelAdmin):
raw_id_fields = ("transmissions",)
save_on_top = True
class CityForms(forms.ModelForm):
google_maps_url = forms.CharField(max_length=1000)
class Meta:
model = City
fields = '__all__'
    def clean_google_maps_url(self):
        # Keep only the first double-quoted value from a pasted embed snippet
        # (typically the iframe src URL); fall back to the raw cleaned value.
        data = self.cleaned_data.get('google_maps_url', '')
        parts = data.split('"')
        try:
            new_url = parts[1]
        except IndexError:
            return data
        return new_url
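# Illustration of the clean_google_maps_url() behaviour above (a sketch; the
# sample snippet below is hypothetical, not taken from project data). When an
# admin pastes a Google Maps embed snippet, only the first double-quoted value
# -- the iframe src URL -- is kept:
#
#   pasted = '<iframe src="https://www.google.com/maps/embed?pb=XYZ" width="600"></iframe>'
#   pasted.split('"')[1]  ->  'https://www.google.com/maps/embed?pb=XYZ'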
class CityAdmin(admin.ModelAdmin):
form = CityForms
class MessagePopUpAdmin(admin.ModelAdmin):
list_display = ('mesg_type', 'mesg_html', 'active')
admin.site.register(Transmission, TransmissionAdmin)
admin.site.register(Unit,UnitAdmin)
#admin.site.register(TranmissionUnit, TranmissionUnitAdmin)
admin.site.register(TalkGroup, TalkGroupAdmin)
if not settings.USE_RAW_ID_FIELDS:
admin.site.register(ScanList, ScanListAdmin)
admin.site.register(TalkGroupAccess, TalkGroupAccessAdmin)
else:
admin.site.register(ScanList, ScanListRawAdmin)
admin.site.register(TalkGroupAccess, TalkGroupAccessRawAdmin)
admin.site.register(MenuScanList)
admin.site.register(MenuTalkGroupList)
admin.site.register(Source, SourceAdmin)
admin.site.register(Agency)
admin.site.register(Plan)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(System)
admin.site.register(WebHtml)
admin.site.register(RepeaterSite)
admin.site.register(Service)
admin.site.register(SiteOption)
admin.site.register(Incident, IncidentAdmin)
admin.site.register(City, CityAdmin)
admin.site.register(MessagePopUp, MessagePopUpAdmin)
|
{
"content_hash": "7c7af0cc47b0b56a5c00858ea5a8e4a0",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 78,
"avg_line_length": 28.55778894472362,
"alnum_prop": 0.6802745029033961,
"repo_name": "ScanOC/trunk-player",
"id": "c3ab34b56c8ad8b76a538acbc5a6c5cac013ad96",
"size": "5683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radio/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5493"
},
{
"name": "Dockerfile",
"bytes": "768"
},
{
"name": "HTML",
"bytes": "47516"
},
{
"name": "JavaScript",
"bytes": "22401"
},
{
"name": "Python",
"bytes": "167619"
},
{
"name": "Shell",
"bytes": "5505"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import pytest
from django.http import HttpRequest
from rest_framework.exceptions import AuthenticationFailed
from sentry.api.authentication import ClientIdSecretAuthentication, DSNAuthentication
from sentry.models import ProjectKeyStatus
from sentry.testutils import TestCase
class TestClientIdSecretAuthentication(TestCase):
def setUp(self):
super(TestClientIdSecretAuthentication, self).setUp()
self.auth = ClientIdSecretAuthentication()
self.org = self.create_organization(owner=self.user)
self.sentry_app = self.create_sentry_app(name="foo", organization=self.org)
self.api_app = self.sentry_app.application
def test_authenticate(self):
request = HttpRequest()
request.json_body = {
"client_id": self.api_app.client_id,
"client_secret": self.api_app.client_secret,
}
user, _ = self.auth.authenticate(request)
assert user == self.sentry_app.proxy_user
def test_without_json_body(self):
request = HttpRequest()
request.json_body = None
with self.assertRaises(AuthenticationFailed):
self.auth.authenticate(request)
def test_missing_client_id(self):
request = HttpRequest()
request.json_body = {"client_secret": self.api_app.client_secret}
with self.assertRaises(AuthenticationFailed):
self.auth.authenticate(request)
def test_missing_client_secret(self):
request = HttpRequest()
request.json_body = {"client_id": self.api_app.client_id}
with self.assertRaises(AuthenticationFailed):
self.auth.authenticate(request)
def test_incorrect_client_id(self):
request = HttpRequest()
request.json_body = {"client_id": "notit", "client_secret": self.api_app.client_secret}
with self.assertRaises(AuthenticationFailed):
self.auth.authenticate(request)
def test_incorrect_client_secret(self):
request = HttpRequest()
request.json_body = {"client_id": self.api_app.client_id, "client_secret": "notit"}
with self.assertRaises(AuthenticationFailed):
self.auth.authenticate(request)
class TestDSNAuthentication(TestCase):
def setUp(self):
super(TestDSNAuthentication, self).setUp()
self.auth = DSNAuthentication()
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.org)
self.project_key = self.create_project_key(project=self.project)
def test_authenticate(self):
request = HttpRequest()
request.META["HTTP_AUTHORIZATION"] = u"DSN {}".format(self.project_key.dsn_public)
result = self.auth.authenticate(request)
assert result is not None
user, auth = result
assert user.is_anonymous()
assert auth == self.project_key
def test_inactive_key(self):
self.project_key.update(status=ProjectKeyStatus.INACTIVE)
request = HttpRequest()
request.META["HTTP_AUTHORIZATION"] = u"DSN {}".format(self.project_key.dsn_public)
with pytest.raises(AuthenticationFailed):
self.auth.authenticate(request)
|
{
"content_hash": "2909f9cfe308f02293e6637ca96550b0",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 95,
"avg_line_length": 33.577319587628864,
"alnum_prop": 0.6751611912803193,
"repo_name": "mvaled/sentry",
"id": "8e270ac9115a00930a00f59d602c5134148ff3b6",
"size": "3257",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sentry/api/test_authentication.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
import eventlet
import os
import sys
if os.name == 'nt':
# eventlet monkey patching causes subprocess.Popen to fail on Windows
# when using pipes due to missing non blocking I/O support
eventlet.monkey_patch(os=False)
else:
eventlet.monkey_patch()
# If ../murano/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
root = os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir)
if os.path.exists(os.path.join(root, 'murano', '__init__.py')):
sys.path.insert(0, root)
from murano.api.v1 import request_statistics
from murano.common import config
from murano.common import policy
from murano.common import server
from murano.common import statservice as stats
from murano.common import wsgi
from murano.openstack.common import log
from murano.openstack.common import service
def main():
try:
config.parse_args()
log.setup('murano')
request_statistics.init_stats()
policy.init()
launcher = service.ServiceLauncher()
app = config.load_paste_app('murano')
port, host = (config.CONF.bind_port, config.CONF.bind_host)
launcher.launch_service(wsgi.Service(app, port, host))
launcher.launch_service(server.get_rpc_service())
launcher.launch_service(server.get_notification_service())
launcher.launch_service(stats.StatsCollectingService())
launcher.wait()
except RuntimeError as e:
sys.stderr.write("ERROR: %s\n" % e)
sys.exit(1)
if __name__ == '__main__':
main()
|
{
"content_hash": "9a35ccf76b1ff0ec2dda653c4c037a1b",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 31,
"alnum_prop": 0.684863523573201,
"repo_name": "telefonicaid/murano",
"id": "3721f8d6deca01f30af94e8899d9588e20aca590",
"size": "2250",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "murano/cmd/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "8634"
},
{
"name": "Python",
"bytes": "826132"
},
{
"name": "Shell",
"bytes": "4296"
}
],
"symlink_target": ""
}
|
"""
Test basic DataFrame functionality.
"""
import pandas as pd
import pytest
import weld.grizzly as gr
def get_frames(cls, strings):
"""
Returns two DataFrames for testing binary operators.
The DataFrames have columns of overlapping/different names, types, etc.
"""
df1 = pd.DataFrame({
'name': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'Smith', 'Narayanan', 'Thomas', 'Thaker'],
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = pd.DataFrame({
'firstName': ['Bob', 'Sally', 'Kunal', 'Deepak', 'James', 'Pratiksha'],
'lastName': ['Kahn', 'Lopez', 'smith', 'narayanan', 'Thomas', 'thaker'],
'age': [25, 30, 45, 20, 60, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
if not strings:
df1 = df1.drop(['name', 'lastName'], axis=1)
df2 = df2.drop(['firstName', 'lastName'], axis=1)
return (cls(df1), cls(df2))
def _test_binop(pd_op, gr_op, strings=True):
"""
Test a binary operator.
    Binary operators align on column name. For columns that don't exist in both
    DataFrames, the result column is filled with NaN (for non-comparison
    operations) or False (for comparison operations).
If the RHS is a Series, the Series should be added to all columns.
"""
df1, df2 = get_frames(pd.DataFrame, strings)
gdf1, gdf2 = get_frames(gr.GrizzlyDataFrame, strings)
expect = pd_op(df1, df2)
result = gr_op(gdf1, gdf2).to_pandas()
assert expect.equals(result)
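# A plain-pandas illustration (not one of the Grizzly tests) of the alignment
# rule described in the _test_binop docstring: for arithmetic operators, columns
# missing from either operand come back filled with NaN.
def _pandas_alignment_example():
    left = pd.DataFrame({'age': [20, 30], 'score': [1.0, 2.0]})
    right = pd.DataFrame({'age': [1, 2], 'scores': [3.0, 4.0]})
    out = left.add(right)
    # out has columns ['age', 'score', 'scores']; 'age' is [21, 32], while
    # 'score' and 'scores' are all NaN because each exists in only one operand.
    return out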
def test_evaluation():
    # Test to make sure that evaluating a DataFrame once caches the result and
    # doesn't cause another evaluation.
df1 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'score': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df2 = gr.GrizzlyDataFrame({
'age': [20, 30, 35, 20, 50, 35],
'scores': [20.0, 30.0, 35.0, 50.0, 35.0, 25.0]
})
df3 = (df1 + df2) * df2 + df1 / df2
assert not df3.is_value
df3.evaluate()
assert df3.is_value
weld_value = df3.weld_value
df3.evaluate()
# The same weld_value should be returned.
assert weld_value is df3.weld_value
def test_add():
_test_binop(pd.DataFrame.add, gr.GrizzlyDataFrame.add, strings=False)
def test_sub():
_test_binop(pd.DataFrame.sub, gr.GrizzlyDataFrame.sub, strings=False)
def test_mul():
_test_binop(pd.DataFrame.mul, gr.GrizzlyDataFrame.mul, strings=False)
def test_div():
_test_binop(pd.DataFrame.div, gr.GrizzlyDataFrame.div, strings=False)
def test_eq():
_test_binop(pd.DataFrame.eq, gr.GrizzlyDataFrame.eq, strings=True)
def test_ne():
_test_binop(pd.DataFrame.ne, gr.GrizzlyDataFrame.ne, strings=True)
def test_le():
_test_binop(pd.DataFrame.le, gr.GrizzlyDataFrame.le, strings=False)
def test_lt():
_test_binop(pd.DataFrame.lt, gr.GrizzlyDataFrame.lt, strings=False)
def test_ge():
_test_binop(pd.DataFrame.ge, gr.GrizzlyDataFrame.ge, strings=False)
def test_gt():
_test_binop(pd.DataFrame.gt, gr.GrizzlyDataFrame.gt, strings=False)
|
{
"content_hash": "cf5a669ca6df238ce2d5a198b038eea7",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 81,
"avg_line_length": 31.67,
"alnum_prop": 0.6248815914114304,
"repo_name": "sppalkia/weld",
"id": "519b501fb021ba52fd9fd84b6faa31a7557aa394",
"size": "3167",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "weld-python/tests/grizzly/core/test_frame.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "C",
"bytes": "660"
},
{
"name": "C++",
"bytes": "27987"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "301176"
},
{
"name": "Rust",
"bytes": "1127035"
},
{
"name": "Shell",
"bytes": "2090"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1100, 808)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(1100, 500))
MainWindow.setAutoFillBackground(False)
self.centralwidget = QtGui.QWidget(MainWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setMinimumSize(QtCore.QSize(1013, 0))
self.centralwidget.setFocusPolicy(QtCore.Qt.NoFocus)
self.centralwidget.setAutoFillBackground(False)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_6 = QtGui.QGridLayout(self.centralwidget)
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_instructions_title = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_instructions_title.sizePolicy().hasHeightForWidth())
self.label_instructions_title.setSizePolicy(sizePolicy)
self.label_instructions_title.setMinimumSize(QtCore.QSize(0, 20))
font = QtGui.QFont()
font.setFamily("Serif")
font.setWeight(75)
font.setBold(True)
self.label_instructions_title.setFont(font)
self.label_instructions_title.setObjectName("label_instructions_title")
self.gridLayout_6.addWidget(self.label_instructions_title, 0, 0, 1, 1)
self.label_instructions = QtGui.QTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_instructions.sizePolicy().hasHeightForWidth())
self.label_instructions.setSizePolicy(sizePolicy)
self.label_instructions.setMinimumSize(QtCore.QSize(500, 50))
self.label_instructions.setMaximumSize(QtCore.QSize(16777215, 50))
font = QtGui.QFont()
font.setFamily("Monospace")
font.setPointSize(18)
self.label_instructions.setFont(font)
self.label_instructions.setProperty("cursor", QtCore.Qt.IBeamCursor)
self.label_instructions.setFocusPolicy(QtCore.Qt.NoFocus)
self.label_instructions.setToolTip("")
self.label_instructions.setReadOnly(False)
self.label_instructions.setObjectName("label_instructions")
self.gridLayout_6.addWidget(self.label_instructions, 4, 0, 1, 1)
self.label_word_output = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.label_word_output.sizePolicy().hasHeightForWidth())
self.label_word_output.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Serif")
font.setWeight(75)
font.setBold(True)
self.label_word_output.setFont(font)
self.label_word_output.setTextFormat(QtCore.Qt.PlainText)
self.label_word_output.setObjectName("label_word_output")
self.gridLayout_6.addWidget(self.label_word_output, 7, 0, 1, 1)
self.gridLayout_5 = QtGui.QGridLayout()
self.gridLayout_5.setObjectName("gridLayout_5")
self.channel_names = QtGui.QGraphicsView(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.channel_names.sizePolicy().hasHeightForWidth())
self.channel_names.setSizePolicy(sizePolicy)
self.channel_names.setMinimumSize(QtCore.QSize(217, 211))
font = QtGui.QFont()
font.setFamily("Monospace")
font.setPointSize(16)
self.channel_names.setFont(font)
self.channel_names.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.channel_names.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.channel_names.setObjectName("channel_names")
self.gridLayout_5.addWidget(self.channel_names, 2, 0, 1, 1)
self.label_channel_names = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Serif")
font.setWeight(75)
font.setBold(True)
self.label_channel_names.setFont(font)
self.label_channel_names.setObjectName("label_channel_names")
self.gridLayout_5.addWidget(self.label_channel_names, 1, 0, 1, 1)
self.label_letter_likelihoods = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_letter_likelihoods.sizePolicy().hasHeightForWidth())
self.label_letter_likelihoods.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Serif")
font.setPointSize(10)
font.setWeight(75)
font.setItalic(False)
font.setBold(True)
self.label_letter_likelihoods.setFont(font)
self.label_letter_likelihoods.setFrameShape(QtGui.QFrame.StyledPanel)
self.label_letter_likelihoods.setFrameShadow(QtGui.QFrame.Sunken)
self.label_letter_likelihoods.setLineWidth(1)
self.label_letter_likelihoods.setTextFormat(QtCore.Qt.PlainText)
self.label_letter_likelihoods.setObjectName("label_letter_likelihoods")
self.gridLayout_5.addWidget(self.label_letter_likelihoods, 1, 1, 1, 1)
self.label_click_distribution = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Serif")
font.setWeight(75)
font.setBold(True)
self.label_click_distribution.setFont(font)
self.label_click_distribution.setObjectName("label_click_distribution")
self.gridLayout_5.addWidget(self.label_click_distribution, 1, 4, 1, 1)
self.best_words_disp = QtGui.QTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.best_words_disp.sizePolicy().hasHeightForWidth())
self.best_words_disp.setSizePolicy(sizePolicy)
self.best_words_disp.setMinimumSize(QtCore.QSize(170, 211))
font = QtGui.QFont()
font.setFamily("Monospace")
font.setPointSize(12)
self.best_words_disp.setFont(font)
self.best_words_disp.setFocusPolicy(QtCore.Qt.NoFocus)
self.best_words_disp.setLineWidth(3)
self.best_words_disp.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.best_words_disp.setObjectName("best_words_disp")
self.gridLayout_5.addWidget(self.best_words_disp, 2, 2, 1, 1)
self.label_best_words = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_best_words.sizePolicy().hasHeightForWidth())
self.label_best_words.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Serif")
font.setWeight(75)
font.setBold(True)
self.label_best_words.setFont(font)
self.label_best_words.setObjectName("label_best_words")
self.gridLayout_5.addWidget(self.label_best_words, 1, 2, 1, 1)
self.alphabet_likelihood_view = QtGui.QGraphicsView(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.alphabet_likelihood_view.sizePolicy().hasHeightForWidth())
self.alphabet_likelihood_view.setSizePolicy(sizePolicy)
self.alphabet_likelihood_view.setMinimumSize(QtCore.QSize(310, 211))
self.alphabet_likelihood_view.setMaximumSize(QtCore.QSize(320, 211))
font = QtGui.QFont()
font.setFamily("Monospace")
font.setPointSize(16)
font.setWeight(50)
font.setBold(False)
self.alphabet_likelihood_view.setFont(font)
self.alphabet_likelihood_view.setFocusPolicy(QtCore.Qt.NoFocus)
self.alphabet_likelihood_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.alphabet_likelihood_view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.alphabet_likelihood_view.setObjectName("alphabet_likelihood_view")
self.gridLayout_5.addWidget(self.alphabet_likelihood_view, 2, 1, 1, 1)
self.click_distribution_view = QtGui.QWidget(self.centralwidget)
self.click_distribution_view.setMinimumSize(QtCore.QSize(500, 211))
self.click_distribution_view.setObjectName("click_distribution_view")
self.gridLayout_5.addWidget(self.click_distribution_view, 2, 4, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem, 3, 1, 1, 1)
self.gridLayout_6.addLayout(self.gridLayout_5, 9, 0, 1, 7)
self.selected_words_disp = QtGui.QTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(100)
sizePolicy.setHeightForWidth(self.selected_words_disp.sizePolicy().hasHeightForWidth())
self.selected_words_disp.setSizePolicy(sizePolicy)
self.selected_words_disp.setMinimumSize(QtCore.QSize(500, 80))
self.selected_words_disp.setMaximumSize(QtCore.QSize(16777215, 80))
font = QtGui.QFont()
font.setFamily("Monospace")
font.setPointSize(12)
self.selected_words_disp.setFont(font)
self.selected_words_disp.setFocusPolicy(QtCore.Qt.NoFocus)
self.selected_words_disp.setToolTip("")
self.selected_words_disp.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.selected_words_disp.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.selected_words_disp.setObjectName("selected_words_disp")
self.gridLayout_6.addWidget(self.selected_words_disp, 8, 0, 1, 1)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
self.gridLayout.setObjectName("gridLayout")
self.clear_button = QtGui.QPushButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clear_button.sizePolicy().hasHeightForWidth())
self.clear_button.setSizePolicy(sizePolicy)
self.clear_button.setMinimumSize(QtCore.QSize(60, 50))
font = QtGui.QFont()
font.setFamily("Serif")
font.setWeight(75)
font.setBold(True)
self.clear_button.setFont(font)
self.clear_button.setFocusPolicy(QtCore.Qt.NoFocus)
self.clear_button.setCheckable(False)
self.clear_button.setObjectName("clear_button")
self.gridLayout.addWidget(self.clear_button, 2, 0, 1, 1)
self.scrollbar_letter_speed = QtGui.QSlider(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollbar_letter_speed.sizePolicy().hasHeightForWidth())
self.scrollbar_letter_speed.setSizePolicy(sizePolicy)
self.scrollbar_letter_speed.setMinimumSize(QtCore.QSize(200, 0))
self.scrollbar_letter_speed.setMinimum(50)
self.scrollbar_letter_speed.setMaximum(95)
self.scrollbar_letter_speed.setPageStep(1)
self.scrollbar_letter_speed.setProperty("value", 65)
self.scrollbar_letter_speed.setOrientation(QtCore.Qt.Horizontal)
self.scrollbar_letter_speed.setObjectName("scrollbar_letter_speed")
self.gridLayout.addWidget(self.scrollbar_letter_speed, 5, 0, 1, 1)
self.gridLayout_3 = QtGui.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_letter_speed = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Serif")
font.setPointSize(10)
font.setWeight(75)
font.setBold(True)
self.label_letter_speed.setFont(font)
self.label_letter_speed.setObjectName("label_letter_speed")
self.gridLayout_3.addWidget(self.label_letter_speed, 3, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem1, 2, 0, 1, 1)
self.gridLayout.addLayout(self.gridLayout_3, 3, 0, 1, 1)
self.button_pause = QtGui.QPushButton(self.centralwidget)
self.button_pause.setMinimumSize(QtCore.QSize(60, 50))
font = QtGui.QFont()
font.setFamily("Serif")
font.setWeight(75)
font.setBold(True)
self.button_pause.setFont(font)
self.button_pause.setFocusPolicy(QtCore.Qt.NoFocus)
self.button_pause.setCheckable(True)
self.button_pause.setObjectName("button_pause")
self.gridLayout.addWidget(self.button_pause, 0, 0, 1, 1)
self.gridLayout_6.addLayout(self.gridLayout, 4, 6, 5, 1)
self.phrase_disp = QtGui.QTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.phrase_disp.sizePolicy().hasHeightForWidth())
self.phrase_disp.setSizePolicy(sizePolicy)
self.phrase_disp.setMinimumSize(QtCore.QSize(700, 50))
font = QtGui.QFont()
font.setFamily("Monospace")
font.setPointSize(18)
self.phrase_disp.setFont(font)
self.phrase_disp.setFocusPolicy(QtCore.Qt.NoFocus)
self.phrase_disp.setStyleSheet("border-style:ridge;\n"
"border-color: rgb(255, 92, 144);\n"
"border-width:5px;\n"
"")
self.phrase_disp.setObjectName("phrase_disp")
self.gridLayout_6.addWidget(self.phrase_disp, 6, 0, 1, 1)
self.label_phrases = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(self.label_phrases.sizePolicy().hasHeightForWidth())
self.label_phrases.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Serif")
font.setWeight(75)
font.setBold(True)
self.label_phrases.setFont(font)
self.label_phrases.setObjectName("label_phrases")
self.gridLayout_6.addWidget(self.label_phrases, 5, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1100, 23))
self.menubar.setObjectName("menubar")
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuEdit = QtGui.QMenu(self.menubar)
self.menuEdit.setObjectName("menuEdit")
self.menuView = QtGui.QMenu(self.menubar)
self.menuView.setObjectName("menuView")
self.menuControl = QtGui.QMenu(self.menubar)
self.menuControl.setObjectName("menuControl")
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
self.menuMode = QtGui.QMenu(self.menubar)
self.menuMode.setObjectName("menuMode")
self.menuDisplay = QtGui.QMenu(self.menubar)
self.menuDisplay.setObjectName("menuDisplay")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.alphabet_widget_display = QtGui.QDockWidget(MainWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.alphabet_widget_display.sizePolicy().hasHeightForWidth())
self.alphabet_widget_display.setSizePolicy(sizePolicy)
self.alphabet_widget_display.setMinimumSize(QtCore.QSize(1200, 235))
self.alphabet_widget_display.setObjectName("alphabet_widget_display")
self.alphabet_widget = QtGui.QWidget()
self.alphabet_widget.setObjectName("alphabet_widget")
self.alphabet_view = QtGui.QGraphicsView(self.alphabet_widget)
self.alphabet_view.setGeometry(QtCore.QRect(10, 0, 1000, 180))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.alphabet_view.sizePolicy().hasHeightForWidth())
self.alphabet_view.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Monospace")
font.setPointSize(16)
self.alphabet_view.setFont(font)
self.alphabet_view.setFocusPolicy(QtCore.Qt.NoFocus)
self.alphabet_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.alphabet_view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.alphabet_view.setObjectName("alphabet_view")
self.alphabet_widget_display.setWidget(self.alphabet_widget)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(8), self.alphabet_widget_display)
self.edit_alphabet = QtGui.QAction(MainWindow)
self.edit_alphabet.setObjectName("edit_alphabet")
self.action_click_selections = QtGui.QAction(MainWindow)
self.action_click_selections.setCheckable(True)
self.action_click_selections.setChecked(True)
self.action_click_selections.setObjectName("action_click_selections")
self.action_click_distribution = QtGui.QAction(MainWindow)
self.action_click_distribution.setCheckable(True)
self.action_click_distribution.setChecked(True)
self.action_click_distribution.setObjectName("action_click_distribution")
self.action_best_words = QtGui.QAction(MainWindow)
self.action_best_words.setCheckable(True)
self.action_best_words.setChecked(True)
self.action_best_words.setObjectName("action_best_words")
self.action_open = QtGui.QAction(MainWindow)
self.action_open.setCheckable(False)
self.action_open.setEnabled(True)
self.action_open.setObjectName("action_open")
self.action_letter_likelihoods = QtGui.QAction(MainWindow)
self.action_letter_likelihoods.setCheckable(True)
self.action_letter_likelihoods.setChecked(True)
self.action_letter_likelihoods.setObjectName("action_letter_likelihoods")
self.action_minimum_view = QtGui.QAction(MainWindow)
self.action_minimum_view.setCheckable(True)
self.action_minimum_view.setChecked(False)
self.action_minimum_view.setObjectName("action_minimum_view")
self.action_dictionary = QtGui.QAction(MainWindow)
self.action_dictionary.setCheckable(False)
self.action_dictionary.setObjectName("action_dictionary")
self.action_save = QtGui.QAction(MainWindow)
self.action_save.setCheckable(False)
self.action_save.setEnabled(True)
self.action_save.setObjectName("action_save")
self.action_close = QtGui.QAction(MainWindow)
self.action_close.setObjectName("action_close")
self.action_alphabet = QtGui.QAction(MainWindow)
self.action_alphabet.setCheckable(True)
self.action_alphabet.setChecked(True)
self.action_alphabet.setObjectName("action_alphabet")
self.actionB = QtGui.QAction(MainWindow)
self.actionB.setObjectName("actionB")
self.action_volume = QtGui.QAction(MainWindow)
self.action_volume.setObjectName("action_volume")
self.action_settings = QtGui.QAction(MainWindow)
self.action_settings.setCheckable(False)
self.action_settings.setObjectName("action_settings")
self.action_space_bar = QtGui.QAction(MainWindow)
self.action_space_bar.setCheckable(True)
self.action_space_bar.setChecked(True)
self.action_space_bar.setObjectName("action_space_bar")
self.action_port = QtGui.QAction(MainWindow)
self.action_port.setCheckable(True)
self.action_port.setChecked(True)
self.action_port.setObjectName("action_port")
self.action_about_ticker = QtGui.QAction(MainWindow)
self.action_about_ticker.setObjectName("action_about_ticker")
self.action_clear = QtGui.QAction(MainWindow)
self.action_clear.setObjectName("action_clear")
self.action_calibrate = QtGui.QAction(MainWindow)
self.action_calibrate.setCheckable(True)
self.action_calibrate.setChecked(True)
self.action_calibrate.setObjectName("action_calibrate")
self.action_practise = QtGui.QAction(MainWindow)
self.action_practise.setCheckable(True)
self.action_practise.setObjectName("action_practise")
self.action_tutorial = QtGui.QAction(MainWindow)
self.action_tutorial.setCheckable(True)
self.action_tutorial.setVisible(False)
self.action_tutorial.setObjectName("action_tutorial")
self.action_inc_phrases = QtGui.QAction(MainWindow)
self.action_inc_phrases.setCheckable(True)
self.action_inc_phrases.setChecked(False)
self.action_inc_phrases.setVisible(False)
self.action_inc_phrases.setObjectName("action_inc_phrases")
self.action_fast_mode = QtGui.QAction(MainWindow)
self.action_fast_mode.setCheckable(True)
self.action_fast_mode.setChecked(True)
self.action_fast_mode.setObjectName("action_fast_mode")
self.action_clear_2 = QtGui.QAction(MainWindow)
self.action_clear_2.setObjectName("action_clear_2")
self.menuFile.addAction(self.action_open)
self.menuFile.addAction(self.action_save)
self.menuFile.addAction(self.action_close)
self.menuEdit.addAction(self.action_dictionary)
self.menuEdit.addAction(self.action_volume)
self.menuEdit.addAction(self.action_settings)
self.menuView.addAction(self.action_alphabet)
self.menuControl.addAction(self.action_space_bar)
self.menuControl.addAction(self.action_port)
self.menuHelp.addAction(self.action_about_ticker)
self.menuMode.addAction(self.action_calibrate)
self.menuMode.addAction(self.action_fast_mode)
self.menuMode.addAction(self.action_practise)
self.menuMode.addAction(self.action_tutorial)
self.menuMode.addAction(self.action_inc_phrases)
self.menuDisplay.addAction(self.action_clear_2)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuEdit.menuAction())
self.menubar.addAction(self.menuView.menuAction())
self.menubar.addAction(self.menuControl.menuAction())
self.menubar.addAction(self.menuMode.menuAction())
self.menubar.addAction(self.menuDisplay.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Ticker", None, QtGui.QApplication.UnicodeUTF8))
self.label_instructions_title.setToolTip(QtGui.QApplication.translate("MainWindow", "Visual instructions", None, QtGui.QApplication.UnicodeUTF8))
self.label_instructions_title.setText(QtGui.QApplication.translate("MainWindow", "Instructions:", None, QtGui.QApplication.UnicodeUTF8))
self.label_instructions.setStyleSheet(QtGui.QApplication.translate("MainWindow", "border-style:ridge;\n"
"border-color: rgb(92, 114, 255);\n"
"border-width:5px;\n"
"", None, QtGui.QApplication.UnicodeUTF8))
self.label_word_output.setToolTip(QtGui.QApplication.translate("MainWindow", "Word selections are displayed here", None, QtGui.QApplication.UnicodeUTF8))
self.label_word_output.setText(QtGui.QApplication.translate("MainWindow", "Output sentences:", None, QtGui.QApplication.UnicodeUTF8))
self.label_channel_names.setToolTip(QtGui.QApplication.translate("MainWindow", "Names of the people reading different parts of the alphabet", None, QtGui.QApplication.UnicodeUTF8))
self.label_channel_names.setText(QtGui.QApplication.translate("MainWindow", "Names:", None, QtGui.QApplication.UnicodeUTF8))
self.label_letter_likelihoods.setToolTip(QtGui.QApplication.translate("MainWindow", "After a click the more likely letters will be darker", None, QtGui.QApplication.UnicodeUTF8))
self.label_letter_likelihoods.setText(QtGui.QApplication.translate("MainWindow", "Letter likelihoods:", None, QtGui.QApplication.UnicodeUTF8))
self.label_click_distribution.setToolTip(QtGui.QApplication.translate("MainWindow", "Visualisation of click delay distribution", None, QtGui.QApplication.UnicodeUTF8))
self.label_click_distribution.setText(QtGui.QApplication.translate("MainWindow", "Click distribution:", None, QtGui.QApplication.UnicodeUTF8))
self.label_best_words.setToolTip(QtGui.QApplication.translate("MainWindow", "Display of most probable words in dictionary.", None, QtGui.QApplication.UnicodeUTF8))
self.label_best_words.setText(QtGui.QApplication.translate("MainWindow", "Most probable words:", None, QtGui.QApplication.UnicodeUTF8))
self.selected_words_disp.setHtml(QtGui.QApplication.translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Monospace\'; font-size:12pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"> </p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.clear_button.setText(QtGui.QApplication.translate("MainWindow", "Restart", None, QtGui.QApplication.UnicodeUTF8))
        self.label_letter_speed.setToolTip(QtGui.QApplication.translate("MainWindow", "Sound overlap; if 0.0, all voices will speak simultaneously.", None, QtGui.QApplication.UnicodeUTF8))
self.label_letter_speed.setText(QtGui.QApplication.translate("MainWindow", "Speed:", None, QtGui.QApplication.UnicodeUTF8))
self.button_pause.setText(QtGui.QApplication.translate("MainWindow", "Play", None, QtGui.QApplication.UnicodeUTF8))
self.label_phrases.setText(QtGui.QApplication.translate("MainWindow", "Phrases:", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(QtGui.QApplication.translate("MainWindow", "File", None, QtGui.QApplication.UnicodeUTF8))
self.menuEdit.setTitle(QtGui.QApplication.translate("MainWindow", "Edit", None, QtGui.QApplication.UnicodeUTF8))
self.menuView.setTitle(QtGui.QApplication.translate("MainWindow", "View", None, QtGui.QApplication.UnicodeUTF8))
self.menuControl.setTitle(QtGui.QApplication.translate("MainWindow", "Control", None, QtGui.QApplication.UnicodeUTF8))
self.menuHelp.setTitle(QtGui.QApplication.translate("MainWindow", "Help", None, QtGui.QApplication.UnicodeUTF8))
self.menuMode.setTitle(QtGui.QApplication.translate("MainWindow", "Mode", None, QtGui.QApplication.UnicodeUTF8))
self.menuDisplay.setTitle(QtGui.QApplication.translate("MainWindow", "Display", None, QtGui.QApplication.UnicodeUTF8))
self.alphabet_widget_display.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Tuning in with visual assistance", None, QtGui.QApplication.UnicodeUTF8))
self.edit_alphabet.setText(QtGui.QApplication.translate("MainWindow", "Alphabet", None, QtGui.QApplication.UnicodeUTF8))
self.action_click_selections.setText(QtGui.QApplication.translate("MainWindow", "Selected letters/words", None, QtGui.QApplication.UnicodeUTF8))
self.action_click_distribution.setText(QtGui.QApplication.translate("MainWindow", "Click distribution", None, QtGui.QApplication.UnicodeUTF8))
self.action_best_words.setText(QtGui.QApplication.translate("MainWindow", "Most probable words", None, QtGui.QApplication.UnicodeUTF8))
self.action_open.setText(QtGui.QApplication.translate("MainWindow", "Open", None, QtGui.QApplication.UnicodeUTF8))
        self.action_letter_likelihoods.setText(QtGui.QApplication.translate("MainWindow", "Letter likelihoods", None, QtGui.QApplication.UnicodeUTF8))
self.action_minimum_view.setText(QtGui.QApplication.translate("MainWindow", "Show minimum", None, QtGui.QApplication.UnicodeUTF8))
self.action_dictionary.setText(QtGui.QApplication.translate("MainWindow", "Dictionary", None, QtGui.QApplication.UnicodeUTF8))
self.action_save.setText(QtGui.QApplication.translate("MainWindow", "Save", None, QtGui.QApplication.UnicodeUTF8))
self.action_close.setText(QtGui.QApplication.translate("MainWindow", "Close", None, QtGui.QApplication.UnicodeUTF8))
self.action_close.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+W", None, QtGui.QApplication.UnicodeUTF8))
self.action_alphabet.setText(QtGui.QApplication.translate("MainWindow", "Alphabet", None, QtGui.QApplication.UnicodeUTF8))
self.actionB.setText(QtGui.QApplication.translate("MainWindow", "b", None, QtGui.QApplication.UnicodeUTF8))
self.action_volume.setText(QtGui.QApplication.translate("MainWindow", "Volume", None, QtGui.QApplication.UnicodeUTF8))
self.action_settings.setText(QtGui.QApplication.translate("MainWindow", "Settings", None, QtGui.QApplication.UnicodeUTF8))
self.action_space_bar.setText(QtGui.QApplication.translate("MainWindow", "Space bar", None, QtGui.QApplication.UnicodeUTF8))
self.action_port.setText(QtGui.QApplication.translate("MainWindow", "Port 20320", None, QtGui.QApplication.UnicodeUTF8))
self.action_about_ticker.setText(QtGui.QApplication.translate("MainWindow", "About Ticker", None, QtGui.QApplication.UnicodeUTF8))
self.action_clear.setText(QtGui.QApplication.translate("MainWindow", "Clear", None, QtGui.QApplication.UnicodeUTF8))
self.action_calibrate.setText(QtGui.QApplication.translate("MainWindow", "Calibrate (\"yes_\")", None, QtGui.QApplication.UnicodeUTF8))
self.action_practise.setText(QtGui.QApplication.translate("MainWindow", "Practise Clicks", None, QtGui.QApplication.UnicodeUTF8))
self.action_tutorial.setText(QtGui.QApplication.translate("MainWindow", "Tutorial", None, QtGui.QApplication.UnicodeUTF8))
        self.action_inc_phrases.setText(QtGui.QApplication.translate("MainWindow", "Increment Phrases", None, QtGui.QApplication.UnicodeUTF8))
self.action_fast_mode.setText(QtGui.QApplication.translate("MainWindow", "Fast Mode", None, QtGui.QApplication.UnicodeUTF8))
self.action_clear_2.setText(QtGui.QApplication.translate("MainWindow", "Clear", None, QtGui.QApplication.UnicodeUTF8))
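# A minimal usage sketch (added for illustration; not emitted by pyuic4): the
# standard way a generated layout class such as Ui_MainWindow is attached to a
# QMainWindow. The real application wiring for Ticker lives elsewhere.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    main_window = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(main_window)   # builds every widget, menu and action onto main_window
    main_window.show()
    sys.exit(app.exec_())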
|
{
"content_hash": "67ce35d63f6ed6e5565c01bcdcd66dd2",
"timestamp": "",
"source": "github",
"line_count": 504,
"max_line_length": 188,
"avg_line_length": 64.95238095238095,
"alnum_prop": 0.7231182795698925,
"repo_name": "singleswitch/ticker",
"id": "4f24ad50ae74cd6620ca06143c58e292c0da635a",
"size": "32970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ticker_layout.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5053"
},
{
"name": "C++",
"bytes": "23830"
},
{
"name": "Makefile",
"bytes": "1049"
},
{
"name": "Python",
"bytes": "1004504"
},
{
"name": "Shell",
"bytes": "7102"
},
{
"name": "TeX",
"bytes": "12004"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20180128_2257'),
]
operations = [
migrations.AlterField(
model_name='user',
name='id',
field=models.CharField(db_index=True, default='97adb9b1052f11e89896708bcdd0cf1e', max_length=255, primary_key=True, serialize=False, unique=True),
),
]
|
{
"content_hash": "a12fa06faf5adeb2ddc9e5d34d24b966",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 158,
"avg_line_length": 26.555555555555557,
"alnum_prop": 0.6359832635983264,
"repo_name": "DiegoCorrea/ouvidoMusical",
"id": "c8c8f011a809f6113b0ff315faeec208001682b0",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/data/users/migrations/0003_auto_20180129_2004.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182332"
},
{
"name": "Shell",
"bytes": "51486"
}
],
"symlink_target": ""
}
|
"""
test correct setup/teardowns at
module, class, and instance level
"""
from __future__ import absolute_import, division, print_function
import pytest
def test_module_and_function_setup(testdir):
reprec = testdir.inline_runsource("""
modlevel = []
def setup_module(module):
assert not modlevel
module.modlevel.append(42)
def teardown_module(module):
modlevel.pop()
def setup_function(function):
function.answer = 17
def teardown_function(function):
del function.answer
def test_modlevel():
assert modlevel[0] == 42
assert test_modlevel.answer == 17
class TestFromClass(object):
def test_module(self):
assert modlevel[0] == 42
assert not hasattr(test_modlevel, 'answer')
""")
rep = reprec.matchreport("test_modlevel")
assert rep.passed
rep = reprec.matchreport("test_module")
assert rep.passed
def test_module_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
values = []
def setup_module(module):
values.append(1)
0/0
def test_nothing():
pass
def teardown_module(module):
values.append(2)
""")
reprec.assertoutcome(failed=1)
calls = reprec.getcalls("pytest_runtest_setup")
assert calls[0].item.module.values == [1]
def test_setup_function_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
modlevel = []
def setup_function(function):
modlevel.append(1)
0/0
def teardown_function(module):
modlevel.append(2)
def test_func():
pass
""")
calls = reprec.getcalls("pytest_runtest_setup")
assert calls[0].item.module.modlevel == [1]
def test_class_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSimpleClassSetup(object):
clslevel = []
def setup_class(cls):
cls.clslevel.append(23)
def teardown_class(cls):
cls.clslevel.pop()
def test_classlevel(self):
assert self.clslevel[0] == 23
class TestInheritedClassSetupStillWorks(TestSimpleClassSetup):
def test_classlevel_anothertime(self):
assert self.clslevel == [23]
def test_cleanup():
assert not TestSimpleClassSetup.clslevel
assert not TestInheritedClassSetupStillWorks.clslevel
""")
reprec.assertoutcome(passed=1 + 2 + 1)
def test_class_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
class TestSimpleClassSetup(object):
clslevel = []
def setup_class(cls):
0/0
def teardown_class(cls):
cls.clslevel.append(1)
def test_classlevel(self):
pass
def test_cleanup():
assert not TestSimpleClassSetup.clslevel
""")
reprec.assertoutcome(failed=1, passed=1)
def test_method_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSetupMethod(object):
def setup_method(self, meth):
self.methsetup = meth
def teardown_method(self, meth):
del self.methsetup
def test_some(self):
assert self.methsetup == self.test_some
def test_other(self):
assert self.methsetup == self.test_other
""")
reprec.assertoutcome(passed=2)
def test_method_setup_failure_no_teardown(testdir):
reprec = testdir.inline_runsource("""
class TestMethodSetup(object):
clslevel = []
def setup_method(self, method):
self.clslevel.append(1)
0/0
def teardown_method(self, method):
self.clslevel.append(2)
def test_method(self):
pass
def test_cleanup():
assert TestMethodSetup.clslevel == [1]
""")
reprec.assertoutcome(failed=1, passed=1)
def test_method_generator_setup(testdir):
reprec = testdir.inline_runsource("""
class TestSetupTeardownOnInstance(object):
def setup_class(cls):
cls.classsetup = True
def setup_method(self, method):
self.methsetup = method
def test_generate(self):
assert self.classsetup
assert self.methsetup == self.test_generate
yield self.generated, 5
yield self.generated, 2
def generated(self, value):
assert self.classsetup
assert self.methsetup == self.test_generate
assert value == 5
""")
reprec.assertoutcome(passed=1, failed=1)
def test_func_generator_setup(testdir):
reprec = testdir.inline_runsource("""
import sys
def setup_module(mod):
print ("setup_module")
mod.x = []
def setup_function(fun):
print ("setup_function")
x.append(1)
def teardown_function(fun):
print ("teardown_function")
x.pop()
def test_one():
assert x == [1]
def check():
print ("check")
sys.stderr.write("e\\n")
assert x == [1]
yield check
assert x == [1]
""")
rep = reprec.matchreport("test_one", names="pytest_runtest_logreport")
assert rep.passed
def test_method_setup_uses_fresh_instances(testdir):
reprec = testdir.inline_runsource("""
class TestSelfState1(object):
memory = []
def test_hello(self):
self.memory.append(self)
def test_afterhello(self):
assert self != self.memory[0]
""")
reprec.assertoutcome(passed=2, failed=0)
def test_setup_that_skips_calledagain(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
pytest.skip("x")
def test_function1():
pass
def test_function2():
pass
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(skipped=2)
def test_setup_fails_again_on_all_tests(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
raise ValueError(42)
def test_function1():
pass
def test_function2():
pass
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(failed=2)
def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
p = testdir.makepyfile("""
import pytest
def setup_module(mod):
raise ValueError(42)
@pytest.fixture
def hello(request):
raise ValueError("xyz43")
def test_function1(hello):
pass
def test_function2(hello):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*function1*",
"*ValueError*42*",
"*function2*",
"*ValueError*42*",
"*2 error*"
])
assert "xyz43" not in result.stdout.str()
@pytest.mark.parametrize('arg', ['', 'arg'])
def test_setup_teardown_function_level_with_optional_argument(testdir, monkeypatch, arg):
"""parameter to setup/teardown xunit-style functions parameter is now optional (#1728)."""
import sys
trace_setups_teardowns = []
monkeypatch.setattr(sys, 'trace_setups_teardowns', trace_setups_teardowns, raising=False)
p = testdir.makepyfile("""
import pytest
import sys
trace = sys.trace_setups_teardowns.append
def setup_module({arg}): trace('setup_module')
def teardown_module({arg}): trace('teardown_module')
def setup_function({arg}): trace('setup_function')
def teardown_function({arg}): trace('teardown_function')
def test_function_1(): pass
def test_function_2(): pass
class Test(object):
def setup_method(self, {arg}): trace('setup_method')
def teardown_method(self, {arg}): trace('teardown_method')
def test_method_1(self): pass
def test_method_2(self): pass
""".format(arg=arg))
result = testdir.inline_run(p)
result.assertoutcome(passed=4)
expected = [
'setup_module',
'setup_function',
'teardown_function',
'setup_function',
'teardown_function',
'setup_method',
'teardown_method',
'setup_method',
'teardown_method',
'teardown_module',
]
assert trace_setups_teardowns == expected
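# A stand-alone illustration of the behaviour checked by
# test_setup_teardown_function_level_with_optional_argument above. It is kept as
# a comment so that it does not register extra xunit hooks in this test module;
# the file content is a sketch, not copied from pytest's documentation.
#
#   # content of test_example.py -- note that the hooks take no argument
#   order = []
#   def setup_module():
#       order.append('setup_module')
#   def setup_function():
#       order.append('setup_function')
#   def teardown_function():
#       order.append('teardown_function')
#   def test_one():
#       assert order == ['setup_module', 'setup_function']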
|
{
"content_hash": "1b033f864ba2c0e5ba9fe400a16625dd",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 94,
"avg_line_length": 27.61755485893417,
"alnum_prop": 0.5662883087400681,
"repo_name": "tareqalayan/pytest",
"id": "fc931f86720ac3903bfa205b7887c4df6b4e407b",
"size": "8810",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "testing/test_runner_xunit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1554146"
}
],
"symlink_target": ""
}
|
import logging
from django.contrib.auth import authenticate, login
from .crowd_client import crowd_validate_session, CrowdError
log = logging.getLogger('kompassi_crowd')
class KompassiCrowdAuthenticationMiddleware(object):
def process_request(self, request):
if request.user.is_anonymous():
try:
username = crowd_validate_session(request)
except CrowdError as e:
log.error(e)
return None
if username is not None:
user = authenticate(username=username) # look, no password
if user is not None and not user.is_anonymous():
login(request, user)
log.debug("logged in {username} via Crowd".format(username=username))
return None
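# A sketch of the password-less authentication backend that the
# authenticate(username=username) call above presupposes. The project's real
# backend lives elsewhere; the class and its behaviour here are assumptions,
# kept to the stock Django backend interface (authenticate/get_user).
from django.contrib.auth.models import User
class CrowdUsernameBackendSketch(object):
    def authenticate(self, username=None):
        # Crowd has already validated the session, so no password is checked.
        if not username:
            return None
        try:
            return User.objects.get(username=username)
        except User.DoesNotExist:
            return None
    def get_user(self, user_id):
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None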
|
{
"content_hash": "e99a59e1bddff82f300ee9272cc7ac90",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 27.82758620689655,
"alnum_prop": 0.6158612143742255,
"repo_name": "mniemela/kirppu",
"id": "7dee88f9c578f35977c419dd3329ef1e45cdd921",
"size": "807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kompassi_crowd/middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7909"
},
{
"name": "CoffeeScript",
"bytes": "48657"
},
{
"name": "Gettext Catalog",
"bytes": "17496"
},
{
"name": "HTML",
"bytes": "21828"
},
{
"name": "JavaScript",
"bytes": "27770"
},
{
"name": "Makefile",
"bytes": "282"
},
{
"name": "Python",
"bytes": "97258"
}
],
"symlink_target": ""
}
|
"""Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Module for caching precomputed features.
"""
from pathlib import Path
import pickle
base_features_path = Path("./downloaded_data/features_cache/")
def get_cache_path(dataset, expert):
"""Gets the file path to cache embeddings from an expert.
Arguments:
dataset: a BaseDataset class for the dataset that the embeddings are
for.
expert: a BaseExpert class for the expert that the embeddings are from.
Returns:
A pathlib object that specifies the file location where the cache will
be placed.
"""
path_directory = base_features_path / dataset.dataset_name
path_directory.mkdir(parents=True, exist_ok=True)
return path_directory / (expert.name + ".pkl")
def cache_features_by_expert_and_dataset(dataset, expert, features):
"""Caches the given feature embeddings for the dataset/expert.
Arguments:
dataset: a BaseDataset class for the dataset that the embeddings are
for.
        expert: a BaseExpert class for the expert that the embeddings are from.
features: the computed features/embeddings to be cached.
Returns: None
"""
file_path = get_cache_path(dataset, expert)
with open(file_path, "wb") as file:
pickle.dump(features, file)
def get_cached_features_by_expert_and_dataset(dataset, expert):
"""Returns the cached feature embeddings for the dataset/expert.
Arguments:
dataset: a BaseDataset class for the dataset that the embeddings are
for.
expert: a BaseExpert class for the expert that the embeddings are from.
Returns: The previously cached features for the given dataset/expert.
Returns None if there is no value cached.
"""
file_path = get_cache_path(dataset, expert)
if not file_path.exists():
return None
with open(file_path, "rb") as file:
cached_features = pickle.load(file)
return cached_features
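# A minimal usage sketch (illustrative only): the stub classes below are
# hypothetical stand-ins that just expose the attributes this module reads,
# i.e. `dataset_name` on the dataset and `name` on the expert.
if __name__ == "__main__":
    class StubDataset:
        dataset_name = "example_dataset"
    class StubExpert:
        name = "example_expert"
    cache_features_by_expert_and_dataset(StubDataset(), StubExpert(), [0.1, 0.2, 0.3])
    print(get_cached_features_by_expert_and_dataset(StubDataset(), StubExpert()))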
|
{
"content_hash": "5cbbf9d9aab98fbb6f193fb127bfb039",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 30.337349397590362,
"alnum_prop": 0.7029388403494837,
"repo_name": "googleinterns/via-content-understanding",
"id": "b2474ec4138f1ab385dc133579838ac0cdc1fcf3",
"size": "2518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "videoretrieval/cache/features_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "34775"
},
{
"name": "Python",
"bytes": "298662"
}
],
"symlink_target": ""
}
|
"""The tests for the Home Assistant HTTP component."""
from datetime import timedelta
from ipaddress import ip_network
from unittest.mock import patch
from aiohttp import BasicAuth, web
from aiohttp.web_exceptions import HTTPUnauthorized
import pytest
from homeassistant.auth.providers import trusted_networks
from homeassistant.components.http.auth import async_sign_path, setup_auth
from homeassistant.components.http.const import KEY_AUTHENTICATED
from homeassistant.components.http.forwarded import async_setup_forwarded
from homeassistant.setup import async_setup_component
from . import HTTP_HEADER_HA_AUTH, mock_real_ip
API_PASSWORD = "test-password"
# Don't add 127.0.0.1/::1 as trusted, as it may interfere with other test cases
TRUSTED_NETWORKS = [
ip_network("192.0.2.0/24"),
ip_network("2001:DB8:ABCD::/48"),
ip_network("100.64.0.1"),
ip_network("FD01:DB8::1"),
]
TRUSTED_ADDRESSES = ["100.64.0.1", "192.0.2.100", "FD01:DB8::1", "2001:DB8:ABCD::1"]
UNTRUSTED_ADDRESSES = ["198.51.100.1", "2001:DB8:FA1::1", "127.0.0.1", "::1"]
async def mock_handler(request):
"""Return if request was authenticated."""
if not request[KEY_AUTHENTICATED]:
raise HTTPUnauthorized
user = request.get("hass_user")
user_id = user.id if user else None
return web.json_response(status=200, data={"user_id": user_id})
async def get_legacy_user(auth):
"""Get the user in legacy_api_password auth provider."""
provider = auth.get_auth_provider("legacy_api_password", None)
return await auth.async_get_or_create_user(
await provider.async_get_or_create_credentials({})
)
@pytest.fixture
def app(hass):
"""Fixture to set up a web.Application."""
app = web.Application()
app["hass"] = hass
app.router.add_get("/", mock_handler)
async_setup_forwarded(app, [])
return app
@pytest.fixture
def app2(hass):
"""Fixture to set up a web.Application without real_ip middleware."""
app = web.Application()
app["hass"] = hass
app.router.add_get("/", mock_handler)
return app
@pytest.fixture
def trusted_networks_auth(hass):
"""Load trusted networks auth provider."""
prv = trusted_networks.TrustedNetworksAuthProvider(
hass,
hass.auth._store,
{"type": "trusted_networks", "trusted_networks": TRUSTED_NETWORKS},
)
hass.auth._providers[(prv.type, prv.id)] = prv
return prv
async def test_auth_middleware_loaded_by_default(hass):
"""Test accessing to server from banned IP when feature is off."""
with patch("homeassistant.components.http.setup_auth") as mock_setup:
await async_setup_component(hass, "http", {"http": {}})
assert len(mock_setup.mock_calls) == 1
async def test_cant_access_with_password_in_header(
app, aiohttp_client, legacy_auth, hass
):
"""Test access with password in header."""
setup_auth(hass, app)
client = await aiohttp_client(app)
req = await client.get("/", headers={HTTP_HEADER_HA_AUTH: API_PASSWORD})
assert req.status == 401
req = await client.get("/", headers={HTTP_HEADER_HA_AUTH: "wrong-pass"})
assert req.status == 401
async def test_cant_access_with_password_in_query(
app, aiohttp_client, legacy_auth, hass
):
"""Test access with password in URL."""
setup_auth(hass, app)
client = await aiohttp_client(app)
resp = await client.get("/", params={"api_password": API_PASSWORD})
assert resp.status == 401
resp = await client.get("/")
assert resp.status == 401
resp = await client.get("/", params={"api_password": "wrong-password"})
assert resp.status == 401
async def test_basic_auth_does_not_work(app, aiohttp_client, hass, legacy_auth):
"""Test access with basic authentication."""
setup_auth(hass, app)
client = await aiohttp_client(app)
req = await client.get("/", auth=BasicAuth("homeassistant", API_PASSWORD))
assert req.status == 401
req = await client.get("/", auth=BasicAuth("wrong_username", API_PASSWORD))
assert req.status == 401
req = await client.get("/", auth=BasicAuth("homeassistant", "wrong password"))
assert req.status == 401
req = await client.get("/", headers={"authorization": "NotBasic abcdefg"})
assert req.status == 401
async def test_cannot_access_with_trusted_ip(
hass, app2, trusted_networks_auth, aiohttp_client, hass_owner_user
):
"""Test access with an untrusted ip address."""
setup_auth(hass, app2)
set_mock_ip = mock_real_ip(app2)
client = await aiohttp_client(app2)
for remote_addr in UNTRUSTED_ADDRESSES:
set_mock_ip(remote_addr)
resp = await client.get("/")
assert resp.status == 401, f"{remote_addr} shouldn't be trusted"
for remote_addr in TRUSTED_ADDRESSES:
set_mock_ip(remote_addr)
resp = await client.get("/")
assert resp.status == 401, f"{remote_addr} shouldn't be trusted"
async def test_auth_active_access_with_access_token_in_header(
hass, app, aiohttp_client, hass_access_token
):
"""Test access with access token in header."""
token = hass_access_token
setup_auth(hass, app)
client = await aiohttp_client(app)
refresh_token = await hass.auth.async_validate_access_token(hass_access_token)
req = await client.get("/", headers={"Authorization": f"Bearer {token}"})
assert req.status == 200
assert await req.json() == {"user_id": refresh_token.user.id}
req = await client.get("/", headers={"AUTHORIZATION": f"Bearer {token}"})
assert req.status == 200
assert await req.json() == {"user_id": refresh_token.user.id}
req = await client.get("/", headers={"authorization": f"Bearer {token}"})
assert req.status == 200
assert await req.json() == {"user_id": refresh_token.user.id}
req = await client.get("/", headers={"Authorization": token})
assert req.status == 401
req = await client.get("/", headers={"Authorization": f"BEARER {token}"})
assert req.status == 401
refresh_token = await hass.auth.async_validate_access_token(hass_access_token)
refresh_token.user.is_active = False
req = await client.get("/", headers={"Authorization": f"Bearer {token}"})
assert req.status == 401
async def test_auth_active_access_with_trusted_ip(
hass, app2, trusted_networks_auth, aiohttp_client, hass_owner_user
):
"""Test access with an untrusted ip address."""
setup_auth(hass, app2)
set_mock_ip = mock_real_ip(app2)
client = await aiohttp_client(app2)
for remote_addr in UNTRUSTED_ADDRESSES:
set_mock_ip(remote_addr)
resp = await client.get("/")
assert resp.status == 401, f"{remote_addr} shouldn't be trusted"
for remote_addr in TRUSTED_ADDRESSES:
set_mock_ip(remote_addr)
resp = await client.get("/")
assert resp.status == 401, f"{remote_addr} shouldn't be trusted"
async def test_auth_legacy_support_api_password_cannot_access(
app, aiohttp_client, legacy_auth, hass
):
"""Test access using api_password if auth.support_legacy."""
setup_auth(hass, app)
client = await aiohttp_client(app)
req = await client.get("/", headers={HTTP_HEADER_HA_AUTH: API_PASSWORD})
assert req.status == 401
resp = await client.get("/", params={"api_password": API_PASSWORD})
assert resp.status == 401
req = await client.get("/", auth=BasicAuth("homeassistant", API_PASSWORD))
assert req.status == 401
async def test_auth_access_signed_path(hass, app, aiohttp_client, hass_access_token):
"""Test access with signed url."""
app.router.add_post("/", mock_handler)
app.router.add_get("/another_path", mock_handler)
setup_auth(hass, app)
client = await aiohttp_client(app)
refresh_token = await hass.auth.async_validate_access_token(hass_access_token)
signed_path = async_sign_path(hass, refresh_token.id, "/", timedelta(seconds=5))
req = await client.get(signed_path)
assert req.status == 200
data = await req.json()
assert data["user_id"] == refresh_token.user.id
# Use signature on other path
req = await client.get("/another_path?{}".format(signed_path.split("?")[1]))
assert req.status == 401
# We only allow GET
req = await client.post(signed_path)
assert req.status == 401
# Never valid as expired in the past.
expired_signed_path = async_sign_path(
hass, refresh_token.id, "/", timedelta(seconds=-5)
)
req = await client.get(expired_signed_path)
assert req.status == 401
# refresh token gone should also invalidate signature
await hass.auth.async_remove_refresh_token(refresh_token)
req = await client.get(signed_path)
assert req.status == 401
|
{
"content_hash": "0c0af4c1a0bbe3da55de9bd3dbfb95ec",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 85,
"avg_line_length": 33.136882129277566,
"alnum_prop": 0.6707974756167527,
"repo_name": "adrienbrault/home-assistant",
"id": "71c01630a671e6eee91b60d8dbd72a79001f5061",
"size": "8715",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/http/test_auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
import db_objects as db
import Database
import datetime
import Functions
def movement_average_for_stamp(session, stamp):
current_trips = Functions.current_trips(session, 1)
deltas = [Functions.trip_movement(session, x, stamp, datetime.timedelta(minutes=6)) for x in
current_trips]
deltas = [x for x in deltas if x != -1]
return sum(deltas) / float(len(deltas))
def last_n_minutes(session, minutes=30):
averages = []
current_time = datetime.datetime.utcnow()
while current_time > datetime.datetime.utcnow() - datetime.timedelta(minutes=minutes):
averages.append(movement_average_for_stamp(session, current_time))
current_time = current_time - datetime.timedelta(minutes=6)
return averages
session = Database.connect()
print(movement_average_for_stamp(session, datetime.datetime.utcnow()))
# print(last_n_minutes(session))
|
{
"content_hash": "b9113d08c7abdd7daa503c7abe9e7753",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 96,
"avg_line_length": 30.75862068965517,
"alnum_prop": 0.7130044843049327,
"repo_name": "nettube/mbtapuller",
"id": "d0fc9d856e5c4507e07f33566071339aa60987e3",
"size": "892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "print_current_status.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1030"
},
{
"name": "HTML",
"bytes": "9705"
},
{
"name": "JavaScript",
"bytes": "46280"
},
{
"name": "Python",
"bytes": "50128"
}
],
"symlink_target": ""
}
|
'''
@author: Frank
'''
import os
import zstackwoodpecker.operations.deploy_operations as deploy_operations
import zstackwoodpecker.operations.config_operations as config_operations
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
USER_PATH = os.path.expanduser('~')
EXTRA_SUITE_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_suite_setup_config.sh' % USER_PATH
EXTRA_ARM_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_arm_setup_config.sh' % USER_PATH
def test():
try:
test_lib.setup_plan.deploy_test_agent()
except:
pass
if os.path.exists(EXTRA_ARM_SETUP_SCRIPT):
os.system("bash %s" % EXTRA_ARM_SETUP_SCRIPT)
test_lib.setup_plan.execute_plan_without_deploy_test_agent()
if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
deploy_operations.deploy_initial_database(test_lib.deploy_config)
test_util.test_pass('Suite Setup Success')
|
{
"content_hash": "dd619700cad5b4f2e6d99887766fcaec",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 89,
"avg_line_length": 32.46666666666667,
"alnum_prop": 0.728952772073922,
"repo_name": "zstackio/zstack-woodpecker",
"id": "e1a8e1d91e5f349b6559f63525e5201c4c91f50e",
"size": "974",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/arm/suite_setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
}
|
"""
Common utilities.
"""
import os.path
import sys
import traceback
__all__ = [
'wraps',
'Emitter',
'samefile',
'sameuuid',
'setdefault',
'extend',
'decode_ay',
'exc_message',
'format_exc',
]
try:
from black_magic.decorator import wraps
except ImportError:
from functools import wraps
class Emitter:
"""Simple event emitter for a known finite set of events."""
def __init__(self, event_names=(), *args, **kwargs):
"""Initialize with empty lists of event handlers."""
super(Emitter, self).__init__(*args, **kwargs)
self._event_handlers = {}
for evt in event_names:
self._event_handlers[evt] = []
def trigger(self, event, *args):
"""Trigger event by name."""
for handler in self._event_handlers[event]:
handler(*args)
def connect(self, event, handler):
"""Connect an event handler."""
self._event_handlers[event].append(handler)
def disconnect(self, event, handler):
"""Disconnect an event handler."""
self._event_handlers[event].remove(handler)
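# A minimal usage sketch for Emitter (illustrative only): the event name and
# handler below are hypothetical and not part of udiskie's real event set.
#
# events = Emitter(event_names=('device_added',))
# events.connect('device_added', lambda device: print('added', device))
# events.trigger('device_added', '/dev/sdb1')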
def samefile(a: str, b: str) -> bool:
"""Check if two pathes represent the same file."""
try:
return os.path.samefile(a, b)
except OSError:
return os.path.normpath(a) == os.path.normpath(b)
def sameuuid(a: str, b: str) -> bool:
"""Compare two UUIDs."""
return a and b and a.lower() == b.lower()
def setdefault(self: dict, other: dict):
"""Like .update() but values in self take priority."""
for k, v in other.items():
self.setdefault(k, v)
def extend(a: dict, b: dict) -> dict:
"""Merge two dicts and return a new dict. Much like subclassing works."""
res = a.copy()
res.update(b)
return res
# ----------------------------------------
# udisks.Device helper classes
# ----------------------------------------
class AttrDictView:
"""Provide attribute access view to a dictionary."""
def __init__(self, data):
self.__data = data
def __getattr__(self, key):
try:
return self.__data[key]
except KeyError:
raise AttributeError
class ObjDictView:
"""Provide dict-like access view to the attributes of an object."""
def __init__(self, object, valid=None):
self._object = object
self._valid = valid
def __getitem__(self, key):
if self._valid is None or key in self._valid:
try:
return getattr(self._object, key)
except AttributeError:
raise KeyError(key)
raise KeyError("Unknown key: {}".format(key))
class DaemonBase:
active = False
def activate(self):
udisks = self._mounter.udisks
for event, handler in self.events.items():
udisks.connect(event, handler)
self.active = True
def deactivate(self):
udisks = self._mounter.udisks
for event, handler in self.events.items():
udisks.disconnect(event, handler)
self.active = False
# ----------------------------------------
# byte array to string conversion
# ----------------------------------------
def decode_ay(ay):
"""Convert binary blob from DBus queries to strings."""
if ay is None:
return ''
elif isinstance(ay, str):
return ay
elif isinstance(ay, bytes):
return ay.decode('utf-8')
else:
# dbus.Array([dbus.Byte]) or any similar sequence type:
return bytearray(ay).rstrip(bytearray((0,))).decode('utf-8')
def exc_message(exc):
"""Get an exception message."""
message = getattr(exc, 'message', None)
return message or str(exc)
def format_exc(*exc_info):
"""Show exception with traceback."""
typ, exc, tb = exc_info or sys.exc_info()
error = traceback.format_exception(typ, exc, tb)
return "".join(error)
|
{
"content_hash": "33f51df9e9bf9aa10f80efb1b6517854",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 77,
"avg_line_length": 24.559748427672957,
"alnum_prop": 0.5708066581306018,
"repo_name": "pstray/udiskie",
"id": "1b009b6db7cab30fa6b82470c431bc07ded5e249",
"size": "3905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "udiskie/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "292"
},
{
"name": "Python",
"bytes": "163599"
},
{
"name": "Shell",
"bytes": "5277"
}
],
"symlink_target": ""
}
|
def execute(args):
return args
|
{
"content_hash": "6aac12f264997f3143135804c9303497",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 18,
"avg_line_length": 9.8,
"alnum_prop": 0.4897959183673469,
"repo_name": "mabotech/maboss.py",
"id": "8182694b0dc0d348a1d2deb6202ac27aa7409a7d",
"size": "51",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libs/mabolab/mabolab/executor/pl_executor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "14864"
},
{
"name": "JavaScript",
"bytes": "4950"
},
{
"name": "Lua",
"bytes": "683"
},
{
"name": "Python",
"bytes": "433923"
},
{
"name": "Shell",
"bytes": "667"
}
],
"symlink_target": ""
}
|
from scrapy.selector import Selector
from scrapy.spider import Spider
from familynames.items import FontFamilyItem
class NouvelleNoireSpider(Spider):
name = 'nouvellenoire'
allowed_domains = [
'nouvellenoire.ch'
]
start_urls = [
'https://nouvellenoire.ch/collections/all'
]
def parse(self, response):
sel = Selector(response)
items = sel.xpath('//div[@class="details"]/a/h4/text()')
r = []
for item in items.extract():
ffi = FontFamilyItem()
try:
ffi['title'] = item.split('|')[1].strip()
except IndexError:
ffi['title'] = item
r.append(ffi)
return r
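# A minimal note on running the spider (illustrative only): from inside the
# familynames Scrapy project this spider would typically be invoked with the
# standard Scrapy CLI, e.g.:
#
#   scrapy crawl nouvellenoire -o nouvellenoire.json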
|
{
"content_hash": "4e6fc006804575c696aaae424d7833a7",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 64,
"avg_line_length": 25.571428571428573,
"alnum_prop": 0.5628491620111732,
"repo_name": "lowks/fontbakery-cli",
"id": "a0067254ea41afb4a0eb6f6b556974bbd034ef9b",
"size": "716",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bakery_cli/scrapes/familynames/familynames/spiders/nouvellenoire_spider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import tweepy
def loadkeys(filename):
""""
load parrt's keys/tokens from CSV file with form
consumer_key, consumer_secret, access_token, access_token_secret
"""
with open(filename) as f:
items = f.readline().strip().split(', ')
return items
consumer_key, consumer_secret, \
access_token, access_token_secret \
= loadkeys("/Users/parrt/Dropbox/licenses/twitter.csv")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
user = api.get_user('realDonaldTrump')
print("followers", user.followers_count)
for status in tweepy.Cursor(api.user_timeline, id='realDonaldTrump').items(100):
print(status)
|
{
"content_hash": "433010aac9b1da95ffef406beaa30fdd",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 27.074074074074073,
"alnum_prop": 0.707250341997264,
"repo_name": "parrt/msan692",
"id": "b58b9b49c68403226ba164397ee0a0aad915a154",
"size": "731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notes/code/twitter/trump_tweets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11717032"
},
{
"name": "Jupyter Notebook",
"bytes": "264133"
},
{
"name": "Python",
"bytes": "79564"
},
{
"name": "Shell",
"bytes": "2500"
},
{
"name": "TeX",
"bytes": "11367"
}
],
"symlink_target": ""
}
|
""" Unit test file for the InitCommand class.
"""
import os
import sys
import unittest
from io import StringIO
from snapshot_dbg_cli import cli_run
from snapshot_dbg_cli import data_formatter
from snapshot_dbg_cli.cli_services import CliServices
from snapshot_dbg_cli.gcloud_cli_service import GcloudCliService
from snapshot_dbg_cli.firebase_types import DatabaseGetResponse
from snapshot_dbg_cli.firebase_types import DatabaseGetStatus
from snapshot_dbg_cli.firebase_types import DatabaseInstance
from snapshot_dbg_cli.firebase_types import DatabaseCreateResponse
from snapshot_dbg_cli.firebase_types import DatabaseCreateStatus
from snapshot_dbg_cli.firebase_types import FirebaseProject
from snapshot_dbg_cli.firebase_types import FirebaseProjectGetResponse
from snapshot_dbg_cli.firebase_types import FirebaseProjectStatus
from snapshot_dbg_cli.firebase_management_rest_service import FirebaseManagementRestService
from snapshot_dbg_cli.permissions_rest_service import PermissionsRestService
from snapshot_dbg_cli.snapshot_debugger_rtdb_service import SnapshotDebuggerRtdbService
from snapshot_dbg_cli.user_output import UserOutput
from snapshot_dbg_cli.exceptions import SilentlyExitError
from unittest.mock import call
from unittest.mock import MagicMock
from unittest.mock import patch
TEST_PROJECT_ID = 'cli-test-project'
def build_firebase_project(default_rtdb_name=None):
  project_data = {'resources': {}}
  if default_rtdb_name is not None:
    project_data['resources']['realtimeDatabaseInstance'] = default_rtdb_name
  return FirebaseProject(project_data)
def build_database_instance(db_name, db_type):
return DatabaseInstance({
'name': f'projects/1111111111/locations/us-central1/instances/{db_name}',
'project': 'projects/1111111111',
'databaseUrl': f'https://{db_name}.firebaseio.com',
'type': db_type,
'state': 'ACTIVE'
})
class InitCommandTests(unittest.TestCase):
""" Contains the unit tests for the GetSnapshot class.
"""
def setUp(self):
self.cli_services = MagicMock(spec=CliServices)
self.data_formatter = data_formatter.DataFormatter()
self.cli_services.data_formatter = self.data_formatter
# By wrapping a real UserOutput instance, we can test the method calls etc,
# and it will still perform the actual stdout/stderr output which we can
# also check when desired.
self.user_output_mock = MagicMock(
wraps=UserOutput(
is_debug_enabled=False,
data_formatter=data_formatter.DataFormatter()))
self.cli_services.user_output = self.user_output_mock
self.cli_services.project_id = TEST_PROJECT_ID
self.gcloud_service_mock = MagicMock(spec=GcloudCliService)
self.cli_services.gcloud_service = self.gcloud_service_mock
self.permissions_service_mock = MagicMock(spec=PermissionsRestService)
self.cli_services.permissions_service = self.permissions_service_mock
self.firebase_management_service_mock = MagicMock(
spec=FirebaseManagementRestService)
self.cli_services.firebase_management_service = \
self.firebase_management_service_mock
self.rtdb_service_mock = MagicMock(spec=SnapshotDebuggerRtdbService)
self.cli_services.get_snapshot_debugger_rtdb_service = MagicMock(
return_value=self.rtdb_service_mock)
self.cli_services.get_snapshot_debugger_default_database_id = MagicMock(
return_value=f'{TEST_PROJECT_ID}-cdbg')
self.cli_services.get_firebase_default_rtdb_id = MagicMock(
return_value=f'{TEST_PROJECT_ID}-default-rtdb')
# Install a default happy path set of responses to mimic a project that is
# already fully configured to minimize duplicating uninteresting parts of
# the tests. The tests will override these defaults when appropriate.
self.gcloud_service_mock.is_api_enabled = MagicMock(return_value=True)
self.firebase_management_service_mock.project_get = MagicMock(
return_value=FirebaseProjectGetResponse(FirebaseProjectStatus.ENABLED,
build_firebase_project()))
self.firebase_management_service_mock.rtdb_instance_get = MagicMock(
return_value=DatabaseGetResponse(
status=DatabaseGetStatus.EXISTS,
database_instance=build_database_instance(TEST_PROJECT_ID,
'USER_DATABASE')))
self.rtdb_service_mock.get_schema_version = MagicMock(return_value='1')
def run_cmd(self, testargs, expected_exception=None):
args = ['cli-test', 'init'] + testargs
# We patch os.environ as some cli arguments can come from environment
# variables, and if they happen to be set in the terminal running the tests
# it will affect things.
# We patch the sleep to speed the tests up as the init command has a sleep,
# which slows the tests down.
with patch.object(sys, 'argv', args), \
patch.dict(os.environ, {}, clear=True), \
patch('time.sleep', return_value=None), \
patch('sys.stdout', new_callable=StringIO) as out, \
patch('sys.stderr', new_callable=StringIO) as err:
if expected_exception is not None:
with self.assertRaises(expected_exception):
cli_run.run(self.cli_services)
else:
cli_run.run(self.cli_services)
return out, err
def test_only_default_location_allowed(self):
testargs = ['--location=us-west1']
out, err = self.run_cmd(testargs, expected_exception=SilentlyExitError)
self.assertEqual(
"ERROR: Currently the only supported location is 'us-central1'\n",
err.getvalue())
self.assertEqual('', out.getvalue())
# Ensure the test had not progressed at all, that check should be done
# first.
self.permissions_service_mock.check_required_permissions.assert_not_called()
self.gcloud_service_mock.is_api_enabled.assert_not_called()
self.firebase_management_service_mock.project_get.assert_not_called()
self.firebase_management_service_mock.rtdb_instance_get.assert_not_called()
self.cli_services.get_snapshot_debugger_rtdb_service.assert_not_called()
self.rtdb_service_mock.get_schema_version.assert_not_called()
def test_permissions_check_done_as_expected(self):
testargs = []
self.permissions_service_mock.check_required_permissions = MagicMock(
side_effect=SilentlyExitError())
self.run_cmd(testargs, expected_exception=SilentlyExitError)
self.permissions_service_mock.check_required_permissions.assert_called_once(
)
# The permissions check should be first, and we put a side effect for it to
# error out, so none of these methods should have been called.
self.firebase_management_service_mock.project_get.assert_not_called()
self.gcloud_service_mock.is_api_enabled.assert_not_called()
self.firebase_management_service_mock.rtdb_instance_get.assert_not_called()
self.cli_services.get_snapshot_debugger_rtdb_service.assert_not_called()
self.rtdb_service_mock.get_schema_version.assert_not_called()
def test_firebase_management_api_not_enabled(self):
"""Tests the situation where the Firebase Management API is not enabled.
If this API (firebase.googleapis.com) is not enabled, that means the project
likely has not been enabled for Firebase. The init command should emit a
message instructing the user how to enable Firebase on their project while
also providing the exact link to use.
"""
testargs = []
self.gcloud_service_mock.is_api_enabled = MagicMock(return_value=False)
out, err = self.run_cmd(testargs, expected_exception=SilentlyExitError)
# Verify the outputted text contains the expected migrate project
# instructions for enabling a project for Firebase.
self.permissions_service_mock.check_required_permissions.assert_called_once(
)
self.gcloud_service_mock.is_api_enabled.assert_called_once_with(
'firebase.googleapis.com')
self.assertIn(
'Your Google Cloud project must be enabled for Firebase resources',
err.getvalue())
self.assertIn('Point your browser to the following URL:', err.getvalue())
self.assertIn(
'https://console.firebase.google.com/?dlAction=MigrateCloudProject'
'&cloudProjectNumber=cli-test-project', err.getvalue())
self.assertEqual('', out.getvalue())
# Ensure the command exited before progressing any further
self.firebase_management_service_mock.rtdb_instance_get.assert_not_called()
self.cli_services.get_snapshot_debugger_rtdb_service.assert_not_called()
self.rtdb_service_mock.get_schema_version.assert_not_called()
def test_project_not_firebase_enabled(self):
"""Tests the situation where the project is not enabled for Firebase.
This is the situation where the
- Firebase Management API is enabled (firebase.googleapis.com)
- The API was used to query the project state.
- The response indicates the project is not enabled for Firebase
This is a bit of a corner case situation, since if the Firebase Management
API is enabled for the project, it would be expected that the project was
enabled for Firebase.
In this case the user must enable the project for Firebase, which means they
need to follow the migration instructions that are emitted when the
Firebase Management API is found to be disabled. However for the migration
instructions to work the Firebase Management API must first be disabled.
Therefore in this case the instructions emitted should be to disable the
API, and to then rerun the init command, which will detect the API is
disabled and emit the migration instructions.
"""
testargs = []
self.gcloud_service_mock.is_api_enabled = MagicMock(return_value=True)
self.firebase_management_service_mock.project_get = MagicMock(
return_value=FirebaseProjectGetResponse(
status=FirebaseProjectStatus.NOT_ENABLED))
out, err = self.run_cmd(testargs, expected_exception=SilentlyExitError)
    # Verify the outputted text contains the expected messaging about
    # disabling the API and rerunning the init command.
self.permissions_service_mock.check_required_permissions.assert_called_once(
)
self.gcloud_service_mock.is_api_enabled.assert_called_once_with(
'firebase.googleapis.com')
self.firebase_management_service_mock.project_get.assert_called_once()
self.assertIn('Your project is not yet enabled for Firebase',
err.getvalue())
self.assertIn('Disable the Firebase Management API', err.getvalue())
self.assertIn(
'https://console.developers.google.com/apis/'
'api/firebase.googleapis.com?project=cli-test-project', err.getvalue())
self.assertIn('Rerun the init command', err.getvalue())
self.assertEqual('', out.getvalue())
# Ensure the command exited before progressing any further
self.cli_services.get_snapshot_debugger_rtdb_service.assert_not_called()
self.rtdb_service_mock.get_schema_version.assert_not_called()
def test_rtdb_management_api_gets_enabled_when_not_enabled(self):
testargs = []
# Returns True for the first check, firebase.googleapis.com
# Returns False for the second check, firebasedatabase.googleapis.com
self.gcloud_service_mock.is_api_enabled = MagicMock(
side_effect=[True, False])
self.firebase_management_service_mock.project_get = MagicMock(
return_value=FirebaseProjectGetResponse(
status=FirebaseProjectStatus.ENABLED))
self.run_cmd(testargs)
self.gcloud_service_mock.is_api_enabled.assert_has_calls([
call('firebase.googleapis.com'),
call('firebasedatabase.googleapis.com')
])
self.gcloud_service_mock.enable_api.assert_called_once_with(
'firebasedatabase.googleapis.com')
def test_rtdb_management_api_does_not_get_enabled_when_already_enabled(self):
testargs = []
self.gcloud_service_mock.is_api_enabled = MagicMock(return_value=True)
self.run_cmd(testargs)
self.gcloud_service_mock.is_api_enabled.assert_has_calls([
call('firebase.googleapis.com'),
call('firebasedatabase.googleapis.com')
])
self.gcloud_service_mock.enable_api.assert_not_called()
def test_use_rtdb_database_and_id_mutually_exclusive(self):
testargs = ['--use-default-rtdb', '--database-id=foo']
out, err = self.run_cmd(testargs, expected_exception=SystemExit)
self.assertIn(
'init: error: argument --database-id: not allowed '
'with argument --use-default-rtdb', err.getvalue())
self.assertEqual('', out.getvalue())
def test_db_created_when_it_doesnt_exist(self):
# Testdata: (test_name, testargs, db_type, expected_db_name)
# The db_type doesn't actually enter into this test, but we are setting it
    # nonetheless to the value it would have in practice.
testcases = [
('Default', [], 'USER_DATABASE', 'cli-test-project-cdbg'),
('Use Default RTDB', ['--use-default-rtdb'], 'DEFAULT_DATABASE',
'cli-test-project-default-rtdb'),
('Custom', ['--database-id=custom-database-name'], 'USER_DATABASE',
'custom-database-name'),
]
self.firebase_management_service_mock.rtdb_instance_get = MagicMock(
return_value=DatabaseGetResponse(
status=DatabaseGetStatus.DOES_NOT_EXIST))
for test_name, testargs, db_type, expected_database_name in testcases:
with self.subTest(test_name):
self.firebase_management_service_mock.rtdb_instance_get.reset_mock()
database_instance = build_database_instance(expected_database_name,
db_type)
self.firebase_management_service_mock.rtdb_instance_create = MagicMock(
return_value=DatabaseCreateResponse(
status=DatabaseCreateStatus.SUCCESS,
database_instance=database_instance))
self.run_cmd(testargs)
service_mock = self.firebase_management_service_mock
service_mock.rtdb_instance_get.assert_called_once_with(
expected_database_name)
service_mock.rtdb_instance_create.assert_called_once_with(
database_id=expected_database_name, location='us-central1')
def test_handles_db_create_fails_precondition_as_expected(self):
    # Testdata: (test_name, testargs, expected_db_name)
    # Unlike the other tests, db_type is not part of these testcases since it
    # is not needed here.
testcases = [
('Default', [], 'cli-test-project-cdbg'),
('Use Default RTDB', ['--use-default-rtdb'],
'cli-test-project-default-rtdb'),
('Custom', ['--database-id=custom-database-name'],
'custom-database-name'),
]
self.firebase_management_service_mock.rtdb_instance_get = MagicMock(
return_value=DatabaseGetResponse(
status=DatabaseGetStatus.DOES_NOT_EXIST))
for test_name, testargs, expected_database_name in testcases:
with self.subTest(test_name):
self.firebase_management_service_mock.rtdb_instance_get.reset_mock()
self.firebase_management_service_mock.rtdb_instance_create = MagicMock(
return_value=DatabaseCreateResponse(
status=DatabaseCreateStatus.FAILED_PRECONDITION))
out, err = self.run_cmd(testargs, expected_exception=SilentlyExitError)
service_mock = self.firebase_management_service_mock
service_mock.rtdb_instance_get.assert_called_once_with(
expected_database_name)
service_mock.rtdb_instance_create.assert_called_once_with(
database_id=expected_database_name, location='us-central1')
project_billing_link = ('https://console.firebase.google.com/project/'
'cli-test-project/usage/details')
self.assertIn(
(f"Database '{expected_database_name}' could not be created on "
"project 'cli-test-project'"), err.getvalue())
self.assertIn(
f'Please check your billing plan at {project_billing_link}',
err.getvalue())
self.assertIn(('Read about the billing plans at '
'https://firebase.google.com/pricing'), err.getvalue())
self.assertIn(f"Visit {project_billing_link} and click 'Modify plan'",
err.getvalue())
self.assertEqual('', out.getvalue())
def test_db_not_created_when_it_exists(self):
# Testdata: (test_name, testargs, db_type, expected_db_name)
# The db_type doesn't actually enter into this test, but we are setting it
    # nonetheless to the value it would have in practice.
testcases = [
('Default', [], 'USER_DATABASE', 'cli-test-project-cdbg'),
('Use Default RTDB', ['--use-default-rtdb'], 'DEFAULT_DATABASE',
'cli-test-project-default-rtdb'),
('Custom', ['--database-id=custom-database-name'], 'USER_DATABASE',
'custom-database-name'),
]
for test_name, testargs, db_type, expected_database_name in testcases:
with self.subTest(test_name):
self.firebase_management_service_mock.rtdb_instance_get.reset_mock()
database_instance = build_database_instance(expected_database_name,
db_type)
self.firebase_management_service_mock.rtdb_instance_get = MagicMock(
return_value=DatabaseGetResponse(
status=DatabaseGetStatus.EXISTS,
database_instance=database_instance))
self.run_cmd(testargs)
service_mock = self.firebase_management_service_mock
service_mock.rtdb_instance_get.assert_called_once_with(
expected_database_name)
service_mock.rtdb_instance_create.assert_not_called()
def test_correct_db_url_is_used(self):
# Testdata: (test_name, testargs, db_type, expected_db_name)
# The db_type doesn't actually enter into this test, but we are setting it
    # nonetheless to the value it would have in practice.
testcases = [
('Default', [], 'USER_DATABASE', 'cli-test-project-cdbg'),
('Use Default RTDB', ['--use-default-rtdb'], 'DEFAULT_DATABASE',
'cli-test-project-default-rtdb'),
('Custom', ['--database-id=custom-database-name'], 'USER_DATABASE',
'custom-database-name'),
]
for test_name, testargs, db_type, expected_database_name in testcases:
with self.subTest(test_name):
self.cli_services.get_snapshot_debugger_rtdb_service.reset_mock()
database_instance = build_database_instance(expected_database_name,
db_type)
self.firebase_management_service_mock.rtdb_instance_get = MagicMock(
return_value=DatabaseGetResponse(
status=DatabaseGetStatus.EXISTS,
database_instance=database_instance))
expected_url = f'https://{expected_database_name}.firebaseio.com'
self.assertEqual(expected_url, database_instance.database_url)
self.run_cmd(testargs)
get_service_mock = self.cli_services.get_snapshot_debugger_rtdb_service
get_service_mock.assert_called_once_with(database_url=expected_url)
def test_db_not_initialized_when_already_initialized(self):
# By returning a value here, that indicates the database has been
# initialized
self.rtdb_service_mock.get_schema_version = MagicMock(return_value='1')
self.run_cmd(testargs=[])
self.rtdb_service_mock.set_schema_version.assert_not_called()
def test_db_initialized_when_not_yet_initialized(self):
# By returning None here, that indicates the database has not been
# initialized
self.rtdb_service_mock.get_schema_version = MagicMock(return_value=None)
self.run_cmd(testargs=[])
self.rtdb_service_mock.set_schema_version.assert_called_once_with('1')
def test_db_info_output_after_successful_run(self):
database_instance = DatabaseInstance({
'name': ('projects/1111111111/locations/us-central1/instances'
'/cli-test-project-cdbg'),
'project': 'projects/1111111111',
'databaseUrl': 'https://cli-test-project-cdbg.firebaseio.com',
'type': 'USER_DATABASE',
'state': 'ACTIVE'
})
self.firebase_management_service_mock.rtdb_instance_get = MagicMock(
return_value=DatabaseGetResponse(
status=DatabaseGetStatus.EXISTS,
database_instance=database_instance))
out, err = self.run_cmd(testargs=[])
self.assertIn("Project 'cli-test-project' is successfully configured",
err.getvalue())
self.assertIn(
('name: projects/1111111111/locations/us-central1/instances/'
'cli-test-project-cdbg'), err.getvalue())
self.assertIn('project: projects/1111111111', err.getvalue())
self.assertIn('database url: https://cli-test-project-cdbg.firebaseio.com',
err.getvalue())
self.assertIn('type: USER_DATABASE', err.getvalue())
self.assertIn('state: ACTIVE', err.getvalue())
self.assertEqual('', out.getvalue())
|
{
"content_hash": "bf2dde1a3ad8b84d847ce17b8845c358",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 91,
"avg_line_length": 43.65092402464066,
"alnum_prop": 0.6951735817104149,
"repo_name": "GoogleCloudPlatform/snapshot-debugger",
"id": "75e8b1575ddd8152b83e440bca182feb13654e7a",
"size": "21833",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "snapshot_dbg_cli_tests/test_init_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "81"
},
{
"name": "Python",
"bytes": "519793"
}
],
"symlink_target": ""
}
|
"""
Tests for frameworks
"""
import inspect
from calvin.utilities import calvinlogger
from calvin.runtime.south.async import get_frameworks
from calvin.runtime.south.async import twistedimpl
from twisted.internet import defer, threads
import pytest
pytestmark = pytest.mark.unittest
_LOG = calvinlogger.get_logger(__name__)
FRAMEWORKS = get_frameworks()
REFERENCE_FW = "twistedimpl"
# TODO: move this to __init__.py of async instead of _modules ?
MODULES = {'defer': {'Deferred': {'type': 'class', 'comp': defer.Deferred},
'DeferredList': {'type': 'class', 'comp': defer.DeferredList},
'inline_callbacks': {'type': 'function', 'comp': defer.inlineCallbacks},
'maybe_deferred': {'type': 'function', 'comp': defer.maybeDeferred}},
'async': {'DelayedCall': {'type': 'class', 'comp': twistedimpl.async.DelayedCall},
'run_ioloop': {'type': 'class', 'comp': twistedimpl.async.run_ioloop},
'stop_ioloop': {'type': 'class', 'comp': twistedimpl.async.stop_ioloop}},
'server_connection': {'ServerProtocolFactory': {'type': 'class', 'comp': twistedimpl.server_connection.ServerProtocolFactory},
'LineProtocol': {'type': 'class', 'comp': twistedimpl.server_connection.LineProtocol},
                      'RawDataProtocol': {'type': 'class', 'comp': twistedimpl.server_connection.RawDataProtocol}},
'threads': {'defer_to_thread': {'type': 'function', 'comp': threads.deferToThread},
'call_multiple_in_thread': {'type': 'function', 'comp': threads.callMultipleInThread}},
'filedescriptor': {'FD': {'type': 'class', 'comp': twistedimpl.filedescriptor.FD}}}
def function_check(func1, func2):
"""
Function param tester
"""
func1_args = inspect.getargspec(func1)
func2_args = inspect.getargspec(func2)
return func1_args == func2_args
class TestFrameworks(object):
"""
    Tests for the implemented frameworks
"""
def test_api_exists(self, monkeypatch):
"""
Just test that all fw have all the API specified.
"""
for framework in FRAMEWORKS:
for module, items in MODULES.items():
module_obj = __import__("calvin.runtime.south.async.%s.%s" % (framework, module),
globals=globals(), fromlist=[''])
for item, info in items.items():
                    # Check for existence
item_obj = getattr(module_obj, item, None)
comp_obj = info['comp']
assert item_obj
assert type(item_obj) == type(comp_obj)
if info['type'] == 'class':
# Loop and check for attr and functions
# TODO: check for classes in classes ?!?
for item_name in dir(item_obj):
if not item_name.startswith("_"):
a_obj = getattr(item_obj, item_name)
c_obj = getattr(comp_obj, item_name, None)
if callable(a_obj):
assert function_check(a_obj, c_obj)
else:
assert type(a_obj) == type(c_obj)
elif info['type'] == 'function':
# check name and params
assert function_check(item_obj, comp_obj)
|
{
"content_hash": "043f17f8f533b650027bd86ce9c26d8c",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 137,
"avg_line_length": 45.56962025316456,
"alnum_prop": 0.5325,
"repo_name": "EricssonResearch/calvin-base",
"id": "7165fb0fdcd8fa62ebbc492aa6b99b8f7734a3da",
"size": "4205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/runtime/south/async/tests/test_frameworks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "769"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "24571"
},
{
"name": "JavaScript",
"bytes": "78325"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "3291484"
},
{
"name": "Shell",
"bytes": "37140"
}
],
"symlink_target": ""
}
|
"""Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it
Example:
with ssh_agent.SshAgent() as agent:
agent.add_key(private_key_string)
# do ssh stuff
# as agent loses scope, the ssh agent is killed
"""
from __future__ import with_statement
import atexit
import tempfile
import os
import sys
import shutil
import subprocess
import random
import time
import datetime
class SshAgentException(Exception):
"""An exception thrown for problems in SshAgent
"""
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(SshAgentException, self).__init__(message)
class SshAgent(object):
"""Run an ssh agent and set SSH_AUTH_SOCK so that clients will use it.
The running agent can have one or more keys added (via the SshAgent.add_key()
    method or via any other method that can find and talk to the running agent).
"""
class Cleanup(object):
"""A helper functor class for SshAgent
An object of this class can be passed
directly to atexit, which will call __call__() when the
program exits
"""
def __init__(self, ssh_agent, ssh_auth_sock_dir):
self.ssh_agent = ssh_agent
self.ssh_auth_sock_dir = ssh_auth_sock_dir
self.cleaned_up = False
self.original_env_var = os.environ.get('SSH_AUTH_SOCK')
def __call__(self):
if self.cleaned_up:
return
self.cleaned_up = True
try:
shutil.rmtree(self.ssh_auth_sock_dir, ignore_errors=True)
except OSError:
pass
try:
self.ssh_agent.kill()
except OSError:
pass
if self.original_env_var:
os.environ['SSH_AUTH_SOCK'] = self.original_env_var
else:
del os.environ['SSH_AUTH_SOCK']
def pass_(self):
"""A function to appease pylint"""
pass
def pass__(self):
"""Another function to appease pylint"""
self.pass_()
def __init__(self):
devnull = open(os.devnull, 'w')
# Start an ssh-agent process and register it to be killed atexit
self.ssh_auth_sock_dir = tempfile.mkdtemp(prefix=os.path.basename(sys.argv[0]) + '.')
self.ssh_auth_sock = os.path.join(self.ssh_auth_sock_dir, "ssh_agent")
self.ssh_agent = subprocess.Popen(["ssh-agent", "-d", "-a", self.ssh_auth_sock], stdout=devnull, stderr=devnull)
self.cleanup = self.Cleanup(self.ssh_agent, self.ssh_auth_sock_dir)
# this is here so that when python exits, we make sure that the agent is killed
# (in case python exits before our __del__() is called
atexit.register(self.cleanup)
os.environ["SSH_AUTH_SOCK"] = self.ssh_auth_sock
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tback):
self.cleanup()
def __del__(self):
self.cleanup()
def kill(self):
'''Explicitly kill the running ssh-agent
It's not necessary to call this function as the agent
will be cleaned up automatically.
'''
self.cleanup()
def add_key(self, key):
"""Add a key to the running agent.
Note:
This function can be called any number of times to add multiple keys.
Args:
key (str): A string containing the ssh private key to be added (the
actual key data, not the filename of a key)
Raises:
SshAgentException: when ssh-add does not immediately return (as in the
case of a private key with a passphrase)
"""
#if self.ssh_agent.poll() is None:
# raise SshAgentException("Unable to add ssh key. Did agent die?")
named_pipe_path = os.path.join(self.ssh_auth_sock_dir, "keypipe." + str(random.getrandbits(64)))
try:
os.mkfifo(named_pipe_path, 0600)
except OSError, exception:
print "Failed to create FIFO: %s" % exception
devnull = open(os.devnull, 'w')
ssh_add = subprocess.Popen(["ssh-add", named_pipe_path], stdout=devnull, stderr=devnull)
fifo = open(named_pipe_path, 'w')
print >> fifo, key
fifo.close()
#Popen.wait() doesn't have a timeout, so we'll implement one using poll() :(
start_time = datetime.datetime.now()
while ssh_add.poll() is None:
if (datetime.datetime.now() - start_time).total_seconds() > 5:
try:
ssh_add.kill()
except OSError:
pass
raise SshAgentException("Unable to add ssh key. Timed out. Does key have a passphrase?")
time.sleep(0.1)
os.remove(named_pipe_path)
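# A minimal usage sketch (illustrative only; the key path is hypothetical).
# Note that add_key() takes the private key material itself, not a filename:
#
# with SshAgent() as agent:
#     with open('/path/to/id_rsa') as key_file:
#         agent.add_key(key_file.read())
#     # ssh/git commands run here will use the agent via SSH_AUTH_SOCK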
# pylint: disable=too-many-lines
# these are already imported inside of the ssh library
#import os
#import subprocess
# json is needed by GitCLI.git_cmd when output_type='json'
import json
class GitCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class GitCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
path,
verbose=False,
ssh_key=None,
author=None):
''' Constructor for GitCLI '''
self.path = path
self.verbose = verbose
self.ssh_key = ssh_key
self.author = author
self.environment_vars = os.environ.copy()
        if self.author:
            author_dict = {}
            author_list = author.split('<')
            author_dict['GIT_COMMITTER_NAME'] = author_list[0].strip()
            # Use the email portion (after '<') when present; fall back to the name
            if len(author_list) > 1:
                author_dict['GIT_COMMITTER_EMAIL'] = author_list[1].strip().rstrip('>')
            else:
                author_dict['GIT_COMMITTER_EMAIL'] = author_list[0].strip()
            self.environment_vars.update(author_dict)
def _add(self, files_to_add=None):
''' git add '''
cmd = ["add", "--no-ignore-removal"]
if files_to_add:
cmd.extend(files_to_add)
else:
cmd.append('.')
results = self.git_cmd(cmd)
return results
def _commit(self, msg, author=None):
''' git commit with message '''
cmd = ["commit", "-m", msg]
if author:
cmd += ["--author", author]
results = self.git_cmd(cmd)
return results
def _clone(self, repo, dest, bare=False):
''' git clone '''
cmd = ["clone"]
if bare:
cmd += ["--bare"]
cmd += [repo, dest]
results = self.git_cmd(cmd)
return results
def _fetch(self, remote):
''' git fetch '''
cmd = ["fetch"]
cmd += [remote]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _status(self, porcelain=False, show_untracked=True):
''' Do a git status '''
cmd = ["status"]
if porcelain:
cmd.append('--porcelain')
if show_untracked:
cmd.append('--untracked-files=normal')
else:
cmd.append('--untracked-files=no')
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _checkout(self, branch):
''' Do a git checkout to <branch> '''
cmd = ["checkout", branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _get_current_branch(self):
''' Do a git checkout to <branch> '''
cmd = ["describe", "--contains", "--all", "HEAD"]
results = self.git_cmd(cmd, output=True, output_type='raw')
results['results'] = results['results'].rstrip()
return results
def _merge(self, merge_id):
''' Do a git checkout to <branch> '''
cmd = ["merge", merge_id]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _push(self, remote, src_branch, dest_branch):
''' Do a git checkout to <branch> '''
push_branches = src_branch + ":" + dest_branch
cmd = ["push", remote, push_branches]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _remote_update(self):
''' Do a git remote update '''
cmd = ["remote", "update"]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _diff(self, diff_branch):
''' Do a git diff diff_branch'''
cmd = ["diff", diff_branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _rebase(self, rebase_branch):
''' Do a git rebase rebase_branch'''
cmd = ["rebase", rebase_branch]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def _config(self, get_args):
''' Do a git config --get <get_args> '''
cmd = ["config", '--get', get_args]
results = self.git_cmd(cmd, output=True, output_type='raw')
return results
def git_cmd(self, cmd, output=False, output_type='json'):
'''Base command for git '''
cmds = ['/usr/bin/git']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
if self.ssh_key:
with SshAgent() as agent:
self.environment_vars['SSH_AUTH_SOCK'] = os.environ['SSH_AUTH_SOCK']
agent.add_key(self.ssh_key)
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environment_vars)
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
else:
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.environment_vars)
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"cmd": cmds
})
else:
rval.update({"results": {}})
# Always include stdout/stderr:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class GitCommit(GitCLI):
''' Class to wrap the git command line tools
'''
# pylint: disable=too-many-arguments
def __init__(self,
msg,
path,
commit_files,
author=None):
''' Constructor for GitCommit '''
super(GitCommit, self).__init__(path, author=author)
self.path = path
self.msg = msg
self.commit_files = commit_files
self.author = author
self.debug = []
os.chdir(path)
self.status_results = self._status(porcelain=True)
self.debug.append(self.status_results)
def get_files_to_commit(self):
''' do we have files to commit?'''
files_found_to_be_committed = []
# get the list of files that changed according to git status
git_status_out = self.status_results['results'].split('\n')
git_status_files = []
#clean up the data
for line in git_status_out:
file_name = line[3:]
if "->" in line:
file_name = file_name.split("->")[-1].strip()
git_status_files.append(file_name)
        # Check if the files to be committed are in the git_status_files
for file_name in self.commit_files:
file_name = str(file_name)
for status_file in git_status_files:
if status_file.startswith(file_name):
files_found_to_be_committed.append(status_file)
return files_found_to_be_committed
def have_commits(self):
''' do we have files to commit?'''
# test the results
if self.status_results['results']:
return True
return False
def commit(self):
'''perform a git commit '''
if self.have_commits():
add_results = None
if self.commit_files:
files_to_add = self.get_files_to_commit()
if files_to_add:
add_results = self._add(files_to_add)
else:
add_results = self._add()
if add_results:
self.debug.append(add_results)
commit_results = self._commit(self.msg, self.author)
commit_results['debug'] = self.debug
return commit_results
return {'returncode': 0,
'results': {},
'no_commits': True,
'debug': self.debug
}
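# A minimal usage sketch outside of Ansible (illustrative only; the path,
# message, and author values are hypothetical):
#
# committer = GitCommit('update config', '/path/to/repo', ['config.yml'],
#                       'Jane Doe <jane@example.com>')
# results = committer.commit()
# if results['returncode'] != 0:
#     raise GitCLIError(str(results))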
def main():
'''
ansible git module for committing
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str', choices=['present']),
msg=dict(default=None, required=True, type='str'),
path=dict(default=None, required=True, type='str'),
author=dict(default=None, required=False, type='str'),
commit_files=dict(default=None, required=False, type='list'),
),
supports_check_mode=False,
)
git = GitCommit(module.params['msg'],
module.params['path'],
module.params['commit_files'],
module.params['author'],
)
state = module.params['state']
if state == 'present':
results = git.commit()
if results['returncode'] != 0:
module.fail_json(msg=results)
if results.has_key('no_commits'):
module.exit_json(changed=False, results=results, state="present")
module.exit_json(changed=True, results=results, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
if __name__ == '__main__':
from ansible.module_utils.basic import *
main()
|
{
"content_hash": "388e16bb443dc1709c3acdc2b0b1f9a8",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 120,
"avg_line_length": 31.010162601626018,
"alnum_prop": 0.5335911384938061,
"repo_name": "drewandersonnz/openshift-tools",
"id": "94f8b65dd7eb1e88015cc328f6a3930cdf586948",
"size": "15635",
"binary": false,
"copies": "12",
"ref": "refs/heads/prod",
"path": "ansible/roles/lib_git/library/git_commit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24919"
},
{
"name": "Dockerfile",
"bytes": "10248"
},
{
"name": "Go",
"bytes": "127388"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "67678"
},
{
"name": "JavaScript",
"bytes": "9573"
},
{
"name": "Makefile",
"bytes": "1108"
},
{
"name": "PHP",
"bytes": "30017"
},
{
"name": "Python",
"bytes": "19774421"
},
{
"name": "Shell",
"bytes": "553874"
}
],
"symlink_target": ""
}
|
"""
Helper functions to move files around.
"""
import os
from string import ascii_lowercase
def split_file_and_return_names(raw_data_file):
"""
    Splits an input raw data file into smaller chunk files with a fixed
    number of lines each. Returns the names of the created files.
"""
rawfilesize = os.path.getsize(raw_data_file)
# Step 1.1 - split file into 1000000 line (~100Mb) chunks
# Note: the 'split' command takes the raw_data_file, splits it into separate files
# and puts those files in the current directory (i.e. working_directory)
if(rawfilesize < 2e8):
os.system('split -l 100000 ' + raw_data_file)
else:
os.system('split -l 1000000 ' + raw_data_file)
# Step 1.2 - get split filenames
split_filenames = []
for c1 in ascii_lowercase:
for c2 in ascii_lowercase:
filename = 'x'+c1+c2
if(os.path.isfile(filename)):
split_filenames.append(filename)
'''
# Claire's note: I'm pretty sure split -l 10000 raw_data_file will always make
# at least xaa, and so len(split_filenames) should never be zero...
if len(split_filenames) == 0:
split_filenames = [raw_data_file]
raw_filenames = split_filenames
'''
return split_filenames
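# Illustrative usage sketch: the path below is hypothetical, and the current
# working directory must be writable because 'split' drops its xaa, xab, ...
# chunk files there.
#
#     chunks = split_file_and_return_names('/data/run1/raw_reads.fastq')
#     # e.g. ['xaa', 'xab', 'xac'] for a file large enough to need three chunks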
##########################################################################
##% Functions to parse specific summary_file attributes for
##% different raw2otu.py processes
##########################################################################
def parse_input_files(options, summary_obj, amplicon_type):
"""
Parses the raw data file input and returns the file name
and file type.
Parameters
----------
options The options that were passed to raw2otu.py.
Should have options.input_dir
summary_obj SummaryParser object
amplicon_type '16S' or 'ITS'
Returns
-------
primers_file primers file
barcodes_map barcodes map file
raw_data_file raw data file path (i.e. input_dir/raw_file)
raw_data_summary_file file path to file with raw data to sample ID map
raw_file_type 'FASTQ' or 'FASTA'
"""
raw_data_file = None
raw_data_summary_file = None
# Extract file locations
if amplicon_type == '16S':
primers_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['PRIMERS_FILE'])
barcodes_map = os.path.join(options.input_dir, summary_obj.attribute_value_16S['BARCODES_MAP'])
try:
raw_data_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['RAW_FASTQ_FILE'])
raw_file_type = 'FASTQ'
except:
print("No single raw FASTQ file found. Checking for raw FASTA.")
try:
raw_data_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['RAW_FASTA_FILE'])
raw_file_type = 'FASTA'
except:
print("No single raw FASTA file found either. Checking for multiple files.")
try:
raw_data_summary_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['RAW_FASTQ_FILES'])
raw_file_type = 'FASTQ'
except:
print("No filename of multiple raw FASTQs map provided. Check contents of your raw data and summary file.")
try:
raw_data_summary_file = os.path.join(options.input_dir, summary_obj.attribute_value_16S['RAW_FASTA_FILES'])
raw_file_type = 'FASTA'
except:
print("No filename of multiple raw FASTAs map provided. Check contents of your raw data and summary file.")
raise NameError("Unable to retrieve raw sequencing files.")
elif amplicon_type == 'ITS':
primers_file = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['PRIMERS_FILE'])
barcodes_map = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['BARCODES_MAP'])
try:
raw_data_file = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['RAW_FASTQ_FILE'])
raw_file_type = 'FASTQ'
except:
print("No single raw FASTQ file found. Checking for raw FASTA.")
try:
raw_data_file = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['RAW_FASTA_FILE'])
raw_file_type = 'FASTA'
except:
print("No single raw FASTA file found either. Checking for multiple files.")
try:
raw_data_summary_file = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['RAW_FASTQ_FILES'])
raw_file_type = 'FASTQ'
except:
print("No filename of multiple raw FASTQs map provided. Check contents of your raw data and summary file.")
raise NameError("Unable to retrieve raw sequencing files.")
try:
raw_data_summary_file = os.path.join(options.input_dir, summary_obj.attribute_value_ITS['RAW_FASTA_FILES'])
raw_file_type = 'FASTA'
except:
print("No filename of multiple raw FASTAs map provided. Check contents of your raw data and summary file.")
raise NameError("Unable to retrieve raw sequencing files.")
return primers_file, barcodes_map, raw_data_file, raw_data_summary_file, raw_file_type
def parse_barcodes_parameters(summary_obj, amplicon_type):
"""
Parses parameters used in splitting by barcodes.
Parameters
----------
summary_obj SummaryParser object
amplicon_type '16S' or 'ITS'
Returns
-------
mode str, either '1', '2', or '3' - indicating
barcodes mode to pass to
2.split_by_barcodes.py
index_file str, If mode '3' is given, the INDEX_FILE
location, otherwise returns None.
index_file_format str (default = 'fastq'). 'fastq', 'fasta',
or 'tab' - for use in 2.split_by_barcodes.py
"""
if amplicon_type == '16S':
if 'BARCODES_MODE' in summary_obj.attribute_value_16S:
mode = summary_obj.attribute_value_16S['BARCODES_MODE']
else:
mode = '2'
# If barcodes are in index file, get those parameters
if mode == '3':
try:
index_file = summary_obj.attribute_value_16S['INDEX_FILE']
except:
raise NameError("Barcodes mode 3 specified (barcodes are in index file), but no INDEX_FILE provided.")
try:
index_file_format = summary_obj.attribute_value_16S['INDEX_FORMAT']
except:
index_file_format = 'fastq'
else:
index_file = None
index_file_format = None
elif amplicon_type == 'ITS':
if 'BARCODES_MODE' in summary_obj.attribute_value_ITS:
mode = summary_obj.attribute_value_ITS['BARCODES_MODE']
else:
mode = '2'
# If barcodes are in index file, get those parameters
if mode == '3':
try:
index_file = summary_obj.attribute_value_ITS['INDEX_FILE']
except:
raise NameError("Barcodes mode 3 specified (barcodes are in index file), but no INDEX_FILE provided.")
try:
index_file_format = summary_obj.attribute_value_ITS['INDEX_FORMAT']
except:
index_file_format = 'fastq'
else:
index_file = None
index_file_format = None
return mode, index_file, index_file_format
def parse_dbotu_parameters(summary_obj, amplicon_type):
"""
Parses summary file for dbOTU options.
Parameters
----------
summary_obj SummaryParser object
amplicon_type '16S' or 'ITS'
Returns
-------
dbotu_flag 'True' or 'False' (as a string); whether to run dbOTU calling (default 'False')
dist max sequence dissimilarity (default = 0.1)
abund minimum fold difference for comparing two OTUs (0=no abundance criterion; default 10.0)
pval minimum p-value for merging OTUs (default: 0.0005)
"""
if amplicon_type == "16S":
try:
dbotu_flag = summary_obj.attribute_value_16S['DBOTU']
except:
dbotu_flag = 'False'
try:
dist = summary_obj.attribute_value_16S['DISTANCE_CRITERIA']
except:
dist = 0.1
try:
abund = summary_obj.attribute_value_16S['ABUNDANCE_CRITERIA']
except:
abund = 10.0
try:
pval = summary_obj.attribute_value_16S['DBOTU_PVAL']
except:
pval = 0.0005
elif amplicon_type == "ITS":
try:
dbotu_flag = summary_obj.attribute_value_ITS['DBOTU']
except:
dbotu_flag = 'False'
try:
dist = summary_obj.attribute_value_ITS['DISTANCE_CRITERIA']
except:
dist = 0.1
try:
abund = summary_obj.attribute_value_ITS['ABUNDANCE_CRITERIA']
except:
abund = 10.0
try:
pval = summary_obj.attribute_value_ITS['DBOTU_PVAL']
except:
pval = 0.0005
else:
raise NameError("Incorrect amplicon type specified for dbOTU summary file parsing")
return dbotu_flag, dist, abund, pval
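# Illustrative sketch of how a caller such as raw2otu.py might consume this parser.
# The summary_obj variable and the downstream run_dbotu_calling() step are
# hypothetical; only the attribute names and defaults above come from the code.
#
#     dbotu_flag, dist, abund, pval = parse_dbotu_parameters(summary_obj, '16S')
#     if dbotu_flag == 'True':            # note: the flag is a string, not a bool
#         run_dbotu_calling(dist, abund, pval)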
|
{
"content_hash": "20be4f042ec121d197fcd0a8c19f1bfc",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 132,
"avg_line_length": 40.65822784810127,
"alnum_prop": 0.5639269406392694,
"repo_name": "thomasgurry/amplicon_sequencing_pipeline",
"id": "b61328c31b012f11525705a4459bb41bf386148e",
"size": "9636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/PipelineFilesInterface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "207811"
}
],
"symlink_target": ""
}
|
"""Plain text writer.
Use save() to save a page layout to a plain text file.
"""
def load(file_):
"""Die hard: loading a page layout from a plain text file is impossible."""
raise NotImplementedError('Plain text parsing is neither available nor feasible.')
def save(page, file_):
"""Save a page layout to a plain text file.
Tries to follow the layout by separating columns, paragraphs and lines of
text by empty strings, and by respecting line breaks inside paragraphs.
"""
for cn, column in enumerate(page['content']):
if cn > 0:
file_.write('\n')
for pn, para in enumerate(column['content']):
if pn > 0:
file_.write('\n')
for line in para['content']:
content = ' '.join(word['content'] for word in line['content'])
if isinstance(content, unicode):
content = content.encode('utf-8')
file_.write(content)
file_.write('\n')
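# Illustrative sketch of the nested page-layout structure save() expects:
# columns -> paragraphs -> lines -> words, each wrapped in a 'content' key as
# used above. The words and file name here are hypothetical.
#
#     page = {'content': [                  # columns on the page
#         {'content': [                     # paragraphs in a column
#             {'content': [                 # lines in a paragraph
#                 {'content': [{'content': 'Hello'}, {'content': 'world'}]},
#             ]},
#         ]},
#     ]}
#     with open('page.txt', 'w') as f:
#         save(page, f)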
|
{
"content_hash": "74283eb122f2f05f2197570a73515aa0",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 82,
"avg_line_length": 36,
"alnum_prop": 0.5813492063492064,
"repo_name": "ksa242/pdfsed-tools",
"id": "ac1f339494f4704050bdeb41ed0490a4879fd47c",
"size": "2427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdfsed/txt.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "42172"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform_v1beta1
def sample_update_dataset():
# Create a client
client = aiplatform_v1beta1.DatasetServiceClient()
# Initialize request argument(s)
dataset = aiplatform_v1beta1.Dataset()
dataset.display_name = "display_name_value"
dataset.metadata_schema_uri = "metadata_schema_uri_value"
dataset.metadata.null_value = "NULL_VALUE"
request = aiplatform_v1beta1.UpdateDatasetRequest(
dataset=dataset,
)
# Make the request
response = client.update_dataset(request=request)
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_DatasetService_UpdateDataset_sync]
|
{
"content_hash": "45afbc68a374ae6aa16cf47ed47f9243",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 28.541666666666668,
"alnum_prop": 0.7284671532846715,
"repo_name": "googleapis/python-aiplatform",
"id": "96bbc054596b25776641c1c474ce529e7503434b",
"size": "1707",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_dataset_service_update_dataset_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
"""Generates the contents of an Cronet LICENSE file for the third-party code.
It makes use of src/tools/licenses.py and the README.chromium files on which
it depends. Based on android_webview/tools/webview_licenses.py.
"""
import optparse
import os
import sys
import textwrap
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'tools'))
import licenses
def _ReadFile(path):
"""Reads a file from disk.
Args:
path: The path of the file to read, relative to the root of the repository.
Returns:
The contents of the file as a string.
"""
return open(os.path.join(REPOSITORY_ROOT, path), 'rb').read()
def GenerateLicense():
"""Generates the contents of an Cronet LICENSE file for the third-party code.
Returns:
The contents of the LICENSE file.
"""
# TODO(mef): Generate list of third_party libraries using checkdeps.
third_party_dirs = [
'base/third_party/libevent',
'third_party/ashmem',
'third_party/boringssl',
'third_party/modp_b64',
'third_party/zlib',
]
# Start with Chromium's LICENSE file
content = [_ReadFile('LICENSE')]
# Add necessary third_party.
for directory in sorted(third_party_dirs):
metadata = licenses.ParseDir(directory, REPOSITORY_ROOT,
require_license_file=True)
content.append('-' * 20)
content.append(directory)
content.append('-' * 20)
license_file = metadata['License File']
if license_file and license_file != licenses.NOT_SHIPPED:
content.append(_ReadFile(license_file))
return '\n'.join(content)
def main():
class FormatterWithNewLines(optparse.IndentedHelpFormatter):
def format_description(self, description):
paras = description.split('\n')
formatted_paras = [textwrap.fill(para, self.width) for para in paras]
return '\n'.join(formatted_paras) + '\n'
parser = optparse.OptionParser(formatter=FormatterWithNewLines(),
usage='%prog command [options]')
parser.description = (__doc__ +
'\nCommands:\n' \
' license [filename]\n' \
' Generate Cronet LICENSE to filename or stdout.\n')
(_, args) = parser.parse_args()
if not args:
parser.print_help()
return 1
if args[0] == 'license':
if len(args) > 1:
print 'Saving license to %s' % args[1]
f = open(args[1], "w")
try:
f.write(GenerateLicense())
finally:
f.close()
else:
print GenerateLicense()
return 0
parser.print_help()
return 1
if __name__ == '__main__':
sys.exit(main())
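# Example invocations (illustrative; the output filename is arbitrary):
#
#     python components/cronet/tools/cronet_licenses.py license            # print to stdout
#     python components/cronet/tools/cronet_licenses.py license LICENSE    # write to a file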
|
{
"content_hash": "0908cc6d871635361ca93a5ada8b8702",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 79,
"avg_line_length": 28.72340425531915,
"alnum_prop": 0.6340740740740741,
"repo_name": "hujiajie/chromium-crosswalk",
"id": "63a7b07e21d47f79294e7953dd6bbadbff55daf4",
"size": "2881",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "components/cronet/tools/cronet_licenses.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
past: compatibility with Python 2 from Python 3
===============================================
``past`` is a package to aid with Python 2/3 compatibility. Whereas ``future``
contains backports of Python 3 constructs to Python 2, ``past`` provides
implementations of some Python 2 constructs in Python 3 and tools to import and
run Python 2 code in Python 3. It is intended to be used sparingly, as a way of
running old Python 2 code from Python 3 until the code is ported properly.
Potential uses for libraries:
- as a step in porting a Python 2 codebase to Python 3 (e.g. with the ``futurize`` script)
- to provide Python 3 support for previously Python 2-only libraries with the
same APIs as on Python 2 -- particularly with regard to 8-bit strings (the
``past.builtins.str`` type).
- to aid in providing minimal-effort Python 3 support for applications using
libraries that do not yet wish to upgrade their code properly to Python 3, or
wish to upgrade it gradually to Python 3 style.
Here are some code examples that run identically on Python 3 and 2::
>>> from past.builtins import str as oldstr
>>> philosopher = oldstr(u'\u5b54\u5b50'.encode('utf-8'))
>>> # This now behaves like a Py2 byte-string on both Py2 and Py3.
>>> # For example, indexing returns a Python 2-like string object, not
>>> # an integer:
>>> philosopher[0]
'\xe5'
>>> type(philosopher[0])
<past.builtins.oldstr>
>>> # List-producing versions of range, reduce, map, filter
>>> from past.builtins import range, reduce
>>> range(10)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
15
>>> # Other functions removed in Python 3 are resurrected ...
>>> from past.builtins import execfile
>>> execfile('myfile.py')
>>> from past.builtins import raw_input
>>> name = raw_input('What is your name? ')
What is your name? [cursor]
>>> from past.builtins import reload
>>> reload(mymodule) # equivalent to imp.reload(mymodule) in Python 3
>>> from past.builtins import xrange
>>> for i in xrange(10):
... pass
It also provides import hooks so you can import and use Python 2 modules like
this::
$ python3
>>> from past import autotranslate
>>> autotranslate('mypy2module')
>>> import mypy2module
until the authors of the Python 2 modules have upgraded their code. Then, for
example::
>>> mypy2module.func_taking_py2_string(oldstr(b'abcd'))
Credits
-------
:Author: Ed Schofield
:Sponsor: Python Charmers Pty Ltd, Australia: http://pythoncharmers.com
Licensing
---------
Copyright 2013-2014 Python Charmers Pty Ltd, Australia.
The software is distributed under an MIT licence. See LICENSE.txt.
"""
# from past.builtins import *
from past.translation import install_hooks as autotranslate
from future import __version__, __copyright__, __license__
__title__ = 'past'
__author__ = 'Ed Schofield'
|
{
"content_hash": "4576fc70a19e4e9e2a13b59acc9cdf19",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 90,
"avg_line_length": 31.53191489361702,
"alnum_prop": 0.6754385964912281,
"repo_name": "krischer/python-future",
"id": "b434acb90b48126f6ad4f254b60c1c3d22f13a03",
"size": "2979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/past/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2917437"
},
{
"name": "Shell",
"bytes": "539"
}
],
"symlink_target": ""
}
|
def extractVictoriatranslationBlogspotCom(item):
'''
Parser for 'victoriatranslation.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
{
"content_hash": "9055437c940f11e1984699a3e04052db",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27.38095238095238,
"alnum_prop": 0.6452173913043479,
"repo_name": "fake-name/ReadableWebProxy",
"id": "bd1338c1c995ba08258efc4b32c76fee030cc715",
"size": "576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractVictoriatranslationBlogspotCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
import os, glob, sys
from time import sleep
directory = sys.argv[1]
sleep_time = int(sys.argv[2])
files = None
os.chdir(directory)
files = os.listdir()
while (True):
for file in files:
if file.endswith(".jpg") or file.endswith(".png"):
cmd = "feh --bg-fill " + file
os.system(cmd)
sleep(sleep_time)
|
{
"content_hash": "9e7a39095d3fd642c56ef552385a6503",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 21.875,
"alnum_prop": 0.5942857142857143,
"repo_name": "magic-sudo/wallpaper-changer",
"id": "68530c2ed0ec02b1518e4f978b11aabc312c3f65",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wallpaper_changer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "359"
}
],
"symlink_target": ""
}
|
# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
sys.path.append( "../glapi_parser" )
import apiutil
apiutil.CopyrightC()
print """/* DO NOT EDIT! THIS CODE IS AUTOGENERATED BY unpack.py */
#include "unpacker.h"
#include "cr_opcodes.h"
#include "cr_error.h"
#include "cr_mem.h"
#include "cr_spu.h"
#include "unpack_extend.h"
#include <stdio.h>
#include <memory.h>
const unsigned char *cr_unpackData = NULL;
SPUDispatchTable cr_unpackDispatch;
static void crUnpackExtend(void);
"""
#
# Useful functions
#
def ReadData( offset, arg_type ):
"""Emit a READ_DOUBLE or READ_DATA call for pulling a GL function
argument out of the buffer's operand area."""
if arg_type == "GLdouble" or arg_type == "GLclampd":
retval = "READ_DOUBLE( %d )" % offset
else:
retval = "READ_DATA( %d, %s )" % (offset, arg_type)
return retval
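# For example (illustrative offsets only), an argument at byte offset 4 yields:
#
#     ReadData(4, 'GLdouble')  ->  'READ_DOUBLE( 4 )'
#     ReadData(4, 'GLint')     ->  'READ_DATA( 4, GLint )'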
def FindReturnPointer( return_type, params ):
"""For GL functions that return values (either as the return value or
through a pointer parameter) emit a SET_RETURN_PTR call."""
arg_len = apiutil.PacketLength( params )
if (return_type != 'void'):
print '\tSET_RETURN_PTR( %d );' % (arg_len + 8) # extended opcode plus packet length
else:
paramList = [ ('foo', 'void *', 0) ]
print '\tSET_RETURN_PTR( %d );' % (arg_len + 8 - apiutil.PacketLength(paramList))
def FindWritebackPointer( return_type, params ):
"""Emit a SET_WRITEBACK_PTR call."""
arg_len = apiutil.PacketLength( params )
if return_type != 'void':
paramList = [ ('foo', 'void *', 0) ]
arg_len += apiutil.PacketLength( paramList )
print '\tSET_WRITEBACK_PTR( %d );' % (arg_len + 8) # extended opcode plus packet length
def MakeNormalCall( return_type, func_name, params, counter_init = 0 ):
counter = counter_init
copy_of_params = params[:]
for i in range( 0, len(params) ):
(name, type, vecSize) = params[i]
if apiutil.IsPointer(copy_of_params[i][1]):
params[i] = ('NULL', type, vecSize)
copy_of_params[i] = (copy_of_params[i][0], 'void', 0)
if not "get" in apiutil.Properties(func_name):
print '\tcrError( "%s needs to be special cased!" );' % func_name
else:
print "\t%s %s = %s;" % ( copy_of_params[i][1], name, ReadData( counter, copy_of_params[i][1] ) )
counter += apiutil.sizeof(copy_of_params[i][1])
if ("get" in apiutil.Properties(func_name)):
FindReturnPointer( return_type, params )
FindWritebackPointer( return_type, params )
if return_type != "void":
print "\t(void)",
else:
print "\t",
print "cr_unpackDispatch.%s( %s );" % (func_name, apiutil.MakeCallString(params))
def MakeVectorCall( return_type, func_name, arg_type ):
"""Convert a call like glVertex3f to glVertex3fv."""
vec_func = apiutil.VectorFunction(func_name)
params = apiutil.Parameters(vec_func)
assert len(params) == 1
(arg_name, vecType, vecSize) = params[0]
if arg_type == "GLdouble" or arg_type == "GLclampd":
print "#ifdef CR_UNALIGNED_ACCESS_OKAY"
print "\tcr_unpackDispatch.%s((%s) cr_unpackData);" % (vec_func, vecType)
print "#else"
for index in range(0, vecSize):
print "\tGLdouble v" + `index` + " = READ_DOUBLE(", `index * 8`, ");"
if return_type != "void":
print "\t(void) cr_unpackDispatch.%s(" % func_name,
else:
print "\tcr_unpackDispatch.%s(" % func_name,
for index in range(0, vecSize):
print "v" + `index`,
if index != vecSize - 1:
print ",",
print ");"
print "#endif"
else:
print "\tcr_unpackDispatch.%s((%s) cr_unpackData);" % (vec_func, vecType)
keys = apiutil.GetDispatchedFunctions("../glapi_parser/APIspec.txt")
#
# Generate unpack functions for all the simple functions.
#
for func_name in keys:
if (not "pack" in apiutil.ChromiumProps(func_name) or
apiutil.FindSpecial( "unpacker", func_name )):
continue
params = apiutil.Parameters(func_name)
return_type = apiutil.ReturnType(func_name)
print "static void crUnpack%s(void)" % func_name
print "{"
vector_func = apiutil.VectorFunction(func_name)
if (vector_func and len(apiutil.Parameters(vector_func)) == 1):
MakeVectorCall( return_type, func_name, params[0][1] )
else:
MakeNormalCall( return_type, func_name, params )
packet_length = apiutil.PacketLength( params )
if packet_length == 0:
print "\tINCR_DATA_PTR_NO_ARGS( );"
else:
print "\tINCR_DATA_PTR( %d );" % packet_length
print "}\n"
#
# Emit some code
#
print """
typedef struct __dispatchNode {
const unsigned char *unpackData;
struct __dispatchNode *next;
} DispatchNode;
static DispatchNode *unpackStack = NULL;
static SPUDispatchTable *cr_lastDispatch = NULL;
void crUnpackPush(void)
{
DispatchNode *node = (DispatchNode*)crAlloc( sizeof( *node ) );
node->next = unpackStack;
unpackStack = node;
node->unpackData = cr_unpackData;
}
void crUnpackPop(void)
{
DispatchNode *node = unpackStack;
if (!node)
{
crError( "crUnpackPop called with an empty stack!" );
}
unpackStack = node->next;
cr_unpackData = node->unpackData;
crFree( node );
}
void crUnpack( const void *data, const void *opcodes,
unsigned int num_opcodes, SPUDispatchTable *table )
{
unsigned int i;
const unsigned char *unpack_opcodes;
if (table != cr_lastDispatch)
{
crSPUCopyDispatchTable( &cr_unpackDispatch, table );
cr_lastDispatch = table;
}
unpack_opcodes = (const unsigned char *)opcodes;
cr_unpackData = (const unsigned char *)data;
for (i = 0 ; i < num_opcodes ; i++)
{
/*crDebug(\"Unpacking opcode \%d\", *unpack_opcodes);*/
switch( *unpack_opcodes )
{"""
#
# Emit switch cases for all unextended opcodes
#
for func_name in keys:
if "pack" in apiutil.ChromiumProps(func_name):
print '\t\t\tcase %s:' % apiutil.OpcodeName( func_name ),
print 'crUnpack%s(); break;' % func_name
print """
case CR_EXTEND_OPCODE: crUnpackExtend(); break;
default:
crError( "Unknown opcode: %d", *unpack_opcodes );
break;
}
unpack_opcodes--;
}
}"""
#
# Emit unpack functions for extended opcodes, non-special functions only.
#
for func_name in keys:
if ("extpack" in apiutil.ChromiumProps(func_name)
and not apiutil.FindSpecial("unpacker", func_name)):
return_type = apiutil.ReturnType(func_name)
params = apiutil.Parameters(func_name)
print 'static void crUnpackExtend%s(void)' % func_name
print '{'
MakeNormalCall( return_type, func_name, params, 8 )
print '}\n'
print 'static void crUnpackExtend(void)'
print '{'
print '\tGLenum extend_opcode = %s;' % ReadData( 4, 'GLenum' );
print ''
print '\t/*crDebug(\"Unpacking extended opcode \%d", extend_opcode);*/'
print '\tswitch( extend_opcode )'
print '\t{'
#
# Emit switch statement for extended opcodes
#
for func_name in keys:
if "extpack" in apiutil.ChromiumProps(func_name):
print '\t\tcase %s:' % apiutil.ExtendedOpcodeName( func_name )
print '\t\t\tcrUnpackExtend%s( );' % func_name
print '\t\t\tbreak;'
print """ default:
crError( "Unknown extended opcode: %d", (int) extend_opcode );
break;
}
INCR_VAR_PTR();
}"""
|
{
"content_hash": "229aaa111e3694fed77bf84642b8f5e6",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 100,
"avg_line_length": 26.881226053639846,
"alnum_prop": 0.6748859749144812,
"repo_name": "excid3/chromium",
"id": "f566f32f8d0eaa066a4f67755a17e3d00607966e",
"size": "7016",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "unpacker/unpack.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7746096"
},
{
"name": "C++",
"bytes": "10996"
},
{
"name": "Perl",
"bytes": "35310"
},
{
"name": "Python",
"bytes": "727751"
}
],
"symlink_target": ""
}
|
from rtruffle.node import Node
class ExpressionNode(Node):
def __init__(self, source_section):
Node.__init__(self, source_section)
def create_trivial_method(self, _signature):
return None
def is_trivial_in_sequence(self):
return False
def handle_inlining(self, mgenc): # pylint: disable=W
pass
def handle_outer_inlined(
self, removed_ctx_level, mgenc_with_inlined
): # pylint: disable=W
pass
|
{
"content_hash": "16acf8096f23b1ae3fef40028f8e0e7a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 58,
"avg_line_length": 23.6,
"alnum_prop": 0.6313559322033898,
"repo_name": "smarr/PySOM",
"id": "2d41a9a50e811e5e7edec8f76f9eb99c8977fcad",
"size": "472",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/som/interpreter/ast/nodes/expression_node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1342"
},
{
"name": "Python",
"bytes": "538515"
},
{
"name": "Shell",
"bytes": "407"
}
],
"symlink_target": ""
}
|
"""Implements an Agent that interacts with Google Cloud Storage."""
import io
import time
from apiclient.http import MediaIoBaseDownload
from .gcp_agent import GcpAgent
STORAGE_READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/devstorage.read_only'
STORAGE_READ_WRITE_SCOPE = 'https://www.googleapis.com/auth/devstorage.read_write'
STORAGE_FULL_SCOPE = 'https://www.googleapis.com/auth/devstorage.full_control'
class GcpStorageAgent(GcpAgent):
"""Agent that interacts with Google Cloud Storage service."""
@classmethod
def scope_aliases(cls):
"""Implements GcpAgent interface."""
return {
'read-only': STORAGE_READ_ONLY_SCOPE,
'read-write': STORAGE_READ_WRITE_SCOPE,
'full': STORAGE_FULL_SCOPE
}
@classmethod
def default_discovery_name_and_version(cls):
return 'storage', 'v1'
def inspect_bucket(self, context, bucket, path=None, **kwargs):
"""Get metadata for a bucket or object in the bucket.
Args:
bucket: [string] The bucket to inspect.
path: [string] The name of the object in the bucket.
If None then inspect the bucket itself.
Returns:
Metadata for specified resource.
"""
path = context.eval(path)
if not path:
return self.get_resource(context, 'buckets', bucket=bucket, **kwargs)
else:
return self.get_resource(context, 'objects', bucket=bucket, object=path,
**kwargs)
def list_bucket(self, context, bucket, path_prefix, with_versions, **kwargs):
"""List the contents of the path in the specified bucket.
Args:
bucket: [string] The name of the bucket to list.
path_prefix: [string] The path prefix of objects within the bucket
to list.
with_versions: [boolean] Whether or not to list all the versions.
If path is a directory, this will list all the versions of all the
files. Otherwise just all the versions of the given file.
Returns:
A list of resources.
"""
return super(GcpStorageAgent, self).list_resource(
context, 'objects', bucket=bucket, prefix=path_prefix,
versions=with_versions, **kwargs)
def retrieve_content(
self, context, bucket, path, transform=None, generation=None, **kwargs):
"""Retrieves the content at the specified path.
Args:
bucket: [string] The bucket to retrieve from.
path: [string] The path to the content to retrieve from the bucket.
generation: [long] Specifies version of object (or None for current).
transform: [callable(string)] transform the downloaded bytes into
something else (e.g. a JSON object). If None then the identity.
Returns:
transformed object.
"""
self.logger.info('Retrieving path=%s from bucket=%s [generation=%s]',
path, bucket, generation)
# Get Payload Data
bucket = context.eval(bucket)
path = context.eval(path)
generation = context.eval(generation)
request = self.service.objects().get_media(
bucket=bucket,
object=path,
generation=generation,
**kwargs)
data = io.BytesIO()
downloader = MediaIoBaseDownload(data, request, chunksize=1024*1024)
done = False
while not done:
status, done = downloader.next_chunk()
if status:
self.logger.debug('Download %d%%', int(status.progress() * 100))
result = bytes.decode(data.getvalue())
return result if transform is None else transform(result)
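# Illustrative usage sketch: how the agent and execution context are constructed
# depends on GcpAgent plumbing not shown in this file, so 'agent', 'context', the
# bucket name, and the object path below are hypothetical.
#
#     import json
#     summary = agent.retrieve_content(context, 'my-bucket', 'results/summary.json',
#                                      transform=json.loads)
#     # 'summary' is the downloaded object decoded to text and then passed through
#     # the transform callable (here, parsed as JSON).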
|
{
"content_hash": "7f34b28b3336060bd568f10fe4b3370b",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 82,
"avg_line_length": 33.932038834951456,
"alnum_prop": 0.6669527896995708,
"repo_name": "google/citest",
"id": "4ce16f34f8b96237e4e147f7a1a34ee35a8848c0",
"size": "4092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "citest/gcp_testing/gcp_storage_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "993608"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import numpy as np
from glue.core.component import Component, CategoricalComponent
from glue.core.data import Data
def test_histogram_data():
data = Data(label="Test Data")
comp_a = Component(np.random.uniform(size=500))
comp_b = Component(np.random.normal(size=500))
data.add_component(comp_a, 'uniform')
data.add_component(comp_b, 'normal')
return data
def test_data():
data = Data(label="Test Data 1")
data2 = Data(label="Teset Data 2")
comp_a = Component(np.array([1, 2, 3]))
comp_b = Component(np.array([1, 2, 3]))
comp_c = Component(np.array([2, 4, 6]))
comp_d = Component(np.array([1, 3, 5]))
data.add_component(comp_a, 'a')
data.add_component(comp_b, 'b')
data2.add_component(comp_c, 'c')
data2.add_component(comp_d, 'd')
return data, data2
def test_categorical_data():
data = Data(label="Test Cat Data 1")
data2 = Data(label="Teset Cat Data 2")
comp_x1 = CategoricalComponent(np.array(['a', 'a', 'b']))
comp_y1 = Component(np.array([1, 2, 3]))
comp_x2 = CategoricalComponent(np.array(['c', 'a', 'b']))
comp_y2 = Component(np.array([1, 3, 5]))
data.add_component(comp_x1, 'x1')
data.add_component(comp_y1, 'y1')
data2.add_component(comp_x2, 'x2')
data2.add_component(comp_y2, 'y2')
return data, data2
def test_image():
data = Data(label="Test Image")
comp_a = Component(np.ones((25, 25)))
data.add_component(comp_a, 'test_1')
comp_b = Component(np.zeros((25, 25)))
data.add_component(comp_b, 'test_2')
return data
def test_cube():
data = Data(label="Test Cube")
comp_a = Component(np.ones((16, 16, 16)))
data.add_component(comp_a, 'test_3')
return data
|
{
"content_hash": "98311e68299cf5c75723f6d3173783e3",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 64,
"avg_line_length": 28.919354838709676,
"alnum_prop": 0.6307863915225879,
"repo_name": "saimn/glue",
"id": "b6d92c2e45920998973b5350d39e61e674020b69",
"size": "1793",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "glue/tests/example_data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1609137"
},
{
"name": "Shell",
"bytes": "1603"
}
],
"symlink_target": ""
}
|
"""
Tests for django.core.servers.
"""
import os
import urllib2
from django.core.exceptions import ImproperlyConfigured
from django.test import LiveServerTestCase
from django.core.servers.basehttp import WSGIServerException
from django.test.utils import override_settings
from .models import Person
TEST_ROOT = os.path.dirname(__file__)
TEST_SETTINGS = {
'MEDIA_URL': '/media/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'media'),
'STATIC_URL': '/static/',
'STATIC_ROOT': os.path.join(TEST_ROOT, 'static'),
}
class LiveServerBase(LiveServerTestCase):
urls = 'regressiontests.servers.urls'
fixtures = ['testdata.json']
@classmethod
def setUpClass(cls):
# Override settings
cls.settings_override = override_settings(**TEST_SETTINGS)
cls.settings_override.enable()
super(LiveServerBase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
# Restore original settings
cls.settings_override.disable()
super(LiveServerBase, cls).tearDownClass()
def urlopen(self, url):
return urllib2.urlopen(self.live_server_url + url)
class LiveServerAddress(LiveServerBase):
"""
Ensure that the address set in the environment variable is valid.
Refs #2879.
"""
@classmethod
def setUpClass(cls):
# Backup original environment variable
address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
# Just the host is not accepted
cls.raises_exception('localhost', ImproperlyConfigured)
# The host must be valid
cls.raises_exception('blahblahblah:8081', WSGIServerException)
# The list of ports must be in a valid format
cls.raises_exception('localhost:8081,', ImproperlyConfigured)
cls.raises_exception('localhost:8081,blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-', ImproperlyConfigured)
cls.raises_exception('localhost:8081-blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-8082-8083', ImproperlyConfigured)
# If contrib.staticfiles isn't configured properly, the exception
# should bubble up to the main thread.
old_STATIC_URL = TEST_SETTINGS['STATIC_URL']
TEST_SETTINGS['STATIC_URL'] = None
cls.raises_exception('localhost:8081', ImproperlyConfigured)
TEST_SETTINGS['STATIC_URL'] = old_STATIC_URL
# Restore original environment variable
if address_predefined:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
else:
del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
@classmethod
def raises_exception(cls, address, exception):
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
try:
super(LiveServerAddress, cls).setUpClass()
raise Exception("The line above should have raised an exception")
except exception:
pass
def test_test_test(self):
# Intentionally empty method so that the test is picked up by the
# test runner and the overridden setUpClass() method is executed.
pass
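# For reference, the formats exercised above imply that valid values for
# DJANGO_LIVE_TEST_SERVER_ADDRESS look like (illustrative values):
#
#     localhost:8081                # a single host:port
#     localhost:8081,8082,8083      # a list of candidate ports
#     localhost:8081-8090           # a range of candidate ports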
class LiveServerViews(LiveServerBase):
def test_404(self):
"""
Ensure that the LiveServerTestCase serves 404s.
Refs #2879.
"""
try:
self.urlopen('/')
except urllib2.HTTPError as err:
self.assertEqual(err.code, 404, 'Expected 404 response')
else:
self.fail('Expected 404 response')
def test_view(self):
"""
Ensure that the LiveServerTestCase serves views.
Refs #2879.
"""
f = self.urlopen('/example_view/')
self.assertEqual(f.read(), b'example view')
def test_static_files(self):
"""
Ensure that the LiveServerTestCase serves static files.
Refs #2879.
"""
f = self.urlopen('/static/example_static_file.txt')
self.assertEqual(f.read().rstrip(b'\r\n'), b'example static file')
def test_media_files(self):
"""
Ensure that the LiveServerTestCase serves media files.
Refs #2879.
"""
f = self.urlopen('/media/example_media_file.txt')
self.assertEqual(f.read().rstrip(b'\r\n'), b'example media file')
class LiveServerDatabase(LiveServerBase):
def test_fixtures_loaded(self):
"""
Ensure that fixtures are properly loaded and visible to the
live server thread.
Refs #2879.
"""
f = self.urlopen('/model_view/')
self.assertEqual(f.read().splitlines(), ['jane', 'robert'])
def test_database_writes(self):
"""
Ensure that data written to the database by a view can be read.
Refs #2879.
"""
self.urlopen('/create_model_instance/')
self.assertQuerysetEqual(
Person.objects.all().order_by('pk'),
['jane', 'robert', 'emily'],
lambda b: b.name
)
|
{
"content_hash": "250d9abcf77fbc4a9ddaa3e02d890008",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 78,
"avg_line_length": 32.5,
"alnum_prop": 0.6368836291913215,
"repo_name": "aleida/django",
"id": "9537e1feb3622740c608e67caa59405cbef7c749",
"size": "5070",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/regressiontests/servers/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "50207"
},
{
"name": "JavaScript",
"bytes": "89078"
},
{
"name": "Python",
"bytes": "8135526"
},
{
"name": "Shell",
"bytes": "11901"
}
],
"symlink_target": ""
}
|
import pytest
from sigopt.urllib3_patch import ExpiringHTTPConnectionPool, ExpiringHTTPSConnectionPool
@pytest.mark.parametrize("pool_cls", [ExpiringHTTPConnectionPool, ExpiringHTTPSConnectionPool])
def test_pool_reuses_connections(pool_cls):
pool = pool_cls(host="sigopt.com", expiration_seconds=30)
conn1 = pool._get_conn()
pool._put_conn(conn1)
conn2 = pool._get_conn()
assert conn1 is conn2
@pytest.mark.parametrize("pool_cls", [ExpiringHTTPConnectionPool, ExpiringHTTPSConnectionPool])
def test_pool_expires_connections(pool_cls):
pool = pool_cls(host="sigopt.com", expiration_seconds=0)
conn1 = pool._get_conn()
pool._put_conn(conn1)
conn2 = pool._get_conn()
assert conn1 is not conn2
|
{
"content_hash": "1fd899116bde0d46c9d4f1b77b9862e9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 95,
"avg_line_length": 37.578947368421055,
"alnum_prop": 0.7647058823529411,
"repo_name": "sigopt/sigopt-python",
"id": "365e641282df2d265c57cbc9f377231f81fa3ffd",
"size": "785",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/test_urllib3_patch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2411"
},
{
"name": "Makefile",
"bytes": "545"
},
{
"name": "Python",
"bytes": "542280"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
}
|
from unittest import main, TestCase
from tempfile import NamedTemporaryFile
from labcontrol.db.configuration_manager import ConfigurationManager
class TestConfigurationManager(TestCase):
def test_create(self):
with NamedTemporaryFile() as tmp_f:
ConfigurationManager.create(
tmp_f.name, True, '/path/to/server.cert',
'/path/to/server.key', '/path/to/cookie_secret.bla', 'db_host',
'db_port', 'db_name', 'db_user', 'db_password',
'db_admin_user', 'db_admin_password',
'/path/to/logdir', '')
with open(tmp_f.name) as obs_f:
obs = obs_f.read()
obs = obs.splitlines()
exp = EXP_CONFIG_FILE.splitlines()
# Removing the first line as it contains a date that is generated
# when the test is run
self.assertEqual(obs[1:], exp)
def test_create_qiita(self):
with NamedTemporaryFile() as tmp_f:
ConfigurationManager.create(
tmp_f.name, True, '/path/to/server.cert',
'/path/to/server.key', '/path/to/cookie_secret.bla', 'db_host',
'db_port', 'db_name', 'db_user', 'db_password',
'db_admin_user', 'db_admin_password', '/path/to/logdir',
'server_cert')
with open(tmp_f.name) as obs_f:
obs = obs_f.read()
obs = obs.splitlines()
exp = EXP_CONFIG_FILE_QIITA.splitlines()
# Removing the first line as it contains a date that is generated
# when the test is run
self.assertEqual(obs[1:], exp)
EXP_CONFIG_FILE = """
# ------------------------- MAIN SETTINGS ----------------------------------
[main]
TEST_ENVIRONMENT=True
LOG_DIR=/path/to/logdir
CERTIFICATE_FILEPATH=/path/to/server.cert
KEY_FILEPATH=/path/to/server.key
COOKIE_SECRET=/path/to/cookie_secret.bla
# ----------------------- POSTGRES SETTINGS --------------------------------
[postgres]
USER=db_user
PASSWORD=db_password
ADMIN_USER=db_admin_user
ADMIN_PASSWORD=db_admin_password
DATABASE=db_name
HOST=db_host
PORT=db_port
# ------------------------- QIITA SETTINGS ----------------------------------
[qiita]
SERVER_CERT=
"""
EXP_CONFIG_FILE_QIITA = """
# ------------------------- MAIN SETTINGS ----------------------------------
[main]
TEST_ENVIRONMENT=True
LOG_DIR=/path/to/logdir
CERTIFICATE_FILEPATH=/path/to/server.cert
KEY_FILEPATH=/path/to/server.key
COOKIE_SECRET=/path/to/cookie_secret.bla
# ----------------------- POSTGRES SETTINGS --------------------------------
[postgres]
USER=db_user
PASSWORD=db_password
ADMIN_USER=db_admin_user
ADMIN_PASSWORD=db_admin_password
DATABASE=db_name
HOST=db_host
PORT=db_port
# ------------------------- QIITA SETTINGS ----------------------------------
[qiita]
SERVER_CERT=server_cert
"""
if __name__ == '__main__':
main()
|
{
"content_hash": "317a4f3ef7e3c28dbbb2c4750d53e969",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 79,
"avg_line_length": 30.103092783505154,
"alnum_prop": 0.5486301369863014,
"repo_name": "jdereus/labman",
"id": "3ae51ef13c6286162a87f8a1d5019d2255b90ec8",
"size": "3269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labcontrol/db/tests/test_configuration_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11468"
},
{
"name": "HTML",
"bytes": "636240"
},
{
"name": "JavaScript",
"bytes": "265086"
},
{
"name": "Python",
"bytes": "1090912"
},
{
"name": "SQLPL",
"bytes": "66746"
}
],
"symlink_target": ""
}
|
from sqlalchemy import Index
from ceilometer.storage.sqlalchemy import models
def upgrade(migrate_engine):
index = Index('idx_meter_rid_cname', models.Meter.resource_id,
models.Meter.counter_name)
index.create(bind=migrate_engine)
def downgrade(migrate_engine):
index = Index('idx_meter_rid_cname', models.Meter.resource_id,
models.Meter.counter_name)
index.drop(bind=migrate_engine)
|
{
"content_hash": "b3fc910dafe40c75354dabe178136f57",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 66,
"avg_line_length": 29.4,
"alnum_prop": 0.6984126984126984,
"repo_name": "rackerlabs/instrumented-ceilometer",
"id": "d275c511111cf4fb3f4c54a29082748ea4a41e8a",
"size": "1017",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "149656"
},
{
"name": "JavaScript",
"bytes": "361114"
},
{
"name": "Python",
"bytes": "1897887"
},
{
"name": "Shell",
"bytes": "1322"
}
],
"symlink_target": ""
}
|
"""Implementation of an image service that uses Glance as the backend."""
from __future__ import absolute_import
import copy
import inspect
import itertools
import random
import sys
import time
import cryptography
import glanceclient
from glanceclient.common import http
import glanceclient.exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import sslutils
from oslo_utils import excutils
from oslo_utils import netutils
from oslo_utils import timeutils
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
from jacket.compute import exception
from jacket.i18n import _LE, _LI, _LW
import jacket.compute.image.download as image_xfers
from jacket.objects import compute as objects
from jacket.compute import signature_utils
glance_opts = [
cfg.StrOpt('host',
default='$my_ip',
# TODO(sdague): remove in N
deprecated_for_removal=True,
help='DEPRECATED: Glance server hostname or IP address. '
'Use the "api_servers" option instead.'),
cfg.IntOpt('port',
default=9292,
min=1,
max=65535,
# TODO(sdague): remove in N
deprecated_for_removal=True,
help='DEPRECATED: Glance server port. Use the "api_servers" '
'option instead.'),
cfg.StrOpt('protocol',
default='http',
choices=('http', 'https'),
# TODO(sdague): remove in N
deprecated_for_removal=True,
help='DEPRECATED: Protocol to use when connecting to glance. '
'Set to https for SSL. Use the "api_servers" option '
'instead.'),
cfg.ListOpt('api_servers',
help='''
A list of the glance api server endpoints available to compute. These
should be fully qualified urls of the form
"scheme://hostname:port[/path]" (i.e. "http://10.0.1.0:9292" or
"https://my.glance.server/image")'''),
cfg.BoolOpt('api_insecure',
default=False,
help='Allow to perform insecure SSL (https) requests to '
'glance'),
cfg.IntOpt('num_retries',
default=0,
help='Number of retries when uploading / downloading an image '
'to / from glance.'),
cfg.ListOpt('allowed_direct_url_schemes',
default=[],
help='A list of url scheme that can be downloaded directly '
'via the direct_url. Currently supported schemes: '
'[file].'),
cfg.BoolOpt('verify_glance_signatures',
default=False,
help='Require Nova to perform signature verification on '
'each image downloaded from Glance.'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(glance_opts, 'glance')
CONF.import_opt('auth_strategy', 'jacket.api.compute.auth')
CONF.import_opt('my_ip', 'jacket.compute.netconf')
supported_glance_versions = (1, 2)
def generate_glance_url():
"""Generate the URL to glance."""
glance_host = CONF.glance.host
if netutils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
return "%s://%s:%d" % (CONF.glance.protocol, glance_host,
CONF.glance.port)
def generate_image_url(image_ref):
"""Generate an image URL from an image_ref."""
return "%s/images/%s" % (generate_glance_url(), image_ref)
def _endpoint_from_image_ref(image_href):
"""Return the image_ref and guessed endpoint from an image url.
:param image_href: href of an image
:returns: a tuple of the form (image_id, endpoint_url)
"""
parts = image_href.split('/')
image_id = parts[-1]
# the endpoint is everything in the url except the last 3 bits
# which are version, 'images', and image_id
endpoint = '/'.join(parts[:-3])
return (image_id, endpoint)
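# For example (illustrative URL), an image_href of
#     'http://glance.example.com:9292/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9'
# yields
#     ('71c675ab-d94f-49cd-a114-e12490b328d9', 'http://glance.example.com:9292')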
def generate_identity_headers(context, status='Confirmed'):
return {
'X-Auth-Token': getattr(context, 'auth_token', None),
'X-User-Id': getattr(context, 'user', None),
'X-Tenant-Id': getattr(context, 'tenant', None),
'X-Roles': ','.join(getattr(context, 'roles', [])),
'X-Identity-Status': status,
}
def _glanceclient_from_endpoint(context, endpoint, version=1):
"""Instantiate a new glanceclient.Client object."""
params = {}
# NOTE(sdague): even if we aren't using keystone, it doesn't
# hurt to send these headers.
params['identity_headers'] = generate_identity_headers(context)
if endpoint.startswith('https://'):
# https specific params
params['insecure'] = CONF.glance.api_insecure
params['ssl_compression'] = False
sslutils.is_enabled(CONF)
if CONF.ssl.cert_file:
params['cert_file'] = CONF.ssl.cert_file
if CONF.ssl.key_file:
params['key_file'] = CONF.ssl.key_file
if CONF.ssl.ca_file:
params['cacert'] = CONF.ssl.ca_file
return glanceclient.Client(str(version), endpoint, **params)
def _determine_curr_major_version(endpoint):
"""Determines the current major version of the glance API in use
:returns Integer version number or None if unable to determine version
"""
http_client = http.HTTPClient(endpoint)
try:
response, content = http_client.get('/versions')
for version in content['versions']:
if version['status'] == 'CURRENT':
res = version['id']
# The 'id' value looks like "v2.2",
# so grab the major version number which is 2 in this case
res = int(res[1:res.find(".")])
return res if res in supported_glance_versions else None
except Exception:
LOG.error(_LE("Unable to determine the glance API version"))
def get_api_servers():
"""Shuffle a list of CONF.glance.api_servers and return an iterator
that will cycle through the list, looping around to the beginning
if necessary.
"""
api_servers = []
configured_servers = ([generate_glance_url()]
if CONF.glance.api_servers is None
else CONF.glance.api_servers)
for api_server in configured_servers:
if '//' not in api_server:
api_server = 'http://' + api_server
# NOTE(sdague): remove in N.
LOG.warn(
_LW("No protocol specified in for api_server '%s', "
"please update [glance] api_servers with fully "
"qualified url including scheme (http / https)"),
api_server)
api_servers.append(api_server)
random.shuffle(api_servers)
return itertools.cycle(api_servers)
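# Illustrative behaviour sketch (hypothetical endpoints): with two configured
# api_servers, the returned iterator cycles through them indefinitely in a
# shuffled order.
#
#     servers = get_api_servers()
#     next(servers)   # e.g. 'https://glance-1.example.com'
#     next(servers)   # 'http://glance-2.example.com'
#     next(servers)   # 'https://glance-1.example.com'  (wraps around)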
class GlanceClientWrapper(object):
"""Glance client wrapper class that implements retries."""
def __init__(self, context=None, endpoint=None, version=1):
if endpoint is not None:
self.client = self._create_static_client(context,
endpoint,
version)
else:
self.client = None
self.api_servers = None
def _create_static_client(self, context, endpoint, version):
"""Create a client that we'll use for every call."""
self.api_server = str(endpoint)
return _glanceclient_from_endpoint(context, endpoint, version)
def _create_onetime_client(self, context, version):
"""Create a client that will be used for one call."""
if self.api_servers is None:
self.api_servers = get_api_servers()
self.api_server = next(self.api_servers)
return _glanceclient_from_endpoint(context, self.api_server, version)
def call(self, context, version, method, *args, **kwargs):
"""Call a glance client method. If we get a connection error,
retry the request according to CONF.glance.num_retries.
"""
retry_excs = (glanceclient.exc.ServiceUnavailable,
glanceclient.exc.InvalidEndpoint,
glanceclient.exc.CommunicationError)
retries = CONF.glance.num_retries
if retries < 0:
LOG.warning(_LW("Treating negative config value (%(retries)s) for "
"'glance.num_retries' as 0."),
{'retries': retries})
retries = 0
num_attempts = retries + 1
for attempt in range(1, num_attempts + 1):
client = self.client or self._create_onetime_client(context,
version)
try:
result = getattr(client.images, method)(*args, **kwargs)
if inspect.isgenerator(result):
# Convert generator results to a list, so that we can
# catch any potential exceptions now and retry the call.
return list(result)
return result
except retry_excs as e:
if attempt < num_attempts:
extra = "retrying"
else:
extra = 'done trying'
LOG.exception(_LE("Error contacting glance server "
"'%(server)s' for '%(method)s', "
"%(extra)s."),
{'server': self.api_server,
'method': method, 'extra': extra})
if attempt == num_attempts:
raise exception.GlanceConnectionFailed(
server=str(self.api_server), reason=six.text_type(e))
time.sleep(1)
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
self._client = client or GlanceClientWrapper()
# NOTE(jbresnah) build the table of download handlers at the beginning
# so that operators can catch errors at load time rather than whenever
# a user attempts to use a module. Note this cannot be done in glance
# space when this python module is loaded because the download module
# may require configuration options to be parsed.
self._download_handlers = {}
download_modules = image_xfers.load_transfer_modules()
for scheme, mod in six.iteritems(download_modules):
if scheme not in CONF.glance.allowed_direct_url_schemes:
continue
try:
self._download_handlers[scheme] = mod.get_download_handler()
except Exception as ex:
LOG.error(_LE('When loading the module %(module_str)s the '
'following error occurred: %(ex)s'),
{'module_str': str(mod), 'ex': ex})
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = _extract_query_params(kwargs)
try:
images = self._client.call(context, 1, 'list', **params)
except Exception:
_reraise_translated_exception()
_images = []
for image in images:
if _is_image_available(context, image):
_images.append(_translate_from_glance(image))
return _images
def show(self, context, image_id, include_locations=False,
show_deleted=True):
"""Returns a dict with image data for the given opaque image id.
:param context: The context object to pass to image client
:param image_id: The UUID of the image
:param include_locations: (Optional) include locations in the returned
dict of information if the image service API
supports it. If the image service API does
not support the locations attribute, it will
still be included in the returned dict, as an
empty list.
:param show_deleted: (Optional) show the image even the status of
image is deleted.
"""
version = 1
if include_locations:
version = 2
try:
image = self._client.call(context, version, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not show_deleted and getattr(image, 'deleted', False):
raise exception.ImageNotFound(image_id=image_id)
if not _is_image_available(context, image):
raise exception.ImageNotFound(image_id=image_id)
image = _translate_from_glance(image,
include_locations=include_locations)
if include_locations:
locations = image.get('locations', None) or []
du = image.get('direct_url', None)
if du:
locations.append({'url': du, 'metadata': {}})
image['locations'] = locations
return image
def _get_transfer_module(self, scheme):
try:
return self._download_handlers[scheme]
except KeyError:
return None
except Exception:
LOG.error(_LE("Failed to instantiate the download handler "
"for %(scheme)s"), {'scheme': scheme})
return
def download(self, context, image_id, data=None, dst_path=None):
"""Calls out to Glance for data and writes data."""
if CONF.glance.allowed_direct_url_schemes and dst_path is not None:
image = self.show(context, image_id, include_locations=True)
for entry in image.get('locations', []):
loc_url = entry['url']
loc_meta = entry['metadata']
o = urlparse.urlparse(loc_url)
xfer_mod = self._get_transfer_module(o.scheme)
if xfer_mod:
try:
xfer_mod.download(context, o, dst_path, loc_meta)
LOG.info(_LI("Successfully transferred "
"using %s"), o.scheme)
return
except Exception:
LOG.exception(_LE("Download image error"))
try:
image_chunks = self._client.call(context, 1, 'data', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
# Retrieve properties for verification of Glance image signature
verifier = None
if CONF.glance.verify_glance_signatures:
image_meta_dict = self.show(context, image_id,
include_locations=False)
image_meta = objects.ImageMeta.from_dict(image_meta_dict)
img_signature = image_meta.properties.get('img_signature')
img_sig_hash_method = image_meta.properties.get(
'img_signature_hash_method'
)
img_sig_cert_uuid = image_meta.properties.get(
'img_signature_certificate_uuid'
)
img_sig_key_type = image_meta.properties.get(
'img_signature_key_type'
)
try:
verifier = signature_utils.get_verifier(context,
img_sig_cert_uuid,
img_sig_hash_method,
img_signature,
img_sig_key_type)
except exception.SignatureVerificationError:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Image signature verification failed '
'for image: %s'), image_id)
close_file = False
if data is None and dst_path:
data = open(dst_path, 'wb')
close_file = True
if data is None:
# Perform image signature verification
if verifier:
try:
for chunk in image_chunks:
verifier.update(chunk)
verifier.verify()
LOG.info(_LI('Image signature verification succeeded '
'for image: %s'), image_id)
except cryptography.exceptions.InvalidSignature:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Image signature verification failed '
'for image: %s'), image_id)
return image_chunks
else:
try:
for chunk in image_chunks:
if verifier:
verifier.update(chunk)
data.write(chunk)
if verifier:
verifier.verify()
LOG.info(_LI('Image signature verification succeeded '
'for image %s'), image_id)
except cryptography.exceptions.InvalidSignature:
data.truncate(0)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Image signature verification failed '
'for image: %s'), image_id)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error writing to %(path)s: %(exception)s"),
{'path': dst_path, 'exception': ex})
finally:
if close_file:
data.close()
def create(self, context, image_meta, data=None):
"""Store the image data and return the new image object."""
sent_service_image_meta = _translate_to_glance(image_meta)
if data:
sent_service_image_meta['data'] = data
try:
recv_service_image_meta = self._client.call(
context, 1, 'create', **sent_service_image_meta)
except glanceclient.exc.HTTPException:
_reraise_translated_exception()
return _translate_from_glance(recv_service_image_meta)
def update(self, context, image_id, image_meta, data=None,
purge_props=True):
"""Modify the given image with the new data."""
image_meta = _translate_to_glance(image_meta)
image_meta['purge_props'] = purge_props
# NOTE(bcwaldon): id is not an editable field, but it is likely to be
# passed in by calling code. Let's be nice and ignore it.
image_meta.pop('id', None)
if data:
image_meta['data'] = data
try:
image_meta = self._client.call(context, 1, 'update',
image_id, **image_meta)
except Exception:
_reraise_translated_image_exception(image_id)
else:
return _translate_from_glance(image_meta)
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
:raises: NotAuthorized if the user is not an owner.
:raises: ImageNotAuthorized if the user is not authorized.
"""
try:
self._client.call(context, 1, 'delete', image_id)
except glanceclient.exc.NotFound:
raise exception.ImageNotFound(image_id=image_id)
except glanceclient.exc.HTTPForbidden:
raise exception.ImageNotAuthorized(image_id=image_id)
return True
def _extract_query_params(params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
'page_size', 'sort_key', 'sort_dir')
for param in accepted_params:
if params.get(param):
_params[param] = params.get(param)
# ensure filters is a dict
_params.setdefault('filters', {})
# NOTE(vish): don't filter out private images
_params['filters'].setdefault('is_public', 'none')
return _params
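# A small illustration (hypothetical input) of the filtering above: only the
# accepted keys survive, and private images are not filtered out by default.
#
#   _extract_query_params({'limit': 10, 'unknown': 'dropped'})
#   # -> {'limit': 10, 'filters': {'is_public': 'none'}}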
def _is_image_available(context, image):
"""Check image availability.
This check is needed in case Nova and Glance are deployed
without authentication turned on.
"""
# The presence of an auth token implies this is an authenticated
# request and we need not handle the noauth use-case.
if hasattr(context, 'auth_token') and context.auth_token:
return True
def _is_image_public(image):
# NOTE(jaypipes) V2 Glance API replaced the is_public attribute
# with a visibility attribute. We do this here to prevent the
# glanceclient for a V2 image model from throwing an
# exception from warlock when trying to access an is_public
# attribute.
if hasattr(image, 'visibility'):
return str(image.visibility).lower() == 'public'
else:
return image.is_public
if context.is_admin or _is_image_public(image):
return True
properties = image.properties
if context.project_id and ('owner_id' in properties):
return str(properties['owner_id']) == str(context.project_id)
if context.project_id and ('project_id' in properties):
return str(properties['project_id']) == str(context.project_id)
try:
user_id = properties['user_id']
except KeyError:
return False
return str(user_id) == str(context.user_id)
def _translate_to_glance(image_meta):
image_meta = _convert_to_string(image_meta)
image_meta = _remove_read_only(image_meta)
return image_meta
def _translate_from_glance(image, include_locations=False):
image_meta = _extract_attributes(image,
include_locations=include_locations)
image_meta = _convert_timestamps_to_datetimes(image_meta)
image_meta = _convert_from_string(image_meta)
return image_meta
def _convert_timestamps_to_datetimes(image_meta):
"""Returns image with timestamp fields converted to datetime objects."""
for attr in ['created_at', 'updated_at', 'deleted_at']:
if image_meta.get(attr):
image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
return image_meta
# NOTE(bcwaldon): used to store non-string data in glance metadata
def _json_loads(properties, attr):
prop = properties[attr]
if isinstance(prop, six.string_types):
properties[attr] = jsonutils.loads(prop)
def _json_dumps(properties, attr):
prop = properties[attr]
if not isinstance(prop, six.string_types):
properties[attr] = jsonutils.dumps(prop)
_CONVERT_PROPS = ('block_device_mapping', 'mappings')
def _convert(method, metadata):
metadata = copy.deepcopy(metadata)
properties = metadata.get('properties')
if properties:
for attr in _CONVERT_PROPS:
if attr in properties:
method(properties, attr)
return metadata
def _convert_from_string(metadata):
return _convert(_json_loads, metadata)
def _convert_to_string(metadata):
return _convert(_json_dumps, metadata)
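# A minimal sketch (hypothetical metadata) of the round trip provided by the
# two helpers above for the _CONVERT_PROPS entries:
#
#   meta = {'properties': {'mappings': [{'virtual': 'root'}]}}
#   stored = _convert_to_string(meta)     # 'mappings' becomes a JSON string
#   loaded = _convert_from_string(stored) # 'mappings' is a list of dicts again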
def _extract_attributes(image, include_locations=False):
# NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
# a get(), resulting in a useless request back to glance. This list is
    # therefore sorted, with dependent attributes at the end:
# 'deleted_at' depends on 'deleted'
# 'checksum' depends on 'status' == 'active'
IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'is_public',
'direct_url', 'locations']
queued = getattr(image, 'status') == 'queued'
queued_exclude_attrs = ['disk_format', 'container_format']
include_locations_attrs = ['direct_url', 'locations']
output = {}
for attr in IMAGE_ATTRIBUTES:
if attr == 'deleted_at' and not output['deleted']:
output[attr] = None
elif attr == 'checksum' and output['status'] != 'active':
output[attr] = None
# image may not have 'name' attr
elif attr == 'name':
output[attr] = getattr(image, attr, None)
# NOTE(liusheng): queued image may not have these attributes and 'name'
elif queued and attr in queued_exclude_attrs:
output[attr] = getattr(image, attr, None)
# NOTE(mriedem): Only get location attrs if including locations.
elif attr in include_locations_attrs:
if include_locations:
output[attr] = getattr(image, attr, None)
# NOTE(mdorman): 'size' attribute must not be 'None', so use 0 instead
elif attr == 'size':
output[attr] = getattr(image, attr) or 0
else:
# NOTE(xarses): Anything that is caught with the default value
# will result in an additional lookup to glance for said attr.
# Notable attributes that could have this issue:
# disk_format, container_format, name, deleted, checksum
output[attr] = getattr(image, attr, None)
output['properties'] = getattr(image, 'properties', {})
return output
def _remove_read_only(image_meta):
IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at']
output = copy.deepcopy(image_meta)
for attr in IMAGE_ATTRIBUTES:
if attr in output:
del output[attr]
return output
def _reraise_translated_image_exception(image_id):
"""Transform the exception for the image but keep its traceback intact."""
exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = _translate_image_exception(image_id, exc_value)
six.reraise(new_exc, None, exc_trace)
def _reraise_translated_exception():
"""Transform the exception but keep its traceback intact."""
exc_type, exc_value, exc_trace = sys.exc_info()
new_exc = _translate_plain_exception(exc_value)
six.reraise(new_exc, None, exc_trace)
def _translate_image_exception(image_id, exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
return exception.ImageNotAuthorized(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.ImageNotFound(image_id=image_id)
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.ImageBadRequest(image_id=image_id,
response=six.text_type(exc_value))
return exc_value
def _translate_plain_exception(exc_value):
if isinstance(exc_value, (glanceclient.exc.Forbidden,
glanceclient.exc.Unauthorized)):
return exception.Forbidden(six.text_type(exc_value))
if isinstance(exc_value, glanceclient.exc.NotFound):
return exception.NotFound(six.text_type(exc_value))
if isinstance(exc_value, glanceclient.exc.BadRequest):
return exception.Invalid(six.text_type(exc_value))
return exc_value
def get_remote_image_service(context, image_href):
"""Create an image_service and parse the id from the given image_href.
The image_href param can be an href of the form
'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3',
or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the
image_href is a standalone id, then the default image service is returned.
:param image_href: href that describes the location of an image
:returns: a tuple of the form (image_service, image_id)
"""
    # NOTE(bcwaldon): If image_href doesn't look like a URI, assume it's a
# standalone image ID
if '/' not in str(image_href):
image_service = get_default_image_service()
return image_service, image_href
try:
(image_id, endpoint) = _endpoint_from_image_ref(image_href)
glance_client = GlanceClientWrapper(context=context,
endpoint=endpoint)
except ValueError:
raise exception.InvalidImageRef(image_href=image_href)
image_service = GlanceImageService(client=glance_client)
return image_service, image_id
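# A minimal usage sketch (hypothetical context) of the two accepted image_href
# forms described above: a full Glance URL is split into an endpoint and image
# id, while a bare id falls back to the default image service.
#
#   image_service, image_id = get_remote_image_service(
#       context, 'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3')
#   image_service, image_id = get_remote_image_service(
#       context, 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3')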
def get_default_image_service():
return GlanceImageService()
class UpdateGlanceImage(object):
def __init__(self, context, image_id, metadata, stream):
self.context = context
self.image_id = image_id
self.metadata = metadata
self.image_stream = stream
def start(self):
image_service, image_id = (
get_remote_image_service(self.context, self.image_id))
image_service.update(self.context, image_id, self.metadata,
self.image_stream, purge_props=False)
|
{
"content_hash": "a36d7f89daac1c1fc7e199dbdb847a5d",
"timestamp": "",
"source": "github",
"line_count": 744,
"max_line_length": 79,
"avg_line_length": 39.331989247311824,
"alnum_prop": 0.5828862385948126,
"repo_name": "HybridF5/jacket",
"id": "91bea867b58b24d59f909825be4d98ae31ce334c",
"size": "29899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/compute/image/glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
}
|
from geogotchi.base import Geogotchi
__version__ = "0.2.1"
|
{
"content_hash": "f8042f8d18f34d762e8ed6e6ca2cafe5",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 36,
"avg_line_length": 20,
"alnum_prop": 0.7,
"repo_name": "NarrativeTeam/geogotchi",
"id": "3b3db9397bc0428e369350d1670023a36a2122d1",
"size": "1149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geogotchi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29049"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from .. import roi
from .. import utils as core
import itertools
from skimage import morphology
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_almost_equal)
from nose.tools import assert_equal, assert_true, assert_raises
logger = logging.getLogger(__name__)
def test_rectangles():
shape = (15, 26)
roi_data = np.array(([2, 2, 6, 3], [6, 7, 8, 5], [8, 18, 5, 10]),
dtype=np.int64)
all_roi_inds = roi.rectangles(roi_data, shape)
roi_inds, pixel_list = roi.extract_label_indices(all_roi_inds)
ty = np.zeros(shape).ravel()
ty[pixel_list] = roi_inds
num_pixels_m = (np.bincount(ty.astype(int)))[1:]
re_mesh = ty.reshape(*shape)
for i, (col_coor, row_coor, col_val, row_val) in enumerate(roi_data, 0):
ind_co = np.column_stack(np.where(re_mesh == i + 1))
left, right = np.max([col_coor, 0]), np.min([col_coor + col_val,
shape[0]])
top, bottom = np.max([row_coor, 0]), np.min([row_coor + row_val,
shape[1]])
assert_almost_equal(left, ind_co[0][0])
assert_almost_equal(right-1, ind_co[-1][0])
assert_almost_equal(top, ind_co[0][1])
assert_almost_equal(bottom-1, ind_co[-1][-1])
def test_rings():
center = (100., 100.)
img_dim = (200, 205)
first_q = 10.
delta_q = 5.
num_rings = 7 # number of Q rings
one_step_q = 5.0
step_q = [2.5, 3.0, 5.8]
# test when there is same spacing between rings
edges = roi.ring_edges(first_q, width=delta_q, spacing=one_step_q,
num_rings=num_rings)
print("edges there is same spacing between rings ", edges)
label_array = roi.rings(edges, center, img_dim)
print("label_array there is same spacing between rings", label_array)
label_mask, pixel_list = roi.extract_label_indices(label_array)
# number of pixels per ROI
num_pixels = np.bincount(label_mask, minlength=(np.max(label_mask)+1))
num_pixels = num_pixels[1:]
# test when there is same spacing between rings
edges = roi.ring_edges(first_q, width=delta_q, spacing=2.5,
num_rings=num_rings)
print("edges there is same spacing between rings ", edges)
label_array = roi.rings(edges, center, img_dim)
print("label_array there is same spacing between rings", label_array)
label_mask, pixel_list = roi.extract_label_indices(label_array)
# number of pixels per ROI
num_pixels = np.bincount(label_mask, minlength=(np.max(label_mask)+1))
num_pixels = num_pixels[1:]
# test when there is different spacing between rings
edges = roi.ring_edges(first_q, width=delta_q, spacing=step_q,
num_rings=4)
print("edges when there is different spacing between rings", edges)
label_array = roi.rings(edges, center, img_dim)
print("label_array there is different spacing between rings", label_array)
label_mask, pixel_list = roi.extract_label_indices(label_array)
# number of pixels per ROI
num_pixels = np.bincount(label_mask, minlength=(np.max(label_mask)+1))
num_pixels = num_pixels[1:]
# test when there is no spacing between rings
edges = roi.ring_edges(first_q, width=delta_q, num_rings=num_rings)
print("edges", edges)
label_array = roi.rings(edges, center, img_dim)
print("label_array", label_array)
label_mask, pixel_list = roi.extract_label_indices(label_array)
# number of pixels per ROI
num_pixels = np.bincount(label_mask, minlength=(np.max(label_mask)+1))
num_pixels = num_pixels[1:]
# Did we draw the right number of rings?
print(np.unique(label_array))
actual_num_rings = len(np.unique(label_array)) - 1
assert_equal(actual_num_rings, num_rings)
# Does each ring have more pixels than the last, being larger?
ring_areas = np.bincount(label_array.ravel())[1:]
area_comparison = np.diff(ring_areas)
print(area_comparison)
areas_monotonically_increasing = np.all(area_comparison > 0)
assert_true(areas_monotonically_increasing)
# Test various illegal inputs
assert_raises(ValueError,
lambda: roi.ring_edges(1, 2)) # need num_rings
# width incompatible with num_rings
assert_raises(ValueError,
lambda: roi.ring_edges(1, [1, 2, 3], num_rings=2))
# too few spacings
assert_raises(ValueError,
lambda: roi.ring_edges(1, [1, 2, 3], [1]))
# too many spacings
assert_raises(ValueError,
lambda: roi.ring_edges(1, [1, 2, 3], [1, 2, 3]))
# num_rings conflicts with width, spacing
assert_raises(ValueError,
lambda: roi.ring_edges(1, [1, 2, 3], [1, 2], 5))
def _helper_check(pixel_list, inds, num_pix, edges, center,
img_dim, num_qs):
# recreate the indices using pixel_list and inds values
ty = np.zeros(img_dim).ravel()
ty[pixel_list] = inds
data = ty.reshape(img_dim[0], img_dim[1])
# get the grid values from the center
grid_values = core.radial_grid(img_dim, center)
# get the indices into a grid
zero_grid = np.zeros((img_dim[0], img_dim[1]))
for r in range(num_qs):
vl = (edges[r][0] <= grid_values) & (grid_values < edges[r][1])
zero_grid[vl] = r + 1
# check the num_pixels
num_pixels = []
for r in range(num_qs):
num_pixels.append(int((np.histogramdd(np.ravel(grid_values), bins=1,
range=[[edges[r][0],
(edges[r][1] -
0.000001)]]))[0][0]))
assert_array_equal(num_pix, num_pixels)
def test_segmented_rings():
center = (75, 75)
img_dim = (150, 140)
first_q = 5
delta_q = 5
num_rings = 4 # number of Q rings
slicing = 4
edges = roi.ring_edges(first_q, width=delta_q, spacing=4,
num_rings=num_rings)
print("edges", edges)
label_array = roi.segmented_rings(edges, slicing, center,
img_dim, offset_angle=0)
print("label_array for segmented_rings", label_array)
# Did we draw the right number of ROIs?
label_list = np.unique(label_array.ravel())
actual_num_labels = len(label_list) - 1
num_labels = num_rings * slicing
assert_equal(actual_num_labels, num_labels)
# Did we draw the right ROIs? (1-16 with some zeros around too)
assert_array_equal(label_list, np.arange(num_labels + 1))
    # A brittle test to make sure the exact number of pixels per label
# is never accidentally changed:
# number of pixels per ROI
num_pixels = np.bincount(label_array.ravel())
expected_num_pixels = [18372, 59, 59, 59, 59, 129, 129, 129,
129, 200, 200, 200, 200, 269, 269, 269, 269]
assert_array_equal(num_pixels, expected_num_pixels)
def test_roi_pixel_values():
images = morphology.diamond(8)
label_array = np.zeros((256, 256))
# different shapes for the images and labels
assert_raises(ValueError,
lambda: roi.roi_pixel_values(images, label_array))
# create a label mask
center = (8., 8.)
inner_radius = 2.
width = 1
spacing = 1
edges = roi.ring_edges(inner_radius, width, spacing, num_rings=5)
rings = roi.rings(edges, center, images.shape)
intensity_data, index = roi.roi_pixel_values(images, rings)
assert_array_equal(intensity_data[0], ([1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]))
assert_array_equal([1, 2, 3, 4, 5], index)
def test_roi_max_counts():
img_stack1 = np.random.randint(0, 60, size=(50, ) + (50, 50))
img_stack2 = np.random.randint(0, 60, size=(100, ) + (50, 50))
img_stack1[0][20, 20] = 60
samples = (img_stack1, img_stack2)
label_array = np.zeros((img_stack1[0].shape))
label_array[img_stack1[0] < 20] = 1
label_array[img_stack1[0] > 40] = 2
assert_array_equal(60, roi.roi_max_counts(samples, label_array))
def test_static_test_sets():
images1 = []
for i in range(10):
int_array = np.tril(i*np.ones(50))
int_array[int_array == 0] = i*100
images1.append(int_array)
images2 = []
for i in range(20):
int_array = np.triu(i*np.ones(50))
int_array[int_array == 0] = i*100
images2.append(int_array)
samples = {'sample1': np.asarray(images1), 'sample2': np.asarray(images2)}
roi_data = np.array(([2, 30, 12, 15], [40, 20, 15, 10]), dtype=np.int64)
label_array = roi.rectangles(roi_data, shape=(50, 50))
# get the mean intensities of image sets given as a dictionary
roi_data = []
for k, v in sorted(samples.items()):
intensity, index_list = roi.mean_intensity(v, label_array)
roi_data.append(intensity)
return_values = [roi_data[0][:, 0], roi_data[0][:, 1],
roi_data[1][:, 0], roi_data[1][:, 1], ]
expected_values = [
np.asarray([float(x) for x in range(0, 1000, 100)]),
np.asarray([float(x) for x in range(0, 10, 1)]),
np.asarray([float(x) for x in range(0, 20, 1)]),
np.asarray([float(x) for x in range(0, 2000, 100)])
]
err_msg = ['roi%s of sample%s is incorrect' % (i, j)
for i, j in itertools.product((1, 2), (1, 2))]
for returned, expected, err in zip(return_values,
expected_values, err_msg):
assert_array_equal(returned, expected,
err_msg=err, verbose=True)
def test_circular_average():
image = np.zeros((12, 12))
calib_center = (5, 5)
inner_radius = 1
edges = roi.ring_edges(inner_radius, width=1, spacing=1, num_rings=2)
labels = roi.rings(edges, calib_center, image.shape)
image[labels == 1] = 10
image[labels == 2] = 10
bin_cen, ring_avg = roi.circular_average(image, calib_center, nx=6)
assert_array_almost_equal(bin_cen, [0.70710678, 2.12132034,
3.53553391, 4.94974747, 6.36396103,
7.77817459], decimal=6)
assert_array_almost_equal(ring_avg, [8., 2.5, 5.55555556, 0.,
0., 0.], decimal=6)
def test_kymograph():
calib_center = (25, 25)
inner_radius = 5
edges = roi.ring_edges(inner_radius, width=2, num_rings=1)
labels = roi.rings(edges, calib_center, (50, 50))
images = []
num_images = 100
for i in range(num_images):
int_array = i*np.ones(labels.shape)
images.append(int_array)
kymograph_data = roi.kymograph(np.asarray(images), labels, num=1)
    # make sure the returned array has the expected dimensions
expected_shape = (num_images, np.sum(labels[labels == 1]))
assert kymograph_data.shape[0] == expected_shape[0]
assert kymograph_data.shape[1] == expected_shape[1]
# make sure we got one element from each image
assert np.all(kymograph_data[:, 0] == np.arange(num_images))
# given the input data, every row of kymograph_data should be the same
# number
for row in kymograph_data:
assert np.all(row == row[0])
|
{
"content_hash": "320e30a85df8b36baa5fd3c024625b31",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 78,
"avg_line_length": 37.44951140065147,
"alnum_prop": 0.5941549969557276,
"repo_name": "hainm/scikit-xray",
"id": "51dc1d9cc96ab1e405106d2e3a29ccd1e797b1ff",
"size": "13979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skxray/core/tests/test_roi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "28"
},
{
"name": "C",
"bytes": "15189"
},
{
"name": "Python",
"bytes": "592017"
},
{
"name": "Shell",
"bytes": "38"
}
],
"symlink_target": ""
}
|
import arcpy, os, math, json
from arcpy import env
from arcpy import sa
import ErrorHandling as ErrorHandling
class Viewshed:
#def __init__(self, observers, imageService, radius, height, mask, wkidin, wkidproc, wkidout):
def __init__(self, observers, imageService, radius, height, mask, wkid):
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckOutExtension("Spatial")
else:
arcpy.AddMessage('License error')
parent = os.path.dirname((os.path.dirname(__file__)))
sw = os.path.join(parent, 'scratch')
ws = os.path.join(parent, 'data', 'data.gdb')
arcpy.AddMessage(sw)
arcpy.AddMessage(parent)
if not env.scratchWorkspace:
env.scratchWorkspace = sw
if not env.workspace:
env.workspace = ws
self.workspace = str(env.workspace)
env.overwriteOutput = True
self.scratch = str(env.scratchWorkspace)
self.scratchgdb = env.scratchGDB
self.service = imageService
self.height = height
self.mask = mask
self.sref = arcpy.SpatialReference(wkid)
#self.wkidin = wkidin
#self.wkidout=wkidout
#self.wkidproc= wkidproc
#self.srIn = arcpy.SpatialReference(wkidin)
#self.srProc = arcpy.SpatialReference(wkidproc)
#self.srOut = arcpy.SpatialReference(wkidout)
self.obsproc=self.__makeObserver__(observers, 'obs')
self.buffer = self.__makeBuffers__(radius)
self.cellsize = self.__CalculateCellSize__(self.buffer)
self.islyr = self.__CreateISLayer__(imageService)
observersz = self.__appendZs__(self.obsproc)
#self.obsz = self.__makeObserver__(observersz, 'obsz', self.wkidproc)
self.mask = self.__CreateMask__(mask)
def __makeObserver__(self, observers, name, wkid = None):
try:
arcpy.AddMessage("Creating observer...")
## curwkid = None
## if wkid:
## curwkid = wkid
## sref = arcpy.SpatialReference(wkid)
## else:
## curwkid = self.wkidin
## sref = self.srIn
## arcpy.SpatialReference(self.wkidin)
obs = self.__createFC__(observers, self.sref, name)
arcpy.AddMessage("observation fc: " + arcpy.Describe(obs).name)
#obsproj = os.path.join(self.scratchgdb, name+'_proj')
obsout = os.path.join(self.scratchgdb, name+'out')
            obs.save(obsout)
## if(curwkid != self.wkidproc):
## arcpy.AddMessage("Projecting observers...")
## arcpy.AddMessage("projected observation fc: " + obsproj)
## arcpy.Project_management(obs,obsproj,self.srProc)
## obsout = obsproj
## else:
## obsout=obs
h=self.height
arcpy.AddField_management(obsout, "OFFSETA", "DOUBLE", "", "", "", "", "NULLABLE", "NON_REQUIRED", "")
arcpy.CalculateField_management(obsout, "OFFSETA", h, "PYTHON", "")
return obsout
except arcpy.ExecuteError:
EH = ErrorHandling.ErrorHandling()
line, filename, err = EH.trace()
m = "Python error on " + line + " of " + __file__ + \
" : with error - " + err
arcpy.AddError(m)
def __makeBuffers__(self, radius):
try:
arcpy.AddMessage("Creating buffer...")
bufferfc = os.path.join("in_memory", "buffers")
arcpy.AddMessage("buffer fc: " + bufferfc)
arcpy.Buffer_analysis(self.obsproc, bufferfc, radius, "FULL", "ROUND", "ALL")
return bufferfc
except arcpy.ExecuteError:
EH = ErrorHandling.ErrorHandling()
line, filename, err = EH.trace()
m = "Python error on " + line + " of " + __file__ + \
" : with error - " + err
arcpy.AddError(m)
def __CalculateCellSize__(self, ds):
try:
arcpy.AddMessage("Calculating cellsize...")
width = arcpy.Describe(ds).extent.width
height = arcpy.Describe(ds).extent.height
return max(float(max(width,height))/2000.0,30.0)
except arcpy.ExecuteError:
EH = ErrorHandling.ErrorHandling()
line, filename, err = EH.trace()
m = "Python error on " + line + " of " + __file__ + \
" : with error - " + err
arcpy.AddError(m)
def __CreateISLayer__(self, service):
try:
arcpy.AddMessage("Creating image service layer...")
outislyr=os.path.join("in_memory",'ras_dsm')
arcpy.AddMessage("image service layer: " + outislyr)
arcpy.MakeImageServerLayer_management(service, outislyr, self.buffer, "", "CLOSEST_TO_CENTER", "", "", "", self.cellsize)
return outislyr
except arcpy.ExecuteError:
EH = ErrorHandling.ErrorHandling()
line, filename, err = EH.trace()
m = "Python error on " + line + " of " + __file__ + \
" : with error - " + err
arcpy.AddError(m)
def __CreateMask__(self, jsonGeo):
try:
arcpy.AddMessage("Creating mask...")
jsonPoly = json.loads(jsonGeo)
rings=arcpy.Array()
for ring in jsonPoly['rings']:
                points = arcpy.Array()
for coord in ring:
x=coord[0]
y=coord[1]
z=None
if len(coord)>2:
z=coord[2]
#z=coord[3]
p=arcpy.Point()
p.X=x
p.Y=y
if z:
p.Z=z
points.add(p)
rings.add(points)
wkid = jsonPoly['spatialReference']['wkid']
polySrIn = arcpy.SpatialReference(wkid)
polygon=arcpy.Polygon(rings,polySrIn)
features = []
masktmp = os.path.join("in_memory", 'masktmp')
arcpy.AddMessage("mask fc: " + masktmp)
#mask_proj = os.path.join(self.scratchgdb, 'maskproj')
features.append(polygon)
arcpy.CopyFeatures_management(features, masktmp)
## if(wkid != self.wkidproc):
## arcpy.AddMessage("Projecting mask...")
## arcpy.AddMessage("projected mask fc: " + mask_proj)
## arcpy.Project_management(masktmp, mask_proj, self.srProc)
## mask = mask_proj
## else:
## mask = masktmp
return masktmp
except arcpy.ExecuteError:
EH = ErrorHandling.ErrorHandling()
line, filename, err = EH.trace()
m = "Python error on " + line + " of " + __file__ + \
" : with error - " + err
arcpy.AddError(m)
def createViewshed(self):
try:
tempEnvironment0 = arcpy.env.extent
arcpy.env.extent = self.buffer
tempEnvironment1 = arcpy.env.cellSize
arcpy.env.cellSize = self.cellsize
tempEnvironment2 = arcpy.env.mask
arcpy.env.mask = self.buffer
outraster = sa.Viewshed(self.islyr, self.obsproc, 1, "CURVED_EARTH", 0.13)
#outrastertemp = os.path.join(self.scratch, 'outvis')
#outraster.save(outrastertemp)
vshedtmp = os.path.join("in_memory", 'vshedtmp')
vsheddis = os.path.join("in_memory", 'vsheddis')
#vshed_proj = os.path.join(self.scratchgdb, 'vshedproj')
arcpy.AddMessage("temp vshed fc:" + vshedtmp)
arcpy.AddMessage("dissolved vshed fc: " + vsheddis)
arcpy.env.extent = tempEnvironment0
arcpy.env.cellSize = tempEnvironment1
arcpy.env.mask = tempEnvironment2
arcpy.RasterToPolygon_conversion(outraster, vshedtmp, "SIMPLIFY", "VALUE")
arcpy.Dissolve_management(vshedtmp, vsheddis, "gridcode", "", "MULTI_PART", "DISSOLVE_LINES")
## if(self.wkidproc != self.wkidout):
## arcpy.AddMessage("Projecting output vshed...")
## arcpy.AddMessage("projected vshed fc: " + vshed_proj)
## arcpy.Project_management(vsheddis, vshed_proj, self.srOut)
## vshed=vshed_proj
## else:
## vshed=vsheddis
#vistmp = os.path.join('in_memory', 'visibility')
vis = os.path.join(self.scratchgdb, 'visibility')
arcpy.AddMessage('creating output viewshed: ' + vis)
arcpy.Clip_analysis(vsheddis, self.mask, vis, "")
arcpy.AddMessage("Coppying to output...")
#arcpy.CopyFeatures_management(vistmp, vis)
fset = arcpy.FeatureSet()
fset.load(vis)
return fset
except arcpy.ExecuteError:
EH = ErrorHandling.ErrorHandling()
line, filename, err = EH.trace()
m = "Python error on " + line + " of " + __file__ + \
" : with error - " + err
arcpy.AddError(m)
def __createFC__(self, points, sr, name):
try:
#'-34.04 68.5,-34.05'
coordpairs = points.split(';')
pointGeometryList = []
has_z = False
for coordpair in coordpairs:
pt = arcpy.Point()
coords = coordpair.split(' ')
arcpy.AddMessage(coords)
pt.X = float(coords[0])
pt.Y = float(coords[1])
if len(coords) > 2:
has_z = True
arcpy.AddMessage('adding z...')
                    pt.Z = float(coords[2])  # arcpy Point exposes elevation as upper-case Z
pointGeometry = arcpy.PointGeometry(pt, sr, has_z)
pointGeometryList.append(pointGeometry)
#path = self.scratch + os.sep + 'scratch.gdb' + os.sep + name
path=os.path.join("in_memory",name)
arcpy.AddMessage('path to sourcept: ' + path)
arcpy.AddMessage(path)
arcpy.CopyFeatures_management(pointGeometryList, path)
fset = arcpy.FeatureSet()
fset.load(path)
self.pt = fset
return fset
except arcpy.ExecuteError:
EH = ErrorHandling.ErrorHandling()
line, filename, err = EH.trace()
m = "Python error on " + line + " of " + __file__ + \
" : with error - " + err
arcpy.AddError(m)
def __appendZs__(self, points):
try:
newcoords = ""
coordpairs = points.split(';')
pointGeometryList = []
fields=["SHAPE@X", "SHAPE@Y"]
rows = arcpy.da.SearchCursor(points, fields)
for row in rows:
coordpair = str(row[0]) + ' ' + str(row[1])
arcpy.AddMessage('coords: ' + coordpair)
result = arcpy.GetCellValue_management(self.islyr, coordpair)
e = result.getOutput(0)
if e != 'NoData':
v = int(e) + self.height
else:
v = self.height
if len(newcoords)>0:
newcoords += ";" + coordpair + " " + str(v)
else:
newcoords += coordpair + " " + str(v)
del rows
del row
arcpy.AddMessage('newcoords: ' + newcoords)
return newcoords
except arcpy.ExecuteError:
EH = ErrorHandling.ErrorHandling()
line, filename, err = EH.trace()
m = "Python error on " + line + " of " + __file__ + \
" : with error - " + err
arcpy.AddError(m)
|
{
"content_hash": "ac7e3383e59977a184c4573bc9539e7b",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 133,
"avg_line_length": 41.31818181818182,
"alnum_prop": 0.5225522552255225,
"repo_name": "abrowning80/solutions-geoevent-java",
"id": "a4dbeab1c410856554e902ef94d0f5af641d5535",
"size": "12131",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "solutions-geoevent/10.1.x-10.2.2/processors/geometry-processor/geoprocessing/visibility/Toolshare/scripts/visibility.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "138"
},
{
"name": "HTML",
"bytes": "534"
},
{
"name": "Java",
"bytes": "1346608"
},
{
"name": "Python",
"bytes": "109038"
}
],
"symlink_target": ""
}
|
import unittest
from wordpress_xmlrpc import Client
from cwpoliticl import settings
from cwpoliticl.extensions.rpc.images_downloader import ImagesDownload
class WDXmlRpcTest(unittest.TestCase):
def setUp(self):
url = "{}/xmlrpc.php".format(settings.WD_HOST)
self.wp = Client(url, settings.WD_USER, settings.WD_PASSWD)
# self.image_link = 'http://theviewspaper.net/wp-content/uploads/WordsOfTerror-1024x576.jpg'
self.image_link = 'http://localhost:8888/politicl/wp-content/uploads/2016/07/picture-324x160.jpeg'
# def test_get_posts(self):
# wp_call = self.wp.call(GetPosts())
# x = 0
# [ < WordPressPost: hello - world(id=1) >]
# def test_get_user_info(self):
# wp_call = self.wp.call(GetUserInfo())
# x = 0
# < WordPressUser: max >
def test_post_image(self):
# '/var/folders/t1/tylq1lf13nv3rzfll_hh_5fh0000gn/T/politicl/daaf4badb91771a0d23647d713068663'
# '/var/folders/t1/tylq1lf13nv3rzfll_hh_5fh0000gn/T/politicl'
image_location = ImagesDownload.write_image_cache(self.image_link)
self.assertIsNotNone(image_location)
# def test_post_page(self):
# post = WordPressPost()
# post.title = 'Python_Scraping_for_WordPress'
# post.content = 'How to post to wordpress using xml_rpc.'
# post.post_type = "post"
# post.post_status = "publish"
# post.terms_names = {
# 'post_tag': ['scrapy', 'xml_rpc'],
# 'category': ['Tutorial', 'Tests']
# }
# wp_call = self.wp.call(NewPost(post))
#
# x = 0
# def get_image_path(self):
# folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # script directory
# image_path = "{}/{}".format(folder, "recipe.jpg")
#
# return image_path
#
# def test_post_page_with_image(self):
# # Step 01
# # prepare metadata
# data = {
# 'name': 'picture.jpeg',
# 'type': 'image/jpeg', # mimetype
# }
#
# path = self.get_image_path()
# with open(path, 'rb') as img:
# data['bits'] = xmlrpc_client.Binary(img.read())
# response = self.wp.call(media.UploadFile(data))
# attachment_id = response['id']
#
# # Step 02
# post = WordPressPost()
# post.title = 'Post by the xml_rpc with a thumbnail'
# post.content = 'How to post to wordpress using xml_rpc with a thumbnail.'
# post.post_type = "post"
# post.post_status = "publish"
# post.terms_names = {
# 'post_tag': ['scrapy', 'xml_rpc'],
# 'category': ['Tutorial', 'Tests']
# }
# post.custom_fields = []
# post.custom_fields.append({
# 'key': 'custom_source_url',
# 'value': 'http://www.scruby.site'
# })
# # cat1 = self.wp.call(taxonomies.GetTerm('category', 'wanghao'))
# # post.terms.append(cat1)
# post.thumbnail = attachment_id
#
# addpost = self.wp.call(posts.NewPost(post))
#
# x = 0
|
{
"content_hash": "d0d8f33cf88626be00bf1f588ce59504",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 116,
"avg_line_length": 38.247191011235955,
"alnum_prop": 0.5264394829612221,
"repo_name": "trujunzhang/djzhang-targets",
"id": "dc39161c24d0f15759b938abf7f900c3078df54a",
"size": "3429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cwpoliticl/cwpoliticl/tests/test_wd_xml_rpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7418804"
},
{
"name": "JavaScript",
"bytes": "936547"
},
{
"name": "PHP",
"bytes": "94539"
},
{
"name": "Python",
"bytes": "564898"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class ComplainConfig(AppConfig):
name = 'complains'
|
{
"content_hash": "641468a4d607e5c46e0b00f59800cd17",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 18.4,
"alnum_prop": 0.7608695652173914,
"repo_name": "Brunux/shityjobs",
"id": "a032f28925e12eb1ee0fa43de7e2be5b6d1be1ad",
"size": "92",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "complains/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "829"
},
{
"name": "HTML",
"bytes": "19148"
},
{
"name": "Python",
"bytes": "31386"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import uuid
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import BaseSpatialOperations
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import six
from django.utils.encoding import force_text
from .base import Database
class HanaSpatialOperator(SpatialOperator):
sql_template = '%(lhs)s.%(func)s(%(rhs)s)'
class HanaIsOneSpatialOperator(SpatialOperator):
sql_template = '%(lhs)s.%(func)s(%(rhs)s) = 1'
class HanaIsValueSpatialOperator(SpatialOperator):
sql_template = '%(lhs)s.%(func)s(%(rhs)s) %(op)s %%s'
class DatabaseOperations(BaseDatabaseOperations, BaseSpatialOperations):
compiler_module = 'django_hana.compiler'
Adapter = WKTAdapter
Adaptor = Adapter # Backwards-compatibility alias.
gis_operators = {
'contains': HanaIsOneSpatialOperator(func='ST_CONTAINS'),
'coveredby': HanaIsOneSpatialOperator(func='ST_COVEREDBY'),
'covers': HanaIsOneSpatialOperator(func='ST_COVERS'),
'crosses': HanaIsOneSpatialOperator(func='ST_CROSSES'),
'disjoint': HanaIsOneSpatialOperator(func='ST_DISJOINT'),
'distance': HanaIsValueSpatialOperator(func='ST_DISTANCE', op='='),
'distance_gt': HanaIsValueSpatialOperator(func='ST_DISTANCE', op='>'),
'distance_gte': HanaIsValueSpatialOperator(func='ST_DISTANCE', op='>='),
'distance_lt': HanaIsValueSpatialOperator(func='ST_DISTANCE', op='<'),
'distance_lte': HanaIsValueSpatialOperator(func='ST_DISTANCE', op='<='),
'equals': HanaIsOneSpatialOperator(func='ST_EQUALS'),
'exact': HanaIsOneSpatialOperator(func='ST_EQUALS'),
'intersects': HanaIsOneSpatialOperator(func='ST_INTERSECTS'),
'overlaps': HanaIsOneSpatialOperator(func='ST_OVERLAPS'),
'same_as': HanaIsOneSpatialOperator(func='ST_EQUALS'),
'relate': HanaIsOneSpatialOperator(func='ST_RELATE'),
'touches': HanaIsOneSpatialOperator(func='ST_TOUCHES'),
'within': HanaIsValueSpatialOperator(func='ST_WITHINDISTANCE', op='<='),
}
def __init__(self, connection):
super(DatabaseOperations, self).__init__(connection)
def get_seq_name(self, table, column):
return '%s_%s_seq' % (table, column)
def autoinc_sql(self, table, column):
seq_name = self.quote_name(self.get_seq_name(table, column))
column = self.quote_name(column)
table = self.quote_name(table)
seq_sql = 'CREATE SEQUENCE %(seq_name)s RESET BY SELECT IFNULL(MAX(%(column)s),0) + 1 FROM %(table)s' % locals()
return [seq_sql]
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
return 'MOD(WEEKDAY (%s) + 2,7)' % field_name
else:
return 'EXTRACT(%s FROM %s)' % (lookup_type, field_name)
def date_trunc_sql(self, lookup_type, field_name):
# very low tech, code should be optimized
ltypes = {
'year': 'YYYY',
'month': 'YYYY-MM',
'day': 'YYYY-MM-DD',
}
cur_type = ltypes.get(lookup_type)
if not cur_type:
return field_name
sql = 'TO_DATE(TO_VARCHAR(%s, "%s"))' % (field_name, cur_type)
return sql
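    # A small illustration of the SQL produced above (assuming a column named
    # "created_at"; unsupported granularities fall through unchanged):
    #
    #   date_trunc_sql('month', 'created_at')
    #   # -> 'TO_DATE(TO_VARCHAR(created_at, "YYYY-MM"))'
    #   date_trunc_sql('hour', 'created_at')
    #   # -> 'created_at'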
def no_limit_value(self):
return None
def quote_name(self, name):
return '"%s"' % name.replace('"', '""').upper()
def bulk_batch_size(self, fields, objs):
return 2500
def sql_flush(self, style, tables, sequences, allow_cascades=False):
if tables:
sql = [
' '.join([
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)),
])
for table in tables
]
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
table_name = sequence_info['table']
column_name = sequence_info['column']
seq_name = self.get_seq_name(table_name, column_name)
sql.append(' '.join([
'ALTER SEQUENCE',
seq_name,
'RESET BY SELECT IFNULL(MAX(',
column_name,
'),0) + 1 from',
table_name,
]))
return sql
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append(' '.join([
style.SQL_KEYWORD('ALTER SEQUENCE'),
style.SQL_TABLE(self.get_seq_name(model._meta.db_table, f.column)),
style.SQL_KEYWORD('RESET BY SELECT'),
style.SQL_FIELD('IFNULL(MAX('+f.column+'),0) + 1'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(model._meta.db_table),
]))
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
if not f.rel.through:
output.append(' '.join([
style.SQL_KEYWORD('ALTER SEQUENCE'),
style.SQL_TABLE(self.get_seq_name(f.m2m_db_table(), 'id')),
style.SQL_KEYWORD('RESET BY SELECT'),
style.SQL_FIELD('IFNULL(MAX(id),0) + 1'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(f.m2m_db_table())
]))
return output
def prep_for_iexact_query(self, x):
return x
def check_aggregate_support(self, aggregate):
"""
Check that the backend supports the provided aggregate.
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplementedError.
"""
if aggregate.sql_function in ('STDDEV_POP', 'VAR_POP'):
raise NotImplementedError()
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return 127
def start_transaction_sql(self):
return ''
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
seq_name = self.connection.ops.get_seq_name(table_name, pk_name)
sql = 'select {}.currval from dummy'.format(seq_name)
cursor.execute(sql)
return cursor.fetchone()[0]
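    # For example (hypothetical table "myapp_entry" with primary key "id"),
    # the statement issued above is:
    #   select myapp_entry_id_seq.currval from dummy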
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
if value.tzinfo:
# HANA doesn't support timezone. If tzinfo is present truncate it.
# Better set USE_TZ=False in settings.py
import datetime
return six.text_type(
datetime.datetime(
value.year, value.month, value.day, value.hour, value.minute, value.second, value.microsecond
)
)
return six.text_type(value)
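    # A small illustration (hypothetical value, assuming a UTC tzinfo object)
    # of the truncation above: an aware datetime is rebuilt without tzinfo
    # before stringification, so HANA receives a naive timestamp.
    #
    #   value_to_db_datetime(datetime.datetime(2016, 7, 1, 12, 30, tzinfo=utc))
    #   # -> u'2016-07-01 12:30:00'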
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return 'UPPER(%s)'
return '%s'
def convert_values(self, value, field):
"""
        Type conversion for boolean field. Keeping values as 0/1 confuses
the modelforms.
"""
if (field and field.get_internal_type() in ('BooleanField', 'NullBooleanField') and value in (0, 1)):
value = bool(value)
return value
# Decimal to Database. Django == 1.8
def value_to_db_decimal(self, value, max_digits, decimal_places):
return value or None
# Decimal to Database. Django >= 1.9
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
return value or None
def modify_insert_params(self, placeholder, params):
insert_param_groups = []
for p in params:
if isinstance(p, list):
insert_param_groups.append([self.sanitize_bool(value) for value in p])
else:
# As of Django 1.9, modify_insert_params is also called in SQLInsertCompiler.field_as_sql.
# When it's called from there, params is not a list inside a list, but only a list.
insert_param_groups.append(self.sanitize_bool(p))
return insert_param_groups
def modify_update_params(self, params):
return tuple(self.sanitize_bool(param) for param in params)
def modify_params(self, params):
return tuple(self.sanitize_geometry(param) for param in params)
def sanitize_bool(self, param):
if type(param) is bool:
return 1 if param else 0
return param
def sanitize_geometry(self, param):
if type(param) is WKTAdapter:
return str(param)
return param
def get_db_converters(self, expression):
converters = super(DatabaseOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
geometry_fields = (
'PointField', 'LineStringField', 'PolygonField',
'MultiPointField', 'MultiLineStringField', 'MultiPolygonField',
)
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
elif internal_type == 'BinaryField':
converters.append(self.convert_binaryfield_value)
elif internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
elif internal_type in geometry_fields:
converters.append(self.convert_geometry_value)
if hasattr(expression.output_field, 'geom_type'):
converters.append(self.convert_geometry)
return converters
def convert_textfield_value(self, value, expression, connection, context):
if isinstance(value, Database.NClob):
value = force_text(value.read())
return value
def convert_binaryfield_value(self, value, expression, connection, context):
if isinstance(value, Database.Blob):
value = value.read()
return value
def convert_booleanfield_value(self, value, expression, connection, context):
if value in (0, 1):
value = bool(value)
return value
def convert_uuidfield_value(self, value, expression, connection, context):
if value is not None:
value = uuid.UUID(value)
return value
def convert_geometry_value(self, value, expression, connection, context):
if value is not None:
value = ''.join('{:02x}'.format(x) for x in value)
return value
def convert_geometry(self, value, expression, connection, context):
if value:
value = Geometry(value)
if 'transformed_srid' in context:
value.srid = context['transformed_srid']
return value
def _geo_db_type(self, f):
return 'ST_%s' % f.geom_type
def geo_db_type(self, f):
internal_type = self._geo_db_type(f)
return internal_type if f.geom_type == 'POINT' else 'ST_GEOMETRY'
def get_distance(self, f, value, lookup_type):
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
                raise ValueError('SAP HANA does not support distance queries on '
                                 'geometry fields with a geodetic coordinate '
                                 'system. Pass a numeric distance value in '
                                 'degrees instead of a Distance object.')
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
if value is None:
placeholder = '%s'
else:
db_type = self._geo_db_type(f)
placeholder = 'NEW %s(%%s, %s)' % (db_type, f.srid)
if hasattr(value, 'as_sql'):
sql, _ = compiler.compile(value)
placeholder = placeholder % sql
return placeholder
def geometry_columns(self):
from django_hana.models import HanaGeometryColumns
return HanaGeometryColumns
def spatial_ref_sys(self):
from django_hana.models import HanaSpatialRefSys
return HanaSpatialRefSys
|
{
"content_hash": "1fc06241b5a6e4ea6b04ddd313ad05df",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 120,
"avg_line_length": 38.92134831460674,
"alnum_prop": 0.5953377598152425,
"repo_name": "mathebox/django_hana_pyhdb",
"id": "cf5ba9d0f408b65f4f6575ea7b0a679e7f80be5c",
"size": "13856",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_hana/operations.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "81481"
}
],
"symlink_target": ""
}
|
import os
import random
import signal
import torch
from fairseq import distributed_utils, options
from train import main as single_process_main
def main(args):
# Set distributed training parameters for a single node.
args.distributed_world_size = torch.cuda.device_count()
args.distributed_init_method = 'tcp://localhost:{port}'.format(
port=random.randint(10000, 20000))
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(args.distributed_world_size):
args.distributed_rank = i
args.device_id = i
procs.append(mp.Process(target=run, args=(args, error_queue, ), daemon=True))
procs[i].start()
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, error_queue):
try:
args.distributed_rank = distributed_utils.distributed_init(args)
single_process_main(args)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.distributed_rank, traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
self.children_pids.append(pid)
def error_listener(self):
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
msg += original_trace
raise Exception(msg)
if __name__ == '__main__':
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser)
main(args)
|
{
"content_hash": "0b41e3efe668b6184fb9fb0bb70b0c13",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 85,
"avg_line_length": 32.55,
"alnum_prop": 0.6620583717357911,
"repo_name": "mlperf/training_results_v0.6",
"id": "062067adf65d6699d860a219d9d6ed716c5b21d3",
"size": "2916",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "NVIDIA/benchmarks/transformer/implementations/pytorch/multiprocessing_train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
}
|
name = "Polycyclic Ring Corrections"
shortDesc = u""
longDesc = u"""
"""
|
{
"content_hash": "375bccc4ce393fd441bf407185d4c791",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 36,
"avg_line_length": 12.5,
"alnum_prop": 0.64,
"repo_name": "Molecular-Image-Recognition/Molecular-Image-Recognition",
"id": "fd2f3c03417646fce84f91df75fea6b072ae285f",
"size": "116",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "code/rmgpy/test_data/testing_database/thermo/groups/polycyclic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4715"
},
{
"name": "Python",
"bytes": "5599677"
}
],
"symlink_target": ""
}
|
"""Used to confirm and act on delete requests from the Admin Console."""
import os
import re
import urllib
from google.appengine.api import capabilities
from google.appengine.api import datastore
from google.appengine.ext import webapp
from google.appengine.ext.datastore_admin import config
from google.appengine.ext.datastore_admin import utils
try:
from google.appengine.ext.mapreduce import model
from google.appengine.ext.mapreduce import input_readers
from google.appengine.ext.mapreduce import operation
except ImportError:
from google.appengine._internal.mapreduce import model
from google.appengine._internal.mapreduce import input_readers
from google.appengine._internal.mapreduce import operation
MAPREDUCE_OBJECTS = [model.MapreduceState.kind(),
model.ShardState.kind()]
XSRF_ACTION = 'delete'
KIND_AND_SIZE_RE = re.compile('^(.*)\|(-?[0-9]+)$')
def DeleteEntity(key):
"""Delete function which deletes all processed entities.
Args:
key: key of the entity to delete.
Yields:
a delete operation if the entity is not an active mapreduce or
DatastoreAdminOperation object.
"""
if key.kind() in MAPREDUCE_OBJECTS:
entity = datastore.Get(key)
if entity and not entity["active"]:
yield operation.db.Delete(key)
elif key.kind() == utils.DatastoreAdminOperation.kind():
entity = datastore.Get(key)
if entity and not entity["active_jobs"]:
yield operation.db.Delete(key)
else:
yield operation.db.Delete(key)
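# A minimal sketch (hypothetical key) of the generator contract documented
# above: callers iterate the yielded operations, and the mapreduce framework
# applies each operation.db.Delete; nothing is deleted directly here.
#
#   for op in DeleteEntity(datastore.Key.from_path('Trash', 1)):
#       pass  # op is an operation.db.Delete for the processed key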
class ConfirmDeleteHandler(webapp.RequestHandler):
"""Handler to deal with requests from the admin console to delete data."""
SUFFIX = 'confirm_delete'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py or get.
    This method executes no action, so the method by which it is accessed is
    immaterial. Exposing it via GET can be desirable: if this builtin is turned
    on, anyone can create a form to delete a kind simply by linking to the
    ConfirmDeleteHandler like so:
<a href="/_ah/datastore_admin/confirm_delete?kind=trash">
Delete all Trash Objects</a>
Args:
handler: the webapp.RequestHandler invoking the method
"""
readonly_warning = not capabilities.CapabilitySet(
'datastore_v3', capabilities=['write']).is_enabled()
namespace = handler.request.get('namespace')
kinds = handler.request.get_all('kind')
sizes_known, size_total, remainder = utils.ParseKindsAndSizes(kinds)
(namespace_str, kind_str) = utils.GetPrintableStrs(namespace, kinds)
template_params = {
'readonly_warning': readonly_warning,
'form_target': DoDeleteHandler.SUFFIX,
'kind_list': kinds,
'remainder': remainder,
'sizes_known': sizes_known,
'size_total': size_total,
'app_id': handler.request.get('app_id'),
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'kind_str': kind_str,
'namespace_str': namespace_str,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
}
utils.RenderToResponse(handler, 'confirm_delete.html', template_params)
def get(self):
"""Handler for get requests to datastore_admin/confirm_delete."""
ConfirmDeleteHandler.Render(self)
class DoDeleteHandler(webapp.RequestHandler):
"""Handler to deal with requests from the admin console to delete data."""
SUFFIX = 'delete.do'
DELETE_HANDLER = (
'google.appengine.ext.datastore_admin.delete_handler.DeleteEntity')
INPUT_READER = input_readers.__name__ + '.DatastoreKeyInputReader'
MAPREDUCE_DETAIL = config.MAPREDUCE_PATH + '/detail?mapreduce_id='
def get(self):
"""Handler for get requests to datastore_admin/delete.do.
Status of executed jobs is displayed.
"""
jobs = self.request.get_all('job')
error = self.request.get('error', '')
xsrf_error = self.request.get('xsrf_error', '')
noconfirm_error = self.request.get('noconfirm_error', '')
template_params = {
'job_list': jobs,
'mapreduce_detail': self.MAPREDUCE_DETAIL,
'error': error,
'xsrf_error': xsrf_error,
'noconfirm_error': noconfirm_error,
'datastore_admin_home': config.BASE_PATH,
}
utils.RenderToResponse(self, 'do_delete.html', template_params)
def post(self):
"""Handler for post requests to datastore_admin/delete.do.
Jobs are executed and user is redirected to the get handler.
"""
namespace = self.request.get('namespace')
kinds = self.request.get_all('kind')
(namespace_str, kinds_str) = utils.GetPrintableStrs(namespace, kinds)
token = self.request.get('xsrf_token')
readonly_warning = self.request.get('readonly_warning')
jobs = []
if (readonly_warning == 'True') and not self.request.get(
'confirm_readonly_delete'):
parameters = [('noconfirm_error', '1')]
else:
if utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
op = utils.StartOperation(
'Deleting %s%s' % (kinds_str, namespace_str))
name_template = 'Delete all %(kind)s objects%(namespace)s'
mapreduce_params = {'force_ops_writes': True}
queue = self.request.get('queue')
queue = queue or os.environ.get(
'HTTP_X_APPENGINE_QUEUENAME', 'default')
if queue[0] == '_':
queue = 'default'
jobs = utils.RunMapForKinds(
op.key(),
kinds,
name_template,
self.DELETE_HANDLER,
self.INPUT_READER,
None,
{},
mapreduce_params=mapreduce_params,
queue_name=queue,
max_shard_count=utils.MAPREDUCE_DEFAULT_SHARDS)
error = ''
except Exception, e:
error = self._HandleException(e)
parameters = [('job', job) for job in jobs]
if error:
parameters.append(('error', error))
else:
parameters = [('xsrf_error', '1')]
query = urllib.urlencode(parameters)
self.redirect('%s/%s?%s' % (config.BASE_PATH, self.SUFFIX, query))
def _HandleException(self, e):
"""Make exception handling overrideable by tests.
In normal cases, return only the error string; do not fail to render the
    page for the user.
"""
return str(e)
|
{
"content_hash": "512e48c4fee9cd647850b4106622db90",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 80,
"avg_line_length": 32.59183673469388,
"alnum_prop": 0.6579524107701941,
"repo_name": "ychen820/microblog",
"id": "c78917e22f97fd7dc4c3ada9aa5f90eebdfe0963",
"size": "6993",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/platform/google_appengine/google/appengine/ext/datastore_admin/delete_handler.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
""" Bitbucket Cloud common package """
|
{
"content_hash": "bd74681fa95430bc19da0b05516d9fbc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 38,
"avg_line_length": 39,
"alnum_prop": 0.6923076923076923,
"repo_name": "MattAgile/atlassian-python-api",
"id": "02c9e932dbf98eba12cb7548727d1316543439e5",
"size": "39",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "atlassian/bitbucket/cloud/common/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35317"
}
],
"symlink_target": ""
}
|
"""Tests for PCI request."""
from nova import exception
from nova.openstack.common import jsonutils
from nova.pci import pci_request as pci_request
from nova import test
_fake_alias1 = """{
"name": "QuicAssist",
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"device_type": "ACCEL"
}"""
_fake_alias11 = """{
"name": "QuicAssist",
"capability_type": "pci",
"product_id": "4444",
"vendor_id": "8086",
"device_type": "ACCEL"
}"""
_fake_alias2 = """{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "1111",
"device_type": "N"
}"""
_fake_alias3 = """{
"name": "IntelNIC",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "NIC"
}"""
class AliasTestCase(test.NoDBTestCase):
def setUp(self):
super(AliasTestCase, self).setUp()
def test_good_alias(self):
self.flags(pci_alias=[_fake_alias1])
als = pci_request._get_alias_from_config()
self.assertEqual(type(als['QuicAssist']), list)
expect_dict = {
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"device_type": "ACCEL"
}
self.assertEqual(expect_dict, als['QuicAssist'][0])
def test_multispec_alias(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias11])
als = pci_request._get_alias_from_config()
self.assertEqual(type(als['QuicAssist']), list)
expect_dict1 = {
"capability_type": "pci",
"product_id": "4443",
"vendor_id": "8086",
"device_type": "ACCEL"
}
expect_dict2 = {
"capability_type": "pci",
"product_id": "4444",
"vendor_id": "8086",
"device_type": "ACCEL"
}
self.assertEqual(expect_dict1, als['QuicAssist'][0])
self.assertEqual(expect_dict2, als['QuicAssist'][1])
    def test_wrong_type_alias(self):
self.flags(pci_alias=[_fake_alias2])
self.assertRaises(exception.PciInvalidAlias,
pci_request._get_alias_from_config)
    def test_wrong_product_id_alias(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "g111",
"vendor_id": "1111",
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
pci_request._get_alias_from_config)
    def test_wrong_vendor_id_alias(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "0xg111",
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
pci_request._get_alias_from_config)
    def test_wrong_cap_type_alias(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "usb",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "NIC"
}"""])
self.assertRaises(exception.PciInvalidAlias,
pci_request._get_alias_from_config)
    def test_dup_alias(self):
self.flags(pci_alias=[
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "NIC"
}""",
"""{
"name": "xxx",
"capability_type": "pci",
"product_id": "1111",
"vendor_id": "8086",
"device_type": "ACCEL"
}"""])
self.assertRaises(
exception.PciInvalidAlias,
pci_request._get_alias_from_config)
    def test_alias_2_request(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [
{'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'device_type': 'ACCEL',
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'},
{'count': 1,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'device_type': "NIC",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
requests = pci_request._translate_alias_to_requests(
"QuicAssist : 3, IntelNIC: 1")
self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
exp_real = zip(expect_request, requests)
for exp, real in exp_real:
self.assertEqual(real, exp)
    def test_alias_2_request_invalid(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
self.assertRaises(exception.PciRequestAliasNotDefined,
pci_request._translate_alias_to_requests,
"QuicAssistX : 3")
def test_get_pci_requests_from_flavor(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [
{'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'device_type': "ACCEL",
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'},
{'count': 1,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'device_type': "NIC",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
flavor = {'extra_specs': {"pci_passthrough:alias":
"QuicAssist:3, IntelNIC: 1"}}
requests = pci_request.get_pci_requests_from_flavor(flavor)
self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
exp_real = zip(expect_request, requests)
for exp, real in exp_real:
self.assertEqual(real, exp)
def test_get_pci_requests_from_flavor_no_extra_spec(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
flavor = {}
requests = pci_request.get_pci_requests_from_flavor(flavor)
self.assertEqual([], requests)
def test_get_instance_pci_requests_no_meta(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
instance = {}
requests = pci_request.get_instance_pci_requests(instance)
self.assertEqual([], requests)
def test_get_instance_pci_requests_no_request(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
instance = {'system_metadata': {'a': 'b'}}
requests = pci_request.get_instance_pci_requests(instance)
self.assertEqual([], requests)
def test_get_instance_pci_requests(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [{
'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'device_type': "ACCEL",
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'}]
instance = {"system_metadata": {"pci_requests":
jsonutils.dumps(expect_request)}}
requests = pci_request.get_instance_pci_requests(instance)
exp_real = zip(expect_request, requests)
for exp, real in exp_real:
self.assertEqual(real, exp)
def test_get_instance_pci_requests_prefix(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [{
'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'device_type': "ACCEL",
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'}]
instance = {"system_metadata": {"new_pci_requests":
jsonutils.dumps(expect_request)}}
requests = pci_request.get_instance_pci_requests(instance, 'new_')
exp_real = zip(expect_request, requests)
for exp, real in exp_real:
self.assertEqual(real, exp)
def test_save_flavor_pci_info(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [
{'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'device_type': "ACCEL",
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'},
{'count': 1,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'device_type': "NIC",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
flavor = {'extra_specs': {"pci_passthrough:alias":
"QuicAssist:3, IntelNIC: 1"}}
meta = {}
pci_request.save_flavor_pci_info(meta, flavor)
real = jsonutils.loads(meta['pci_requests'])
exp_real = zip(expect_request, real)
for exp, real in exp_real:
self.assertEqual(real, exp)
meta = {}
pci_request.save_flavor_pci_info(meta, flavor, "old_")
real = jsonutils.loads(meta['old_pci_requests'])
exp_real = zip(expect_request, real)
for exp, real in exp_real:
self.assertEqual(real, exp)
def test_delete_flavor_pci_info(self):
meta = {"pci_requests": "fake", "old_pci_requests": "fake"}
pci_request.delete_flavor_pci_info(meta, '')
self.assertNotIn('pci_requests', meta)
pci_request.delete_flavor_pci_info(meta, 'old_')
self.assertNotIn('old_pci_requests', meta)
|
{
"content_hash": "9b9c98681d65c1e332f63fe9f44f710a",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 74,
"avg_line_length": 36.44086021505376,
"alnum_prop": 0.4932625159830825,
"repo_name": "imsplitbit/nova",
"id": "af06c4c9ea200a170b71d4e8a25e929ff9e0708f",
"size": "10886",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/tests/pci/test_pci_request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13518591"
},
{
"name": "Shell",
"bytes": "16950"
}
],
"symlink_target": ""
}
|
import datetime
import decimal
import enum
import functools
import math
import os
import re
import uuid
from unittest import mock
import custom_migration_operations.more_operations
import custom_migration_operations.operations
from django import get_version
from django.conf import SettingsReference, settings
from django.core.validators import EmailValidator, RegexValidator
from django.db import migrations, models
from django.db.migrations.serializer import BaseSerializer
from django.db.migrations.writer import MigrationWriter, OperationWriter
from django.test import SimpleTestCase
from django.utils.deconstruct import deconstructible
from django.utils.functional import SimpleLazyObject
from django.utils.timezone import get_default_timezone, get_fixed_timezone, utc
from django.utils.translation import gettext_lazy as _
from .models import FoodManager, FoodQuerySet
class Money(decimal.Decimal):
def deconstruct(self):
return (
'%s.%s' % (self.__class__.__module__, self.__class__.__name__),
[str(self)],
{}
)
class TestModel1:
def upload_to(self):
return '/somewhere/dynamic/'
thing = models.FileField(upload_to=upload_to)
class TextEnum(enum.Enum):
A = 'a-value'
B = 'value-b'
class TextTranslatedEnum(enum.Enum):
A = _('a-value')
B = _('value-b')
class BinaryEnum(enum.Enum):
A = b'a-value'
B = b'value-b'
class IntEnum(enum.IntEnum):
A = 1
B = 2
class OperationWriterTests(SimpleTestCase):
def test_empty_signature(self):
operation = custom_migration_operations.operations.TestOperation()
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.TestOperation(\n'
'),'
)
def test_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(1, 2)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
'),'
)
def test_kwargs_signature(self):
operation = custom_migration_operations.operations.KwargsOperation(kwarg1=1)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
'),'
)
def test_args_kwargs_signature(self):
operation = custom_migration_operations.operations.ArgsKwargsOperation(1, 2, kwarg2=4)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsKwargsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' kwarg2=4,\n'
'),'
)
def test_nested_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(
custom_migration_operations.operations.ArgsOperation(1, 2),
custom_migration_operations.operations.KwargsOperation(kwarg1=3, kwarg2=4)
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' ),\n'
' arg2=custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=3,\n'
' kwarg2=4,\n'
' ),\n'
'),'
)
def test_multiline_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation("test\n arg1", "test\narg2")
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsOperation(\n"
" arg1='test\\n arg1',\n"
" arg2='test\\narg2',\n"
"),"
)
def test_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation([1, 2])
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' 1,\n'
' 2,\n'
' ],\n'
'),'
)
def test_nested_operation_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation(
arg=[
custom_migration_operations.operations.KwargsOperation(
kwarg1=1,
kwarg2=2,
),
]
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
' kwarg2=2,\n'
' ),\n'
' ],\n'
'),'
)
class WriterTests(SimpleTestCase):
"""
Tests the migration writer (makes migration files from Migration instances)
"""
class NestedEnum(enum.IntEnum):
A = 1
B = 2
def safe_exec(self, string, value=None):
d = {}
try:
exec(string, globals(), d)
except Exception as e:
if value:
self.fail("Could not exec %r (from value %r): %s" % (string.strip(), value, e))
else:
self.fail("Could not exec %r: %s" % (string.strip(), e))
return d
def serialize_round_trip(self, value):
string, imports = MigrationWriter.serialize(value)
return self.safe_exec("%s\ntest_value_result = %s" % ("\n".join(imports), string), value)['test_value_result']
def assertSerializedEqual(self, value):
self.assertEqual(self.serialize_round_trip(value), value)
def assertSerializedResultEqual(self, value, target):
self.assertEqual(MigrationWriter.serialize(value), target)
def assertSerializedFieldEqual(self, value):
new_value = self.serialize_round_trip(value)
self.assertEqual(value.__class__, new_value.__class__)
self.assertEqual(value.max_length, new_value.max_length)
self.assertEqual(value.null, new_value.null)
self.assertEqual(value.unique, new_value.unique)
def test_serialize_numbers(self):
self.assertSerializedEqual(1)
self.assertSerializedEqual(1.2)
self.assertTrue(math.isinf(self.serialize_round_trip(float("inf"))))
self.assertTrue(math.isinf(self.serialize_round_trip(float("-inf"))))
self.assertTrue(math.isnan(self.serialize_round_trip(float("nan"))))
self.assertSerializedEqual(decimal.Decimal('1.3'))
self.assertSerializedResultEqual(
decimal.Decimal('1.3'),
("Decimal('1.3')", {'from decimal import Decimal'})
)
self.assertSerializedEqual(Money('1.3'))
self.assertSerializedResultEqual(
Money('1.3'),
("migrations.test_writer.Money('1.3')", {'import migrations.test_writer'})
)
def test_serialize_constants(self):
self.assertSerializedEqual(None)
self.assertSerializedEqual(True)
self.assertSerializedEqual(False)
def test_serialize_strings(self):
self.assertSerializedEqual(b"foobar")
string, imports = MigrationWriter.serialize(b"foobar")
self.assertEqual(string, "b'foobar'")
self.assertSerializedEqual("föobár")
string, imports = MigrationWriter.serialize("foobar")
self.assertEqual(string, "'foobar'")
def test_serialize_multiline_strings(self):
self.assertSerializedEqual(b"foo\nbar")
string, imports = MigrationWriter.serialize(b"foo\nbar")
self.assertEqual(string, "b'foo\\nbar'")
self.assertSerializedEqual("föo\nbár")
string, imports = MigrationWriter.serialize("foo\nbar")
self.assertEqual(string, "'foo\\nbar'")
def test_serialize_collections(self):
self.assertSerializedEqual({1: 2})
self.assertSerializedEqual(["a", 2, True, None])
self.assertSerializedEqual({2, 3, "eighty"})
self.assertSerializedEqual({"lalalala": ["yeah", "no", "maybe"]})
self.assertSerializedEqual(_('Hello'))
def test_serialize_builtin_types(self):
self.assertSerializedEqual([list, tuple, dict, set, frozenset])
self.assertSerializedResultEqual(
[list, tuple, dict, set, frozenset],
("[list, tuple, dict, set, frozenset]", set())
)
def test_serialize_lazy_objects(self):
pattern = re.compile(r'^foo$')
lazy_pattern = SimpleLazyObject(lambda: pattern)
self.assertEqual(self.serialize_round_trip(lazy_pattern), pattern)
def test_serialize_enums(self):
self.assertSerializedResultEqual(
TextEnum.A,
("migrations.test_writer.TextEnum['A']", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
TextTranslatedEnum.A,
("migrations.test_writer.TextTranslatedEnum['A']", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
BinaryEnum.A,
("migrations.test_writer.BinaryEnum['A']", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
IntEnum.B,
("migrations.test_writer.IntEnum['B']", {'import migrations.test_writer'})
)
self.assertSerializedResultEqual(
self.NestedEnum.A,
(
"migrations.test_writer.WriterTests.NestedEnum['A']",
{'import migrations.test_writer'},
),
)
self.assertSerializedEqual(self.NestedEnum.A)
field = models.CharField(default=TextEnum.B, choices=[(m.value, m) for m in TextEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"('a-value', migrations.test_writer.TextEnum['A']), "
"('value-b', migrations.test_writer.TextEnum['B'])], "
"default=migrations.test_writer.TextEnum['B'])"
)
field = models.CharField(
default=TextTranslatedEnum.A,
choices=[(m.value, m) for m in TextTranslatedEnum],
)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"('a-value', migrations.test_writer.TextTranslatedEnum['A']), "
"('value-b', migrations.test_writer.TextTranslatedEnum['B'])], "
"default=migrations.test_writer.TextTranslatedEnum['A'])"
)
field = models.CharField(default=BinaryEnum.B, choices=[(m.value, m) for m in BinaryEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=["
"(b'a-value', migrations.test_writer.BinaryEnum['A']), "
"(b'value-b', migrations.test_writer.BinaryEnum['B'])], "
"default=migrations.test_writer.BinaryEnum['B'])"
)
field = models.IntegerField(default=IntEnum.A, choices=[(m.value, m) for m in IntEnum])
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.IntegerField(choices=["
"(1, migrations.test_writer.IntEnum['A']), "
"(2, migrations.test_writer.IntEnum['B'])], "
"default=migrations.test_writer.IntEnum['A'])"
)
def test_serialize_choices(self):
class TextChoices(models.TextChoices):
A = 'A', 'A value'
B = 'B', 'B value'
class IntegerChoices(models.IntegerChoices):
A = 1, 'One'
B = 2, 'Two'
class DateChoices(datetime.date, models.Choices):
DATE_1 = 1969, 7, 20, 'First date'
DATE_2 = 1969, 11, 19, 'Second date'
self.assertSerializedResultEqual(TextChoices.A, ("'A'", set()))
self.assertSerializedResultEqual(IntegerChoices.A, ('1', set()))
self.assertSerializedResultEqual(
DateChoices.DATE_1,
('datetime.date(1969, 7, 20)', {'import datetime'}),
)
field = models.CharField(default=TextChoices.B, choices=TextChoices.choices)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.CharField(choices=[('A', 'A value'), ('B', 'B value')], "
"default='B')",
)
field = models.IntegerField(default=IntegerChoices.B, choices=IntegerChoices.choices)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.IntegerField(choices=[(1, 'One'), (2, 'Two')], default=2)",
)
field = models.DateField(default=DateChoices.DATE_2, choices=DateChoices.choices)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.DateField(choices=["
"(datetime.date(1969, 7, 20), 'First date'), "
"(datetime.date(1969, 11, 19), 'Second date')], "
"default=datetime.date(1969, 11, 19))"
)
def test_serialize_uuid(self):
self.assertSerializedEqual(uuid.uuid1())
self.assertSerializedEqual(uuid.uuid4())
uuid_a = uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')
uuid_b = uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')
self.assertSerializedResultEqual(
uuid_a,
("uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8')", {'import uuid'})
)
self.assertSerializedResultEqual(
uuid_b,
("uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2')", {'import uuid'})
)
field = models.UUIDField(choices=((uuid_a, 'UUID A'), (uuid_b, 'UUID B')), default=uuid_a)
string = MigrationWriter.serialize(field)[0]
self.assertEqual(
string,
"models.UUIDField(choices=["
"(uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'), 'UUID A'), "
"(uuid.UUID('c7853ec1-2ea3-4359-b02d-b54e8f1bcee2'), 'UUID B')], "
"default=uuid.UUID('5c859437-d061-4847-b3f7-e6b78852f8c8'))"
)
def test_serialize_functions(self):
with self.assertRaisesMessage(ValueError, 'Cannot serialize function: lambda'):
self.assertSerializedEqual(lambda x: 42)
self.assertSerializedEqual(models.SET_NULL)
string, imports = MigrationWriter.serialize(models.SET(42))
self.assertEqual(string, 'models.SET(42)')
self.serialize_round_trip(models.SET(42))
def test_serialize_datetime(self):
self.assertSerializedEqual(datetime.datetime.utcnow())
self.assertSerializedEqual(datetime.datetime.utcnow)
self.assertSerializedEqual(datetime.datetime.today())
self.assertSerializedEqual(datetime.datetime.today)
self.assertSerializedEqual(datetime.date.today())
self.assertSerializedEqual(datetime.date.today)
self.assertSerializedEqual(datetime.datetime.now().time())
self.assertSerializedEqual(datetime.datetime(2014, 1, 1, 1, 1, tzinfo=get_default_timezone()))
self.assertSerializedEqual(datetime.datetime(2013, 12, 31, 22, 1, tzinfo=get_fixed_timezone(180)))
self.assertSerializedResultEqual(
datetime.datetime(2014, 1, 1, 1, 1),
("datetime.datetime(2014, 1, 1, 1, 1)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
(
"datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc)",
{'import datetime', 'from django.utils.timezone import utc'},
)
)
def test_serialize_fields(self):
self.assertSerializedFieldEqual(models.CharField(max_length=255))
self.assertSerializedResultEqual(
models.CharField(max_length=255),
("models.CharField(max_length=255)", {"from django.db import models"})
)
self.assertSerializedFieldEqual(models.TextField(null=True, blank=True))
self.assertSerializedResultEqual(
models.TextField(null=True, blank=True),
("models.TextField(blank=True, null=True)", {'from django.db import models'})
)
def test_serialize_settings(self):
self.assertSerializedEqual(SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL"))
self.assertSerializedResultEqual(
SettingsReference("someapp.model", "AUTH_USER_MODEL"),
("settings.AUTH_USER_MODEL", {"from django.conf import settings"})
)
def test_serialize_iterators(self):
self.assertSerializedResultEqual(
((x, x * x) for x in range(3)),
("((0, 0), (1, 1), (2, 4))", set())
)
def test_serialize_compiled_regex(self):
"""
Make sure compiled regex can be serialized.
"""
regex = re.compile(r'^\w+$')
self.assertSerializedEqual(regex)
def test_serialize_class_based_validators(self):
"""
Ticket #22943: Test serialization of class-based validators, including
compiled regexes.
"""
validator = RegexValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(message='hello')")
self.serialize_round_trip(validator)
# Test with a compiled regex.
validator = RegexValidator(regex=re.compile(r'^\w+$'))
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(regex=re.compile('^\\\\w+$'))")
self.serialize_round_trip(validator)
# Test a string regex with flag
validator = RegexValidator(r'^[0-9]+$', flags=re.S)
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[0-9]+$', flags=re.RegexFlag['DOTALL'])")
self.serialize_round_trip(validator)
# Test message and code
validator = RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')")
self.serialize_round_trip(validator)
# Test with a subclass.
validator = EmailValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.EmailValidator(message='hello')")
self.serialize_round_trip(validator)
validator = deconstructible(path="migrations.test_writer.EmailValidator")(EmailValidator)(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "migrations.test_writer.EmailValidator(message='hello')")
validator = deconstructible(path="custom.EmailValidator")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ImportError, "No module named 'custom'"):
MigrationWriter.serialize(validator)
validator = deconstructible(path="django.core.validators.EmailValidator2")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ValueError, "Could not find object EmailValidator2 in django.core.validators."):
MigrationWriter.serialize(validator)
def test_serialize_empty_nonempty_tuple(self):
"""
Ticket #22679: makemigrations generates invalid code for (an empty
tuple) default_permissions = ()
"""
empty_tuple = ()
one_item_tuple = ('a',)
many_items_tuple = ('a', 'b', 'c')
self.assertSerializedEqual(empty_tuple)
self.assertSerializedEqual(one_item_tuple)
self.assertSerializedEqual(many_items_tuple)
def test_serialize_range(self):
string, imports = MigrationWriter.serialize(range(1, 5))
self.assertEqual(string, 'range(1, 5)')
self.assertEqual(imports, set())
def test_serialize_builtins(self):
string, imports = MigrationWriter.serialize(range)
self.assertEqual(string, 'range')
self.assertEqual(imports, set())
def test_serialize_unbound_method_reference(self):
"""An unbound method used within a class body can be serialized."""
self.serialize_round_trip(TestModel1.thing)
def test_serialize_local_function_reference(self):
"""A reference in a local scope can't be serialized."""
class TestModel2:
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with self.assertRaisesMessage(ValueError, 'Could not find function upload_to in migrations.test_writer'):
self.serialize_round_trip(TestModel2.thing)
def test_serialize_managers(self):
self.assertSerializedEqual(models.Manager())
self.assertSerializedResultEqual(
FoodQuerySet.as_manager(),
('migrations.models.FoodQuerySet.as_manager()', {'import migrations.models'})
)
self.assertSerializedEqual(FoodManager('a', 'b'))
self.assertSerializedEqual(FoodManager('x', 'y', c=3, d=4))
def test_serialize_frozensets(self):
self.assertSerializedEqual(frozenset())
self.assertSerializedEqual(frozenset("let it go"))
def test_serialize_set(self):
self.assertSerializedEqual(set())
self.assertSerializedResultEqual(set(), ('set()', set()))
self.assertSerializedEqual({'a'})
self.assertSerializedResultEqual({'a'}, ("{'a'}", set()))
def test_serialize_timedelta(self):
self.assertSerializedEqual(datetime.timedelta())
self.assertSerializedEqual(datetime.timedelta(minutes=42))
def test_serialize_functools_partial(self):
value = functools.partial(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_serialize_functools_partialmethod(self):
value = functools.partialmethod(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertIsInstance(result, functools.partialmethod)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_serialize_type_none(self):
self.assertSerializedEqual(type(None))
def test_simple_migration(self):
"""
Tests serializing a simple migration.
"""
fields = {
'charfield': models.DateTimeField(default=datetime.datetime.utcnow),
'datetimefield': models.DateTimeField(default=datetime.datetime.utcnow),
}
options = {
'verbose_name': 'My model',
'verbose_name_plural': 'My models',
}
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.CreateModel("MyModel", tuple(fields.items()), options, (models.Model,)),
migrations.CreateModel("MyModel2", tuple(fields.items()), bases=(models.Model,)),
migrations.CreateModel(
name="MyModel3", fields=tuple(fields.items()), options=options, bases=(models.Model,)
),
migrations.DeleteModel("MyModel"),
migrations.AddField("OtherModel", "datetimefield", fields["datetimefield"]),
],
"dependencies": [("testapp", "some_other_one")],
})
writer = MigrationWriter(migration)
output = writer.as_string()
# We don't test the output formatting - that's too fragile.
# Just make sure it runs for now, and that things look alright.
result = self.safe_exec(output)
self.assertIn("Migration", result)
def test_migration_path(self):
test_apps = [
'migrations.migrations_test_apps.normal',
'migrations.migrations_test_apps.with_package_model',
'migrations.migrations_test_apps.without_init_file',
]
base_dir = os.path.dirname(os.path.dirname(__file__))
for app in test_apps:
with self.modify_settings(INSTALLED_APPS={'append': app}):
migration = migrations.Migration('0001_initial', app.split('.')[-1])
expected_path = os.path.join(base_dir, *(app.split('.') + ['migrations', '0001_initial.py']))
writer = MigrationWriter(migration)
self.assertEqual(writer.path, expected_path)
def test_custom_operation(self):
migration = type("Migration", (migrations.Migration,), {
"operations": [
custom_migration_operations.operations.TestOperation(),
custom_migration_operations.operations.CreateModel(),
migrations.CreateModel("MyModel", (), {}, (models.Model,)),
custom_migration_operations.more_operations.TestOperation()
],
"dependencies": []
})
writer = MigrationWriter(migration)
output = writer.as_string()
result = self.safe_exec(output)
self.assertIn("custom_migration_operations", result)
self.assertNotEqual(
result['custom_migration_operations'].operations.TestOperation,
result['custom_migration_operations'].more_operations.TestOperation
)
def test_sorted_imports(self):
"""
#24155 - Tests ordering of imports.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.AddField("mymodel", "myfield", models.DateTimeField(
default=datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
)),
]
})
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertIn(
"import datetime\n"
"from django.db import migrations, models\n"
"from django.utils.timezone import utc\n",
output
)
def test_migration_file_header_comments(self):
"""
Test comments at top of file.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": []
})
dt = datetime.datetime(2015, 7, 31, 4, 40, 0, 0, tzinfo=utc)
with mock.patch('django.db.migrations.writer.now', lambda: dt):
for include_header in (True, False):
with self.subTest(include_header=include_header):
writer = MigrationWriter(migration, include_header)
output = writer.as_string()
self.assertEqual(
include_header,
output.startswith(
"# Generated by Django %s on 2015-07-31 04:40\n\n" % get_version()
)
)
if not include_header:
# Make sure the output starts with something that's not
# a comment or indentation or blank line
self.assertRegex(output.splitlines(keepends=True)[0], r"^[^#\s]+")
def test_models_import_omitted(self):
"""
django.db.models shouldn't be imported if unused.
"""
migration = type("Migration", (migrations.Migration,), {
"operations": [
migrations.AlterModelOptions(
name='model',
options={'verbose_name': 'model', 'verbose_name_plural': 'models'},
),
]
})
writer = MigrationWriter(migration)
output = writer.as_string()
self.assertIn("from django.db import migrations\n", output)
def test_deconstruct_class_arguments(self):
# Yes, it doesn't make sense to use a class as a default for a
# CharField. It does make sense for custom fields though, for example
# an enumfield that takes the enum class as an argument.
class DeconstructibleInstances:
def deconstruct(self):
return ('DeconstructibleInstances', [], {})
string = MigrationWriter.serialize(models.CharField(default=DeconstructibleInstances))[0]
self.assertEqual(string, "models.CharField(default=migrations.test_writer.DeconstructibleInstances)")
def test_register_serializer(self):
class ComplexSerializer(BaseSerializer):
def serialize(self):
return 'complex(%r)' % self.value, {}
MigrationWriter.register_serializer(complex, ComplexSerializer)
self.assertSerializedEqual(complex(1, 2))
MigrationWriter.unregister_serializer(complex)
with self.assertRaisesMessage(ValueError, 'Cannot serialize: (1+2j)'):
self.assertSerializedEqual(complex(1, 2))
def test_register_non_serializer(self):
with self.assertRaisesMessage(ValueError, "'TestModel1' must inherit from 'BaseSerializer'."):
MigrationWriter.register_serializer(complex, TestModel1)
|
{
"content_hash": "04a90cc06b4653df31ddd766d32e8c79",
"timestamp": "",
"source": "github",
"line_count": 749,
"max_line_length": 118,
"avg_line_length": 41.28170894526035,
"alnum_prop": 0.6139068564036223,
"repo_name": "georgemarshall/django",
"id": "ca924f35ecc56f23445bc185fcb6f36a452f6211",
"size": "30924",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/migrations/test_writer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53023"
},
{
"name": "HTML",
"bytes": "172977"
},
{
"name": "JavaScript",
"bytes": "448123"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12112373"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from sqlalchemy import Column, ForeignKey
from sqlalchemy.sql.sqltypes import Boolean, BigInteger, Integer, CHAR, SmallInteger
from destiny.main.bdd import Base
from destiny.utils import rep_model
class TeamStats(Base):
__tablename__ = 'teamstats'
gameId = Column(BigInteger, ForeignKey("matches.gameId"), primary_key=True)
teamId = Column(SmallInteger, primary_key=True)
firstDragon = Column(Boolean)
firstInhibitor = Column(Boolean)
baronKills = Column(SmallInteger)
firstRiftHerald = Column(Boolean)
firstBaron = Column(Boolean)
riftHeraldKills = Column(SmallInteger)
firstBlood = Column(Boolean)
firstTower = Column(Boolean)
inhibitorKills = Column(SmallInteger)
towerKills = Column(SmallInteger)
win = Column(CHAR(6))
dragonKills = Column(SmallInteger)
def __repr__(self):
return rep_model(self)
|
{
"content_hash": "3b81f83c26077c1201b4608f4e23e302",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 84,
"avg_line_length": 32.592592592592595,
"alnum_prop": 0.7261363636363637,
"repo_name": "Schluucht/Destiny",
"id": "8ae4c77a86e3d3e0683002add2162cd1d16e33a8",
"size": "880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "destiny/main/bdd/models/teamstats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57810"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FeaturedResource'
db.create_table('resources_featuredresource', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('topic', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['resources.Topic'])),
('resource_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['resources.ResourceType'])),
('resource', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['resources.Resource'])),
))
db.send_create_signal('resources', ['FeaturedResource'])
# Adding unique constraint on 'FeaturedResource', fields ['topic', 'resource_type']
db.create_unique('resources_featuredresource', ['topic_id', 'resource_type_id'])
def backwards(self, orm):
# Removing unique constraint on 'FeaturedResource', fields ['topic', 'resource_type']
db.delete_unique('resources_featuredresource', ['topic_id', 'resource_type_id'])
# Deleting model 'FeaturedResource'
db.delete_table('resources_featuredresource')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'resources.featuredresource': {
'Meta': {'unique_together': "(('topic', 'resource_type'),)", 'object_name': 'FeaturedResource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['resources.Resource']"}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['resources.ResourceType']"}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['resources.Topic']"})
},
'resources.resource': {
'Meta': {'object_name': 'Resource'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['resources.ResourceType']"}),
'show': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['resources.Topic']", 'symmetrical': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'})
},
'resources.resourcetype': {
'Meta': {'object_name': 'ResourceType'},
'color': ('django.db.models.fields.CharField', [], {'default': "'purple'", 'unique': 'True', 'max_length': '20'}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'resources.topic': {
'Meta': {'ordering': "['name']", 'object_name': 'Topic'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'official_website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['resources']
|
{
"content_hash": "aafeb1206a3d6b587b057ea00fdf7d18",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 182,
"avg_line_length": 72.88495575221239,
"alnum_prop": 0.5618018455560952,
"repo_name": "amitskwalia/codesters",
"id": "08b7813dcdeedddeec5eca348877ddf26d6b7439",
"size": "8260",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "resources/migrations/0004_auto__add_featuredresource__add_unique_featuredresource_topic_resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "696169"
},
{
"name": "C++",
"bytes": "2995"
},
{
"name": "CSS",
"bytes": "91768"
},
{
"name": "JavaScript",
"bytes": "75416"
},
{
"name": "Python",
"bytes": "1345968"
}
],
"symlink_target": ""
}
|
import constant
from database import Database
from arduino import Arduino
from flask import Flask, jsonify, request, render_template
from flask_socketio import SocketIO, emit
app = Flask(__name__)
socketio = SocketIO(app)
arduino = Arduino()
database = Database()
@app.route("/")
def index():
return render_template("acm.html")
@app.route("/getTime")
def getTime():
return arduino.getTime()
@app.route("/resetTime")
def resetTime():
return arduino.resetTime()
@app.route("/registerCard", methods=['POST'])
def registerCard():
data = request.get_json()
# Register RFID
arduinoStatus = arduino.registerCard(data)
if arduinoStatus == data["uid"]:
print("RFID tag registered")
# if new to ACM, create a new entry
if data["isNew"]:
return database.insertUser(data)
# modify if not new to ACM
else:
database.existingUser(data)
return arduinoStatus
else:
return str(0)
@app.route("/laserLog/<laser>/<int:id>/<int:elapsedTime>/<int:existingTime>")
def laserLog(laser, id, elapsedTime, existingTime):
data = {"laserType": laser, "uid": id, "elapsedTime": elapsedTime, "existingTime": existingTime}
database.laserLog(data)
return database.insertLaserTime(data)
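# Example request (illustrative values only): GET /laserLog/laserA/42/300/1200
# builds {"laserType": "laserA", "uid": 42, "elapsedTime": 300, "existingTime": 1200},
# records it with database.laserLog(data) and returns database.insertLaserTime(data).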
@app.route("/scanTest/<int:id>")
def serialTest(id):
# need to figure out why large ids are being sent randomly - probably from bad reads
print("ID: " + str(id));
if 0 < id and id < 50000:
# ignore guest cards
if id not in constant.GUEST_IDS:
# immediately send ID to web app
socketio.emit('scan', id)
# once data is received, send to web app
data = database.retrieveUser(id)
if data != 0:
socketio.emit('data', data)
# send data to arduino
refresh(id, data)
database.scanLog(id)
return str(id)
def refresh(id, data):
# format user data according to card protocol
userData = [ str(id), data[constant.COL_MEMBER_TYPE] ]
userData.extend( data[constant.COL_USES_LASER_A:constant.COL_USES_3D+1] )
return arduino.refreshUser(userData)
@app.route("/lookUp/<int:id>")
def lookUp(id):
data = database.retrieveUser(id)
if data != 0:
socketio.emit('data', data)
return str(1)
return data
@app.route("/laserData/<type>")
def laserData(type):
data = database.retrieveData(type)
return jsonify(data)
if (__name__ == "__main__"):
# app.run(host='0.0.0.0')
socketio.run(app, host='0.0.0.0')
|
{
"content_hash": "4c8ee184f63a91b1af3dbc79a5891dbf",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 97,
"avg_line_length": 24.893617021276597,
"alnum_prop": 0.6965811965811965,
"repo_name": "MakerLabsVan/ACM",
"id": "2b1a32bcb5e3390efa0b5a734e51348e1dec638b",
"size": "2340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acm-web/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "28170"
},
{
"name": "C",
"bytes": "8592"
},
{
"name": "CSS",
"bytes": "3105"
},
{
"name": "HTML",
"bytes": "11607"
},
{
"name": "JavaScript",
"bytes": "5987"
},
{
"name": "Python",
"bytes": "14893"
}
],
"symlink_target": ""
}
|
"""Sample and helpers for Google Assistant API."""
|
{
"content_hash": "2e7554941c5db4f1338c137bba4294d5",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 50,
"avg_line_length": 51,
"alnum_prop": 0.7254901960784313,
"repo_name": "Jay2645/Unreal-Google-Assistant",
"id": "e771e048ee4a50a0e6e3cd050d6be89c235e49c1",
"size": "631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Content/Scripts/googlesamples/assistant/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "164"
},
{
"name": "C#",
"bytes": "1371"
},
{
"name": "C++",
"bytes": "8501"
},
{
"name": "Python",
"bytes": "43431"
}
],
"symlink_target": ""
}
|
from threading import Thread
from obnl.core.impl.node import ClientNode as _ClientNodeImpl
class ClientNode(object):
def __init__(self, host, vhost, username, password, config_file, input_attributes=None, output_attributes=None, is_first=False):
self._node_impl = _ClientNodeImpl(host, vhost, username, password, self, config_file, input_attributes, output_attributes, is_first)
@property
def name(self):
"""
:return: the node name. It is the ID of the Node inside the simulation
"""
return self._node_impl.name
@property
def simulation(self):
"""
        :return: the simulation ID. Common to all nodes in a simulation (defined by the scheduler)
"""
return self._node_impl.simulation
@property
def input_values(self):
"""
:return: a map of input values. The keys are the input attributes
"""
return self._node_impl.input_values
@property
def input_attributes(self):
"""
:return: the list of input attributes
"""
return self._node_impl.input_attributes
@property
def output_attributes(self):
"""
:return: the list of output attributes
"""
return self._node_impl.output_attributes
def start(self):
"""
Starts the listening
"""
Thread(target=self._node_impl.start).start()
def step(self, current_time, time_step):
"""
Abstract function to be implemented by children.
This function is called once per Node per simulation step.
:param current_time: the current time of the simulation
:param time_step: the time step from the last call of this function
"""
raise NotImplementedError('Abstract function call from '+str(self.__class__))
def update_attribute(self, attr, value):
"""
Sends the new attribute value to those who want to know.
:param attr: the attribute to communicate
:param value: the new value of the attribute
"""
self._node_impl.update_attribute(attr, value)
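# --- Illustrative usage sketch (not part of the original obnl source) ---
# A minimal concrete node, assuming a broker on "localhost" with the shown
# credentials/config file and made-up attribute names "setpoint" and "power";
# input_values is assumed to behave like a dict keyed by input attribute.
class ConstantPowerNode(ClientNode):
    def step(self, current_time, time_step):
        # read the latest value pushed to one of this node's input attributes
        setpoint = self.input_values.get("setpoint", 0.0)
        # publish a new value on one of this node's output attributes
        self.update_attribute("power", setpoint * 2.0)
# node = ConstantPowerNode("localhost", "obnl_vhost", "user", "password",
#                          "node_config.json", input_attributes=["setpoint"],
#                          output_attributes=["power"], is_first=False)
# node.start()  # listens in a background thread; step() is driven by the scheduler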
|
{
"content_hash": "488f5ace37885225843efe57068424d4",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 140,
"avg_line_length": 29.904109589041095,
"alnum_prop": 0.6106275767292716,
"repo_name": "IntegrCiTy/obnl",
"id": "6c48fbab9b492563f6ce022cafaa9cb05669f49c",
"size": "2183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "obnl/core/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22753"
}
],
"symlink_target": ""
}
|
from jsonable import JSONable
from .unavailable import Unavailable
class Page(JSONable):
__slots__ = ('id', 'namespace', 'title')
def initialize(self, id, namespace, title):
self.id = Unavailable.otherwise(id, int)
self.namespace = int(namespace)
self.title = str(title)
|
{
"content_hash": "42fe79e54ac5fe7794b732a530a27f28",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 48,
"avg_line_length": 27.818181818181817,
"alnum_prop": 0.6601307189542484,
"repo_name": "halfak/MediaWiki-events",
"id": "7cf484e8577e1ecbd2b88f34b8dfaa36ea806392",
"size": "306",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mwevents/types/page.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75110"
}
],
"symlink_target": ""
}
|
"""Support for binary sensor using GC100."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import DEVICE_DEFAULT_NAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import CONF_PORTS, DATA_GC100
_SENSORS_SCHEMA = vol.Schema({cv.string: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_PORTS): vol.All(cv.ensure_list, [_SENSORS_SCHEMA])}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the GC100 devices."""
binary_sensors = []
ports = config[CONF_PORTS]
for port in ports:
for port_addr, port_name in port.items():
binary_sensors.append(
GC100BinarySensor(port_name, port_addr, hass.data[DATA_GC100])
)
add_entities(binary_sensors, True)
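# Illustrative configuration (assumed key names, inferred from PLATFORM_SCHEMA above):
#   binary_sensor:
#     - platform: gc100
#       ports:
#         - "1": Hallway motion
#         - "2": Front door
# Each port mapping would yield one GC100BinarySensor named by its value.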
class GC100BinarySensor(BinarySensorEntity):
"""Representation of a binary sensor from GC100."""
def __init__(self, name, port_addr, gc100):
"""Initialize the GC100 binary sensor."""
self._name = name or DEVICE_DEFAULT_NAME
self._port_addr = port_addr
self._gc100 = gc100
self._state = None
# Subscribe to be notified about state changes (PUSH)
self._gc100.subscribe(self._port_addr, self.set_state)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
return self._state
def update(self):
"""Update the sensor state."""
self._gc100.read_sensor(self._port_addr, self.set_state)
def set_state(self, state):
"""Set the current state."""
self._state = state == 1
self.schedule_update_ha_state()
|
{
"content_hash": "d4ed4a5feb15091f375e91a9a07e2d3d",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 86,
"avg_line_length": 31.231884057971016,
"alnum_prop": 0.6719257540603248,
"repo_name": "toddeye/home-assistant",
"id": "f93076196a342d389fa76a276446731a765240fb",
"size": "2155",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/gc100/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from model.contact import Contact
#from random import randrange
import random
def test_delete_some_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="testcontact"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.id)
new_contacts = db.get_contact_list()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
{
"content_hash": "adad5ead4b29013bdbe2054589b3d709",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 123,
"avg_line_length": 38.5625,
"alnum_prop": 0.7050243111831442,
"repo_name": "dariansk/python_training",
"id": "4193dfde20cbeac857c10dd9d30d4532fdd4a394",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_del_contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "531"
},
{
"name": "Python",
"bytes": "39013"
}
],
"symlink_target": ""
}
|
import time
import pathlib
import math
import ezdxf
from ezdxf.math import (
global_bspline_interpolation,
BoundingBox,
linspace,
BSpline,
)
from ezdxf.render import random_3d_path
CWD = pathlib.Path("~/Desktop/Outbox").expanduser()
if not CWD.exists():
CWD = pathlib.Path(".")
def profile_bspline_interpolation(count, path):
for _ in range(count):
global_bspline_interpolation(path)
def profile_vertex_calculation(count, spline, num):
for _ in range(count):
for t in linspace(0.0, spline.max_t, num):
spline.point(t)
def profile(text, func, *args):
t0 = time.perf_counter()
func(*args)
t1 = time.perf_counter()
print(f"{text} {t1 - t0:.3f}s")
def export_path(path):
doc = ezdxf.new()
msp = doc.modelspace()
bbox = BoundingBox(path)
msp.add_polyline3d(path, dxfattribs={"layer": "Path", "color": 2})
spline = msp.add_spline(dxfattribs={"layer": "B-spline", "color": 1})
curve = global_bspline_interpolation(path)
spline.apply_construction_tool(curve)
doc.set_modelspace_vport(center=bbox.center, height=bbox.size[1])
doc.saveas(CWD / "path1.dxf")
path = list(random_3d_path(100, max_step_size=10, max_heading=math.pi * 0.8))
export_path(path)
profile(
"B-spline interpolation 300x: ", profile_bspline_interpolation, 300, path
)
spline = BSpline.from_fit_points(path, degree=3)
profile(
"calculate 25x 1000 B-spline vertices: ",
profile_vertex_calculation,
25,
spline,
1000,
)
|
{
"content_hash": "c3a62ad677bb8633ed9f0d1569f7f3a4",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 24.918032786885245,
"alnum_prop": 0.6657894736842105,
"repo_name": "mozman/ezdxf",
"id": "a6e3dc1cc831f12057a6164a5942c8e15377602a",
"size": "1585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiling/bspline_interpolation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5745"
},
{
"name": "CSS",
"bytes": "3565"
},
{
"name": "Common Lisp",
"bytes": "727"
},
{
"name": "Cython",
"bytes": "111923"
},
{
"name": "HTML",
"bytes": "1417"
},
{
"name": "JavaScript",
"bytes": "11132"
},
{
"name": "Python",
"bytes": "6336553"
}
],
"symlink_target": ""
}
|
'''
Created on 5 Mar 2010
@author: tcezard
'''
import logging
from utils import DNA_tools, utils_logging
import sys
from pprint import pprint
#############################################
# Genes and transcript objects #
#############################################
_all_genes={}
_all_transcripts={}
_all_exons={}
class Gene(object):
"""A gene is a structure that contains Transcripts."""
    def __init__(self, gene_id, reference, transcript_list=None):
        self.gene_id=gene_id
        self.reference=reference
        # build a fresh list so instances never share a mutable default argument
        self.transcript_list=list(transcript_list) if transcript_list else []
def __str__(self):
return '%s\t%s\n'%(self.reference,self.gene_id)+ '\n'.join([str(t) for t in self.transcript_list])
def to_string(self):
out=['gene %s on %s with %d transcripts'%(self.gene_id,str(self.reference),len(self.transcript_list))]
out.append('\n'.join([t.to_string() for t in self.transcript_list]))
return '\n'.join(out)
def _add_transcript(self,transcript):
self.transcript_list.append(transcript)
class Transcript(object):
"""A Transcript is a structure that contains Exons."""
def __init__(self, gene, transcript_id, strand, transcript_start=None, transcript_end=None, cds_start=None, cds_end=None, exon_list=[]):
self.gene=gene
self.transcript_id=transcript_id
self.transcript_start=transcript_start
self.transcript_end=transcript_end
self.cds_start=cds_start
self.cds_end=cds_end
if DNA_tools.strand_is_positive(strand):
self.strand='+'
self._set_strand_specific_functions()
else:
self.strand='-'
self._set_strand_specific_functions()
self.exon_list=[]
self._add_exons(exon_list)
def __str__(self):
return '\t'.join([str(self.transcript_id), str(self.transcript_start),
str(self.transcript_end), str(self.cds_start), str(self.cds_end), str(self.strand)])
def __cmp__(self, other):
return cmp(self.transcript_id,other.transcript_id)
def to_string(self):
out=['transcript %s with %d exons'%(self.transcript_id,len(self.exon_list))]
#out.append('\n'.join([t.to_string() for t in self.transcript_list]))
return '\n'.join(out)
def get_number_of_exons(self):
return len(self.exon_list)
def get_exon_number(self,exon):
try:
return self.exon_list.index(exon)+1
except ValueError:
return None
def get_position_in_mrna(self,position):
pass
def has_cds(self):
if self.cds_start and self.cds_end:
return True
else:
return False
def get_position_in_cdna(self,position):
pass
def get_mrna_length(self):
if not hasattr(self, "mrna_length"):
            self.mrna_length=self._get_mrna_length()
return self.mrna_length
def get_cdna_sequence(self, chr_sequence, complete_sequence=True, trim_sequence=False):
"""retrieve the cDNA sequence from a transcript and a chromosome sequence"""
if hasattr(self, "cdna_sequence") and self.cdna_sequence:
return self.cdna_sequence
else:
return self._get_cdna_sequence(chr_sequence,complete_sequence,trim_sequence)
def _add_exon(self,exon):
self.exon_list.append(exon)
if DNA_tools.strand_is_positive(self.strand):
self.exon_list.sort()
else:
self.exon_list.sort(reverse=True)
def _add_exons(self,exon_list):
self.exon_list.extend(exon_list)
if DNA_tools.strand_is_positive(self.strand):
self.exon_list.sort()
else:
self.exon_list.sort(reverse=True)
def _get_mrna_length(self):
length=0
for exon in self.exon_list:
length+=exon.exon_end-exon.exon_start+1
return length
def _set_strand_specific_functions(self):
if self.strand=='+':
self.get_position_in_mrna=self._get_position_in_mrna_pos_strand
self.get_position_in_cdna=self._get_position_in_cdna_pos_strand
else:
self.get_position_in_mrna=self._get_position_in_mrna_neg_strand
self.get_position_in_cdna=self._get_position_in_cdna_neg_strand
def _get_position_in_mrna_pos_strand(self,position):
"""transform a position in the genome on a position in this mrna if the mrna is on the pos strand"""
i=0
over=False
pos_in_mRNA=0
while not over and i<len(self.exon_list):
exon=self.exon_list[i]
i+=1
if position > exon.exon_end:
pos_in_mRNA+=exon.exon_end-exon.exon_start+1
elif position >= exon.exon_start:
pos_in_mRNA+=(position-exon.exon_start)+1
over=True
else:
logging.error("missuse of get_position_in_mrna function with position %s"%(position))
logging.error("start=%s end=%s"%(exon.exon_start, exon.exon_end))
raise Exception()
pos_in_mRNA=0
if not over:
logging.warning("position %s exons: %s"%(position, ', '.join(['%s-%s'%(exon.exon_start, exon.exon_end) for exon in self.exon_list])))
return None
else:
return pos_in_mRNA
    def _get_position_in_mrna_neg_strand(self,position):
        """transform a position in the genome into a position in this mRNA when the mRNA is on the negative strand"""
        i=0
        over=False
        pos_in_mRNA=0
        while not over and i<len(self.exon_list):
            exon=self.exon_list[i]
            i+=1
            if position < exon.exon_start:
                #position lies downstream of this exon in transcript orientation: count the full exon
                pos_in_mRNA+=exon.exon_end-exon.exon_start+1
            elif position <= exon.exon_end:
                #position falls within this exon: count from the exon end (the 5' end on the negative strand)
                pos_in_mRNA+=(exon.exon_end-position)+1
                over=True
            else:
                logging.error("misuse of get_position_in_mrna function with position %s"%(position))
                logging.error("start=%s end=%s"%(exon.exon_start, exon.exon_end))
                raise Exception()
        if not over:
            logging.warning("position %s exons: %s"%(position, ', '.join(['%s-%s'%(exon.exon_start, exon.exon_end) for exon in self.exon_list])))
            return None
        else:
            return pos_in_mRNA
def _get_position_in_cdna_pos_strand(self,position):
"""transform a position in the genome on a position in this cDNA if the cDNA is on the pos strand"""
i=0
over=False
pos_in_cDNA=0
while not over and i<len(self.exon_list):
exon=self.exon_list[i]
i+=1
if exon.exon_cds_start is None:
continue
if position > exon.exon_cds_end:
pos_in_cDNA+=exon.exon_cds_end-exon.exon_cds_start+1
elif position >= exon.exon_cds_start:
pos_in_cDNA+=(position-exon.exon_cds_start)+1
over=True
else:
logging.error("missuse of get_position_in_cdna function with position %s"%(position))
logging.error("%s"%(', '.join(['%s-%s'%(exon.exon_cds_start,exon.exon_cds_end) for exon in self.exon_list])))
raise Exception()
if not over:
logging.warning("position %s not in cds of %s exons: %s"%(position,self.transcript_id,
', '.join(['%s-%s'%(exon.exon_cds_start, exon.exon_cds_end) for exon in self.exon_list])))
return None
else:
return pos_in_cDNA
def _get_position_in_cdna_neg_strand(self,position):
"""transform a position in the genome on a position in this cDNA if the cDNA is on the neg strand"""
i=0
over=False
pos_in_cDNA=0
while not over and i<len(self.exon_list):
exon=self.exon_list[i]
i+=1
if exon.exon_cds_start is None:
continue
if position < exon.exon_cds_start:
pos_in_cDNA+=exon.exon_cds_end-exon.exon_cds_start+1
elif position <= exon.exon_cds_end:
pos_in_cDNA+=(exon.exon_cds_end-position)+1
over=True
else:
logging.error("missuse of get_position_in_cdna function with position %s"%(position))
logging.error("%s"%(', '.join(['%s-%s'%(exon.exon_cds_start,exon.exon_cds_end) for exon in self.exon_list])))
raise Exception()
if not over:
logging.warning("position %s not in cds of %s exons: %s"%(position,self.transcript_id,
', '.join(['%s-%s'%(exon.exon_cds_start, exon.exon_cds_end) for exon in self.exon_list])))
return None
else:
return pos_in_cDNA
def _get_cdna_sequence(self, chr_sequence, complete_sequence=True, trim_sequence=False):
"""retrieve the cDNA sequence from a transcript and a chromosome sequence"""
cdna_seq=[]
exons=self.exon_list[:]
exons.sort()
for exon in exons:
if exon.exon_cds_start and exon.exon_cds_end:
if int(exon.exon_cds_start)-1<len(chr_sequence) and int(exon.exon_cds_end)<=len(chr_sequence):
cdna_seq.append(chr_sequence[int(exon.exon_cds_start)-1:int(exon.exon_cds_end)])
else:
logging.error('coding sequence %s-%s outside of chromosome boundaries length=%s'%(int(exon.exon_cds_start),int(exon.exon_cds_end),len(chr_sequence)))
return None
full_sequence = ''.join(cdna_seq)
if len(full_sequence)==0:
return
#Test the sequence to see if its length is a multiple of 3
if complete_sequence or trim_sequence:
remaining=len(full_sequence)%3
if not remaining==0:
if complete_sequence:
to_add=3-remaining
if DNA_tools.strand_is_positive(self.strand):
#add remaining bases at the end of the sequence
if self.cds_end+to_add<=len(chr_sequence):
full_sequence=full_sequence+chr_sequence[self.cds_end-1:self.cds_end+to_add-1]
else:
full_sequence=full_sequence+('N'*to_add)
else:
#add remaining bases at the beginning of the sequence
if self.cds_start-to_add>0:
full_sequence=chr_sequence[self.cds_start-to_add-1:self.cds_start-1]+full_sequence
else:
full_sequence=('N'*to_add)+full_sequence
                    logging.warning('%s will have %d bases added because its length=%s'%(self.transcript_id, to_add,len(full_sequence)-to_add))
if trim_sequence:
if DNA_tools.strand_is_positive(self.strand):
full_sequence=full_sequence[:-remaining]
else:
full_sequence=full_sequence[remaining:]
                    logging.warning('%s will have %d bases trimmed off the end because its length=%s; it is now %s'%(self.transcript_id, remaining,len(''.join(cdna_seq)),len(full_sequence)))
if DNA_tools.strand_is_positive(self.strand):
self.cdna_sequence=full_sequence.upper()
else:
self.cdna_sequence=DNA_tools.rev_complements(full_sequence).upper()
# Test the sequence to see if it starts with an M
AA, rest = translate_sequence(self.cdna_sequence[0:3])
if AA != 'M':
logging.warning('%s starts with %s instead of an M'%(self.transcript_id,AA))
# Test the sequence to see if it ends with a STOP
AA, rest = translate_sequence(self.cdna_sequence[-3:])
if AA != '*':
logging.warning('%s ends with %s instead of a STOP'%(self.transcript_id, AA))
return self.cdna_sequence
def _get_transcript_start(self):
"""calculate the transcript start from the exons"""
if len(self.exon_list)>0:
if DNA_tools.strand_is_positive(self.strand):
return self.exon_list[0].exon_start
else:
return self.exon_list[-1].exon_start
else:
return None
def _get_transcript_end(self):
"""calculate the transcript end from the exons"""
if len(self.exon_list)>0:
if DNA_tools.strand_is_positive(self.strand):
return self.exon_list[-1].exon_end
else:
return self.exon_list[0].exon_end
else:
return None
def _get_cds_start(self):
"""calculate the cds start from the exons"""
if len(self.exon_list)>0:
if DNA_tools.strand_is_positive(self.strand):
for exon in self.exon_list:
if not exon.exon_cds_start is None:
return exon.exon_cds_start
else:
for exon in self.exon_list[::-1]:
if not exon.exon_cds_start is None:
return exon.exon_cds_start
else:
return None
def _get_cds_end(self):
"""calculate the cds end from the exons"""
if len(self.exon_list)>0:
if DNA_tools.strand_is_positive(self.strand):
for exon in self.exon_list[::-1]:
if not exon.exon_cds_end is None:
return exon.exon_cds_end
else:
for exon in self.exon_list:
if not exon.exon_cds_end is None:
return exon.exon_cds_end
else:
return None
def _update_from_exons(self):
if not self.transcript_start:
self.transcript_start=self._get_transcript_start()
if not self.transcript_end:
self.transcript_end=self._get_transcript_end()
if self.cds_start and self.cds_end:
for exon in self.exon_list:
exon._update_cds_info(self.cds_start, self.cds_end)
if not self.cds_start:
self.cds_start=self._get_cds_start()
if not self.cds_end:
self.cds_end=self._get_cds_end()
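#Illustrative worked example, not part of the original module: for a '+' strand
#transcript with exons 100-150 and 200-260, _get_position_in_mrna_pos_strand maps
#genomic position 210 to mRNA position (150-100+1) + (210-200+1) = 51 + 11 = 62;
#on the '-' strand the same exons are walked from the highest coordinates first
#and positions are counted from each exon end instead.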
class Exon(object):
""""""
def __init__(self, transcript, exon_start, exon_end, exon_cds_start=None, exon_cds_end=None):
self.transcript=transcript
self.exon_start=exon_start
self.exon_end=exon_end
self.exon_cds_start=exon_cds_start
self.exon_cds_end=exon_cds_end
def __str__(self):
return '\t'.join([str(self.transcript.transcript_id), str(self.get_exon_number()),
str(self.exon_start), str(self.exon_end), str(self.exon_cds_start), str(self.exon_cds_end) ])
def __cmp__(self, other):
return self.exon_start-other.exon_start
def get_exon_number(self):
return self.transcript.get_exon_number(self)
def get_length(self):
return self.exon_end-self.exon_start+1
def _update_cds_info(self,cds_start,cds_end):
if self.exon_start>cds_end or self.exon_end<cds_start:
return
if self.exon_start>cds_start:
self.exon_cds_start=self.exon_start
else:
self.exon_cds_start=cds_start
if self.exon_end<cds_end:
self.exon_cds_end=self.exon_end
else:
self.exon_cds_end=cds_end
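#Illustrative worked example, not part of the original module, of Exon._update_cds_info:
#for an exon spanning 100-200 within a transcript whose CDS spans 150-500, the
#exon-level CDS becomes 150-200 (the intersection of the exon with the CDS interval);
#an exon that does not overlap the CDS keeps exon_cds_start/exon_cds_end as None.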
def get_gene(gene_id,reference,transcript_list=[]):
"""This function generate a gene from a list of transcript"""
key='%s\t%s'%(reference,gene_id)
if not _all_genes.has_key(key):
gene = Gene(gene_id, reference, transcript_list)
_all_genes[key]=gene
else:
gene = _all_genes.get(key)
return gene
def get_transcript(reference, gene_id, transcript_id, strand, transcript_start=None, transcript_end=None, cds_start=None, cds_end=None, exon_list=[]):
"""This function generate a transcript with its list of exons"""
key='%s\t%s'%(reference,transcript_id)
if not _all_transcripts.has_key(key):
gene = get_gene(gene_id, reference,transcript_list=[])
transcript = Transcript(gene, transcript_id, strand, transcript_start, transcript_end, cds_start, cds_end, exon_list)
gene._add_transcript(transcript)
_all_transcripts[key]=transcript
else:
transcript = _all_transcripts.get(key)
return transcript
def get_exon(reference, gene_id, transcript_id, strand, exon_start, exon_end, exon_cds_start=None, exon_cds_end=None,
transcript_start=None, transcript_end=None, cds_start=None, cds_end=None):
""" """
key = '%s\t%s\t%s\t%s'%(reference,transcript_id,exon_start, exon_end)
if not _all_exons.has_key(key):
exon_list=[]
transcript = get_transcript(reference=reference, gene_id=gene_id, transcript_id=transcript_id,
strand=strand, transcript_start=transcript_start, transcript_end=transcript_end,
cds_start=cds_start, cds_end=cds_end, exon_list=exon_list)
exon = Exon(transcript, exon_start, exon_end, exon_cds_start, exon_cds_end)
transcript._add_exon(exon)
_all_exons[key]=exon
else:
exon = _all_exons.get(key)
return exon
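def _example_build_annotation():
    """Illustrative sketch, not part of the original module: builds a one-gene,
    one-transcript, two-exon structure through the factory functions above.
    All identifiers and coordinates are hypothetical."""
    for exon_start, exon_end in [(100, 150), (200, 260)]:
        exon = get_exon(reference='chrX_example', gene_id='EXAMPLE_GENE',
                        transcript_id='EXAMPLE_GENE.1', strand='+',
                        exon_start=exon_start, exon_end=exon_end,
                        cds_start=120, cds_end=240)
    #propagate transcript boundaries and per-exon CDS intervals from the exons
    exon.transcript._update_from_exons()
    return exon.transcript.gene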
def compare_exon_start(x, y):
exon_start1=x[0]
exon_start2=y[0]
if int(exon_start1)>int(exon_start2): return 1
elif int(exon_start1)<int(exon_start2): return -1
else: return 0
def compare_exon_end(x, y):
exon_end1=x[1]
exon_end2=y[1]
if int(exon_end1)>int(exon_end2): return 1
elif int(exon_end1)<int(exon_end2): return -1
else: return 0
def translate_sequence(sequence):
AAs=[]
for pos in range(0,len(sequence)-2,3):
aa=DNA_tools.gencode.get(sequence[pos:pos+3].upper())
if aa:
AAs.append(aa)
else:
AAs.append("X")
if len(sequence)%3==0:
rest=''
else:
rest=sequence[-(len(sequence)%3):]
return (''.join(AAs),rest)
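#Illustrative example, not part of the original module, assuming DNA_tools.gencode
#maps codons to single-letter amino acids with '*' for stop codons (as the checks
#in _get_cdna_sequence suggest): translate_sequence('ATGGCCTAA') would return
#('MA*', '') and translate_sequence('ATGGC') would return ('M', 'GC').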
def read_gff_file_old(gff_file):
"""Read gff file expects one feature per line in the following format gff3 format."""
#II Coding_transcript CDS 3802 3984 . + 1 ID=CDS:2L52.1;Note=Zinc finger%2C C2H2 type;Parent=Transcript:2L52.1;status=Partially_confirmed;wormpep=WP:CE32090
nb_exon=0
nb_transcript=0
nb_cds=0
nb_gene=0
all_features_per_transcript={}
all_gene_per_chr={}
exonic_type=['CDS','exon','five_prime_UTR','three_prime_UTR','intron']
transcript_type=['mRNA','ncRNA']
gene_type=['gene']
allowed_type=[]
allowed_type.extend(exonic_type)
allowed_type.extend(transcript_type)
allowed_type.extend(gene_type)
allowed_source=[]
unused_type={}
open_exon=utils_logging.open_input_file(gff_file, pipe=False)
#allowed_source=['Coding_transcript']
for line in open_exon:
if line.startswith("#"):
continue
sp_line=line.strip().split('\t')
if len(sp_line)<2:
continue
chr=sp_line[0]
start=int(sp_line[3])
end=int(sp_line[4])
if sp_line[6]=='.' or DNA_tools.strand_is_positive(sp_line[6]):
strand=1
else:
strand=-1
source=sp_line[1]
        #skip things that are not in the allowed source or not in the allowed type
if len(allowed_source)>0 and source not in allowed_source:
continue
type=sp_line[2]
if type not in allowed_type:
if unused_type.has_key(type):
unused_type[type]+=1
else:
unused_type[type]=1
continue
attributes=sp_line[8].split(';')
feature_id=None
parents=[]
name=None
        #Go through the attributes to find out what type of line it is
for attribute in attributes:
attribute_elements=attribute.split('=')
type_attr=attribute_elements[0]
attr_str="=".join(attribute_elements[1:])
if type_attr=='ID':
all_attr=attr_str.split(':')
if len(all_attr)>1:
id_type=all_attr[0]
feature_id =all_attr[1]
else:
feature_id =all_attr[0]
elif type_attr=='Parent':
for attr_elmt in attr_str.split(','):
all_attr = attr_elmt.split(':')
if len(all_attr)==2:
parents.append(all_attr)
elif len(all_attr)==1:
parents.append((None,all_attr[0]))
else:
logging.error("Unrecognized attribute %s"%(attr_elmt))
elif type_attr=='Name':
name=attr_str
else:
pass
if type in exonic_type:
for parent_type, parent_id in parents:
if parent_type is None:
logging.debug("parent type can't be checked for %s."%parent_id)
logging.debug("line %s."%line.strip())
elif parent_type!='Transcript' and parent_type!='mRNA':
logging.error('type %s has parent %s '%(type,parent_type))
logging.error("line %s."%line.strip())
continue
parent_type='mRNA'
info=all_features_per_transcript.get(parent_id)
if info is None:
features_list=[]
all_features_per_transcript[parent_id]=(features_list,None,None,None,None,None,None)
else:
features_list,dummy,dummy,dummy,dummy,dummy,dummy=info
features_list.append((chr,start,end,strand,type,parent_id))
elif type in transcript_type:
if len(parents)>0:
for parent_type, parent_id in parents:
if parent_type and parent_type.lower()!='gene':
logging.error('type %s has parent %s '%(type,parent_type))
logging.error("line %s."%line.strip())
else:
parent_type='gene'
info=all_features_per_transcript.get(feature_id)
if info is None:
all_features_per_transcript[feature_id]=([],chr,start,end,strand,feature_id,parent_id)
else:
features_list,old_chr,old_start,old_end,old_strand,transcript_id,gene_id=info
if (old_chr and old_chr!=chr) or (old_start and old_start!=start) or \
(old_end and old_end!=end) or (old_strand and old_strand!=strand) or \
(transcript_id and transcript_id!=feature_id) or(gene_id and gene_id!=parent_id):
logging.error('%s\t%s\t%s\t%s\t%s\t%s'%(old_chr,old_start,old_end,old_strand,transcript_id,gene_id))
logging.error('%s\t%s\t%s\t%s\t%s\t%s'%(chr,start,end,strand,feature_id,parent_id))
all_features_per_transcript[feature_id]=(features_list,chr,start,end,strand,feature_id,parent_id)
else:
parent_id=feature_id
if not name is None:
parent_id = name
info=all_features_per_transcript.get(feature_id)
if info is None:
all_features_per_transcript[feature_id]=([],chr,start,end,strand,feature_id,parent_id)
else:
features_list,old_chr,old_start,old_end,old_strand,transcript_id,gene_id=info
if (old_chr and old_chr!=chr) or (old_start and old_start!=start) or \
(old_end and old_end!=end) or (old_strand and old_strand!=strand) or \
(transcript_id and transcript_id!=feature_id) or(gene_id and gene_id!=parent_id):
logging.error('%s\t%s\t%s\t%s\t%s\t%s'%(old_chr,old_start,old_end,old_strand,transcript_id,gene_id))
logging.error('%s\t%s\t%s\t%s\t%s\t%s'%(chr,start,end,strand,feature_id,parent_id))
all_features_per_transcript[feature_id]=(features_list,chr,start,end,strand,feature_id,parent_id)
elif type=='gene':
pass
else:
logging.error('Unknown type %s'%(type))
open_exon.close()
for type in unused_type.keys():
logging.warning('%d lines of type %s were unused'%(unused_type.get(type),type))
all_genes_per_id={}
    #we've gone through the whole file and gathered exons for all transcripts
#now create the structure that we'll pass on
for transcript_id in all_features_per_transcript.keys():
nb_transcript+=1
all_exons=[]
features_list,chr,transcript_start,transcript_end,strand,transcript_id,gene_id=all_features_per_transcript.get(transcript_id)
if transcript_start is None or transcript_end is None or chr is None or strand is None:
logging.error('transcript %s does not have valid information'%transcript_id)
print features_list
print chr,transcript_start,transcript_end,strand,transcript_id,gene_id
continue
all_genes_per_id[gene_id]=1
if gene_id is None:
gene_id=transcript_id
features_list.sort(key=lambda info : info[1] )
exon_start=0
# take the first feature. Hopefully it's not an intron
if len(features_list)==0:
logging.error('transcript %s does not have features'%transcript_id)
print features_list
print chr,transcript_start,transcript_end,strand,transcript_id,gene_id
continue
feature_chr,curr_exon_start, curr_exon_end,strand,curr_type,dummy=features_list[0]
if feature_chr!=chr:
print features_list
print chr,transcript_start,transcript_end,strand,transcript_id,gene_id
if curr_type=='CDS':
cds_start=curr_exon_start
cds_end=curr_exon_end
else:
cds_start=None
cds_end=None
# merge with all the remaining pieces if they're not intron
for feature in features_list[1:]:
feature_chr,start,end,strand,type,parent_id=feature
if feature_chr!=chr:
print features_list
print chr,transcript_start,transcript_end,strand,transcript_id,gene_id
if type!='intron':
if start<=curr_exon_end :
if end > curr_exon_end:
#logging.warning('Overlapping feature %s (%s-%s) and %s (%s-%s) for transcript %s'%(curr_type, curr_exon_start, curr_exon_end,
# type, start, end, transcript_id))
curr_exon_end=end
else:
all_exons.append((curr_exon_start, curr_exon_end, transcript_id, gene_id, transcript_start,
transcript_end, 0, 0, chr, strand))
curr_exon_start=start
curr_exon_end=end
if type=='CDS':
if cds_start is None:
cds_start=start
if cds_end is None or cds_end<end:
cds_end=end
#Add the last one to the array
all_exons.append((curr_exon_start, curr_exon_end, transcript_id, gene_id, transcript_start,
transcript_end, 0, 0, chr, strand))
nb_exon+=len(all_exons)
if cds_start is not None and cds_end is not None:
nb_cds+=1
#Propagate the cds info to all the exon and store them in a per chr storage
for i in range(len(all_exons)):
exon_start, exon_end, transcript_id, gene_id, transcript_start,transcript_end, dummy, dummy, chr, strand=all_exons[i]
all_exons[i]=(exon_start, exon_end, transcript_id, gene_id, transcript_start,
transcript_end, cds_start, cds_end, chr, strand)
exon = get_exon(reference=chr, gene_id=gene_id, transcript_id=transcript_id, strand=strand, exon_start=exon_start, exon_end=exon_end,
transcript_start=transcript_start, transcript_end=transcript_end,cds_start=cds_start, cds_end=cds_end)
gene = exon.transcript.gene
list_gene = all_gene_per_chr.get(chr)
if list_gene is None:
list_gene=set()
all_gene_per_chr[chr]=list_gene
list_gene.add(gene)
nb_gene=len(all_genes_per_id)
    logging.info('retrieved %s genes %s transcripts with %s cds and %s exons'%(nb_gene,nb_transcript,nb_cds,nb_exon))
return all_gene_per_chr
def read_gff_file(gff_file):
"""Read gff file expects one feature per line in the following format gff3 format."""
#II Coding_transcript CDS 3802 3984 . + 1 ID=CDS:2L52.1;Note=Zinc finger%2C C2H2 type;Parent=Transcript:2L52.1;status=Partially_confirmed;wormpep=WP:CE32090
nb_exon=0
nb_transcript=0
nb_cds=0
nb_gene=0
increment=0
all_gene_per_chr={}
exonic_type=['CDS','exon','five_prime_UTR','three_prime_UTR','intron']
transcript_type=['mRNA','ncRNA','transcript']
gene_type=['gene']
allowed_type=[]
allowed_type.extend(exonic_type)
allowed_type.extend(transcript_type)
allowed_type.extend(gene_type)
allowed_source=[]
unused_type={}
all_features_per_type={}
all_features_per_type_count={}
all_features={}
open_exon=utils_logging.open_input_file(gff_file, pipe=False)
for line in open_exon:
if line.startswith("#"):
continue
sp_line=line.strip().split('\t')
if len(sp_line)<2:
continue
chr=sp_line[0]
start=int(sp_line[3])
end=int(sp_line[4])
if sp_line[6]=='.' or DNA_tools.strand_is_positive(sp_line[6]):
strand=1
else:
strand=-1
source=sp_line[1]
        #skip things that are not in the allowed source or not in the allowed type
if len(allowed_source)>0 and source not in allowed_source:
continue
type=sp_line[2]
if type not in allowed_type:
if unused_type.has_key(type):
unused_type[type]+=1
else:
unused_type[type]=1
continue
attributes=sp_line[8].split(';')
        #Go through the attributes to find out what type of line it is
all_attributes={}
for attribute in attributes:
attribute_elements=attribute.split('=')
type_attr=attribute_elements[0].strip()
attr_str="=".join(attribute_elements[1:])
all_attributes[type_attr]=attr_str
children=[]
if all_features.has_key(all_attributes.get('ID')):
dummy_chr,dummy_type,dummy_start,dummy_end,dummy_strand,dummy_source,dummy_all_attributes,children=all_features.get(all_attributes.get('ID'))
if not dummy_chr is None:
logging.error("This feature has been seen more than once, create new id")
logging.error(line.strip())
all_attributes['ID']=all_attributes.get('ID')+"_%s"%(increment)
increment+=1
feature = (chr,type,start,end,strand,source,all_attributes,children)
if not all_features_per_type_count.has_key(type):
all_features_per_type_count[type]=1
else:
all_features_per_type_count[type]+=1
if all_attributes.get('ID') is None and all_attributes.get('Parent') is None:
logging.error("no id nor parent for this attribute")
print line
pprint(all_attributes)
if all_attributes.has_key('ID'):
all_features[all_attributes.get('ID')]=feature
if all_attributes.get('Parent') is None:
if all_features_per_type.has_key(type):
all_features_per_type[type][all_attributes.get('ID')]=feature
else:
all_features_per_type[type]={all_attributes.get('ID'):feature}
else:
parent_feature = all_features.get(all_attributes.get('Parent'))
if parent_feature is not None:
(par_chr,par_type,par_start,par_end,par_strand,par_source,par_all_attributes,par_children) = parent_feature
par_children.append(feature)
else:
all_features[all_attributes.get('Parent')]=(None,None,None,None,None,None,None,[feature])
logging.info('load %s features'%(len(all_features)))
for type in all_features_per_type_count.keys():
logging.info('load %s %s features'%(all_features_per_type_count.get(type), type))
def process_exons(gene_id, transcript_id, all_exonic_features):
all_exonic_features.sort(key=lambda info : info[2] )
exon_start=0
# take the first feature. Hopefully it's not an intron
if len(all_exonic_features)==0:
logging.error('transcript %s does not have features'%transcript_id)
return
reference,curr_type,curr_start,curr_end,strand,source,all_attributes,children=all_exonic_features[0]
if curr_type=='CDS':
cds_start=curr_start
cds_end=curr_end
else:
cds_start=None
cds_end=None
# merge with all the remaining pieces if they're not intron
for feature in all_exonic_features[1:]:
reference,type,start,end,strand,source,all_attributes,children=feature
if type!='intron':
if start<=curr_end+1 :
if end > curr_end:
curr_end=end
else:
get_exon(reference=reference, gene_id=gene_id, transcript_id=transcript_id, strand=strand,
exon_start=curr_start, exon_end=curr_end, exon_cds_start=cds_start,
exon_cds_end=cds_end)
curr_start=start
curr_end=end
cds_start=None
cds_end=None
if type=='CDS':
if cds_start is None:
cds_start=start
if cds_end is None or cds_end<end:
cds_end=end
#Add the last one to the array
exon=get_exon(reference=reference, gene_id=gene_id, transcript_id=transcript_id, strand=strand,
exon_start=curr_start, exon_end=curr_end, exon_cds_start=cds_start,
exon_cds_end=cds_end)
exon.transcript._update_from_exons()
return exon.transcript.gene
def process_transcripts(gene_id, all_transcript_features):
gene = None
for transcript_feature in all_transcript_features:
chr,curr_type,curr_start,curr_end,strand,source,all_attributes,children = transcript_feature
if all_attributes.has_key('ID'):
transcript_id = all_attributes.get('ID')
else:
transcript_id = gene_id
if len(children)==0:
exon=get_exon(reference=chr, gene_id=gene_id, transcript_id=transcript_id, strand=strand,
exon_start=curr_start, exon_end=curr_end, transcript_start=curr_start,
transcript_end=curr_end)
gene = exon.transcript.gene
else:
gene = process_exons(gene_id, transcript_id, all_exonic_features=children)
return gene
def process_genes(feature):
gene=None
(chr,type,gene_start,gene_end,strand,source,all_attributes,children)=feature
if len(children)==0:
#create the transcript/exon from the gene
exon=get_exon(reference=chr, gene_id=gene_id, transcript_id=gene_id, strand=strand,
exon_start=gene_start, exon_end=gene_end, transcript_start=gene_start,
transcript_end=gene_end)
gene=exon.transcript.gene
else:
(dummy,child_type,dummy,dummy,dummy,dummy,dummy,dummy)=children[0]
if child_type in transcript_type:
gene = process_transcripts(gene_id, children)
elif child_type in exonic_type:
gene = process_exons(gene_id, gene_id, children)
return gene
    #we've gone through the whole file and gathered all features
#now create the structure that we'll pass on
for gene_type_name in gene_type:
all_genes_features = all_features_per_type.get(gene_type_name)
if not all_genes_features is None:
logging.debug("process %s features %s at the root"%(len(all_genes_features),gene_type_name))
for gene_id in all_genes_features.keys():
gene = process_genes(all_genes_features.get(gene_id))
list_gene = all_gene_per_chr.get(gene.reference)
if list_gene is None:
list_gene=[]
all_gene_per_chr[gene.reference]=list_gene
list_gene.append(gene)
else:
logging.debug("No feature for %s at the root"%(gene_type_name))
for transcript_type_name in transcript_type:
all_transcript_features = all_features_per_type.get(transcript_type_name)
if all_transcript_features:
len_transcript=len(all_transcript_features)
else:
len_transcript=0
logging.debug("process %s features %s at the root"%(len_transcript,transcript_type_name))
if not all_transcript_features is None:
for transcript_id in all_transcript_features.keys():
gene = process_transcripts(transcript_id,[all_transcript_features.get(transcript_id)])
list_gene = all_gene_per_chr.get(gene.reference)
if list_gene is None:
list_gene=[]
all_gene_per_chr[gene.reference]=list_gene
list_gene.append(gene)
for chr in all_gene_per_chr.keys():
list_gene = all_gene_per_chr.get(chr)
for gene in list_gene:
nb_gene+=1
for transcript in gene.transcript_list:
nb_transcript+=1
if transcript.has_cds():
nb_cds+=1
for exon in transcript.exon_list:
nb_exon+=1
if len(unused_type)>0:
logging.info("%s feature type unused in %s"%(len(unused_type), gff_file))
logging.info(' - '.join(unused_type.keys()))
    logging.info('retrieved %s genes %s transcripts with %s cds and %s exons'%(nb_gene,nb_transcript,nb_cds,nb_exon))
return all_gene_per_chr
def read_gtf_file(gtf_file):
"""Read gtf file expects one feature per line in the following format gff3 format."""
#scaffold00002 Cufflinks transcript 17153 18427 1000 + . gene_id "CUFF.7"; transcript_id "CUFF.7.1";
nb_exon=0
nb_transcript=0
nb_cds=0
nb_gene=0
all_features_per_transcript={}
all_gene_per_chr={}
coding_type=['CDS','stop_codon','start_codon']
exonic_type=['CDS','stop_codon','start_codon','exon','five_prime_UTR','three_prime_UTR','intron']
transcript_type=['mRNA','ncRNA', 'transcript']
gene_type=['gene']
allowed_type=[]
allowed_type.extend(exonic_type)
allowed_type.extend(transcript_type)
allowed_type.extend(gene_type)
allowed_source=[]
unused_type={}
open_exon=utils_logging.open_input_file(gtf_file, pipe=False)
#allowed_source=['Coding_transcript']
for line in open_exon:
if line.startswith("#"):
continue
sp_line=line.strip().split('\t')
if len(sp_line)<2:
continue
reference=sp_line[0]
start=int(sp_line[3])
end=int(sp_line[4])
if sp_line[6]=='.' or DNA_tools.strand_is_positive(sp_line[6]):
strand=1
else:
strand=-1
source=sp_line[1]
        #skip things that are not in the allowed source or not in the allowed type
if len(allowed_source)>0 and source not in allowed_source:
continue
type=sp_line[2]
if type not in allowed_type:
if unused_type.has_key(type):
unused_type[type]+=1
else:
unused_type[type]=1
continue
attributes=sp_line[8].split(';')
gene_id=None
transcript_id=None
name=None
        #Go through the attributes to get gene_id and transcript_id
feature={'reference':reference,'ft_start':start,'ft_end':end,'ft_strand':strand,
'ft_type':type}
for attribute in attributes:
attribute = attribute.strip()
attribute_elements=attribute.split(" ")
type_attr=attribute_elements[0]
attr_str=" ".join(attribute_elements[1:])
attr_str=attr_str.replace('"','')
feature[type_attr]=attr_str.strip()
if not feature.has_key('gene_id') or not feature.has_key('transcript_id') :
logging.error("line %s is missing gene_id or transcript_id"%line.strip())
continue
if type in exonic_type:
info=all_features_per_transcript.get(feature['transcript_id'])
if info is None:
features_list=[]
all_features_per_transcript[feature['transcript_id']]=(features_list,None,None,None,None,feature['transcript_id'],feature['gene_id'])
else:
features_list,dummy,dummy,dummy,dummy,dummy,dummy=info
features_list.append(feature)
elif type in transcript_type:
info=all_features_per_transcript.get(feature['transcript_id'])
if info is None:
all_features_per_transcript[feature['transcript_id']]=([],reference,start,end,strand,feature['transcript_id'],feature['gene_id'])
else:
features_list,old_chr,old_start,old_end,old_strand,old_transcript_id,old_gene_id=info
                all_features_per_transcript[feature['transcript_id']]=(features_list,reference,start,end,strand,feature['transcript_id'],feature['gene_id'])
elif type=='gene':
#not bothering with gene features yet
pass
else:
logging.error('Unknown type %s'%(type))
open_exon.close()
for type in unused_type.keys():
logging.warning('%d lines of type %s were unused'%(unused_type.get(type),type))
all_genes_per_id={}
    #we've gone through the whole file and gathered features for all transcripts
#now create the structure that we'll pass on
for transcript_id in all_features_per_transcript.keys():
nb_transcript+=1
exon_start=None
exon_end=None
exon_cds_start = None
exon_cds_end = None
features_list, reference, transcript_start, transcript_end, strand, transcript_id, gene_id = all_features_per_transcript.get(transcript_id)
if len(features_list)>0:
features_list.sort(key=lambda info : info['ft_start'] )
# take the first feature. Hopefully it's not an intron
feature = features_list[0]
if not reference:
reference=feature['reference']
if not gene_id:
gene_id=feature['gene_id']
if not strand:
strand=feature['ft_strand']
exon_start=feature['ft_start']
exon_end=feature['ft_end']
if feature['ft_type'] in coding_type:
exon_cds_start = feature['ft_start']
exon_cds_end = feature['ft_end']
else:
exon_cds_start = None
exon_cds_end = None
transcript=get_transcript(reference=reference, gene_id=gene_id, transcript_id=transcript_id,
strand=strand, transcript_start=transcript_start, transcript_end=transcript_end)
if len(features_list)>1:
# merge with all the remaining pieces if they're not intron
for feature in features_list[1:]:
if reference and feature['reference']!=reference:
logging.error("feature has a different reference than parent %s -- %s "%(feature['reference'],reference))
logging.error(str(feature))
logging.error(str((reference,transcript_start,transcript_end,strand,transcript_id,gene_id)))
                    if feature['ft_type']!='intron':
if feature['ft_start'] <= exon_end :
if feature['ft_end'] > exon_end:
#logging.warning('Overlapping feature %s (%s-%s) and %s (%s-%s) for transcript %s'%(curr_type, curr_exon_start, curr_exon_end,
# type, start, end, transcript_id))
exon_end = feature['ft_end']
else:
exon=get_exon(reference=reference, gene_id=gene_id, transcript_id=transcript_id,strand=strand,
exon_start=exon_start, exon_end=exon_end, exon_cds_start=exon_cds_start,
exon_cds_end=exon_cds_end, transcript_start=transcript_start, transcript_end=transcript_end)
exon_cds_start=None
exon_cds_end=None
exon_start=feature['ft_start']
exon_end=feature['ft_end']
nb_exon+=1
if feature['ft_type'] in coding_type:
if exon_cds_start is None:
exon_cds_start=feature['ft_start']
if exon_cds_end is None or exon_cds_end<feature['ft_end']:
exon_cds_end=feature['ft_end']
#Add the last one to the array
exon=get_exon(reference=reference, gene_id=gene_id, transcript_id=transcript_id, strand=strand,
exon_start=exon_start, exon_end=exon_end, exon_cds_start=exon_cds_start,
exon_cds_end=exon_cds_end, transcript_start=transcript_start, transcript_end=transcript_end)
nb_exon+=1
transcript=exon.transcript
transcript._update_from_exons()
if transcript.cds_start:
nb_cds+=1
gene = transcript.gene
all_genes_per_id[gene]=1
list_gene = all_gene_per_chr.get(reference)
if list_gene is None:
list_gene=set()
all_gene_per_chr[reference]=list_gene
list_gene.add(gene)
nb_gene=len(all_genes_per_id)
    logging.info('retrieved %s genes %s transcripts with %s cds and %s exons'%(nb_gene,nb_transcript,nb_cds,nb_exon))
return all_gene_per_chr
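#Illustrative example, not part of the original module, of the attribute parsing
#performed in read_gtf_file: for a 9th column of
#    gene_id "CUFF.7"; transcript_id "CUFF.7.1";
#the per-feature dictionary contains, in addition to the positional fields,
#{'gene_id': 'CUFF.7', 'transcript_id': 'CUFF.7.1'}.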
def read_bed_file(bed_file):
"""Read bed file expects transcript per line in the following format:
chromosome, start, end start end score strand frame group
Only the chromosome, start, end, name, strand and group are used, the other are ignored."""
all_exons_per_chr={}
all_gene_per_chr={}
all_gene_id_seen={}
record_number=1
nb_transcript=0
nb_cds=0
nb_exon=0
#chr1 16145919 16146865 ASO3599 0 + 16145919 16146865 0 1 946 0
#chr1 16660029 16667563 ASO1842 0 + 16660029 16667563 0 2 2340,905 0,6629
open_exon=utils_logging.open_input_file(bed_file, pipe=False)
line = open_exon.readline()
while line.startswith("#") or line.startswith("browser") or line.startswith("track"):
line = open_exon.readline()
while line:
sp_line=line.strip().split('\t')
if len(sp_line)>3:
gene_id=sp_line[3]
time_seen=all_gene_id_seen.get(gene_id)
if not time_seen:
time_seen=0
time_seen+=1
all_gene_id_seen[gene_id]=time_seen
transcript_id=sp_line[3]+"_%s"%time_seen
else:
gene_id='record_%s'%record_number
transcript_id='record_%s'%record_number
record_number+=1
nb_transcript+=1
chr=sp_line[0]
        #Assuming a bed file from UCSC, so start coordinates are 0-based and converted to 1-based
transcript_start=int(sp_line[1])+1
transcript_end=int(sp_line[2])
if len(sp_line)>5 and not DNA_tools.strand_is_positive(sp_line[5]):
strand=-1
else:
strand=1
if len(sp_line)>7:
cds_start=int(sp_line[6])+1
cds_end=int(sp_line[7])
nb_cds+=1
else:
cds_start=None
cds_end=None
if len(sp_line)>11:
if sp_line[10].endswith(','):
block_sizes=sp_line[10][:-1].split(',')
else:
block_sizes=sp_line[10].split(',')
if sp_line[11].endswith(','):
block_starts=sp_line[11][:-1].split(',')
else:
block_starts=sp_line[11].split(',')
else:
block_sizes=[transcript_end-transcript_start+1]
block_starts=[0]
for i,start in enumerate(block_starts):
nb_exon+=1
exon_start=transcript_start+int(start)
exon_end=transcript_start+int(start)+int(block_sizes[i])-1
exon=get_exon(reference=chr, gene_id=gene_id, transcript_id=transcript_id, strand=strand,
exon_start=exon_start, exon_end=exon_end, transcript_start=transcript_start,
transcript_end=transcript_end, cds_start=cds_start, cds_end=cds_end)
gene = exon.transcript.gene
list_gene = all_gene_per_chr.get(chr)
if list_gene is None:
list_gene=set()
all_gene_per_chr[chr]=list_gene
list_gene.add(gene)
line = open_exon.readline()
    nb_gene=sum([len(genes) for genes in all_gene_per_chr.values()])
    logging.info('retrieved %s genes %s transcripts with %s cds and %s exons'%(nb_gene,nb_transcript,nb_cds,nb_exon))
    return all_gene_per_chr
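#Illustrative example, not part of the original module, of the coordinate handling
#in read_bed_file: for the UCSC-style line
#    chr1 16145919 16146865 ASO3599 0 + 16145919 16146865 0 1 946 0
#the 0-based start 16145919 becomes the 1-based transcript_start 16145920,
#transcript_end stays 16146865, and the single block of size 946 at offset 0
#yields one exon spanning 16145920-16146865.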
def read_ucsc_file_not_knowngene(ucsc_file):
return read_ucsc_file(ucsc_file,knowngene=False)
def read_ucsc_file(ucsc_file,knowngene=True):
"""Read ucsc file expects transcript per line in the following format:
tanscript id chromosome, strand, transcript_start, transcript_end cds_start cds_end number_exon,
coma_separated_exons_start, coma_separated_exons_end proteinID, and alignID.
The last two column are not used.
The coordinate are assumed to be 0-based"""
all_genes_per_chr={}
all_gene_id_seen={}
record_number=1
#uc007aet.1 chr1 - 3195984 3205713 3195984 3195984 2 3195984,3203519, 3197398,3205713, uc007aet.1
#uc007aeu.1 chr1 - 3204562 3661579 3206102 3661429 3 3204562,3411782,3660632, 3207049,3411982,3661579, Q5GH67 uc007aeu.1
#uc007aev.1 chr1 - 3638391 3648985 3638391 3638391 2 3638391,3648927, 3640590,3648985, uc007aev.1
nb_transcript=0
nb_cds=0
nb_exon=0
all_genes=set()
open_exon=utils_logging.open_input_file(ucsc_file, pipe=False)
line = open_exon.readline()
all_transcript_id={}
while line.startswith("#") or line.startswith("browser") or line.startswith("track"):
line = open_exon.readline()
while line:
nb_transcript+=1
sp_line=line.strip().split('\t')
if not knowngene:
sp_line=sp_line[1:]
if len(sp_line)>=12:
gene_id=sp_line[11]
all_genes.add(gene_id)
else:
gene_id=sp_line[0]
all_genes.add(gene_id)
        #The transcript ids of UCSC databases are not always unique
#Add a dot plus a digit as it's done with refseq
transcript_id=sp_line[0]
i=0
while all_transcript_id.get(transcript_id) is not None:
i+=1
transcript_id=sp_line[0]+'.%s'%i
all_transcript_id[transcript_id]=1
chr=sp_line[1]
if DNA_tools.strand_is_positive(sp_line[2]):
strand=1
else:
strand=-1
transcript_start=int(sp_line[3])+1
transcript_end=int(sp_line[4])
#1354 ENST00000444633 chr10 + 100807395 100808076 100808076 100808076 1 100807395, 100808076, 0 ENSG00000234109 none none -1,
no_cds=False
        if len(sp_line)>=15:
if sp_line[13]=='none' or sp_line[14]=='none' or sp_line[13]=='unk' or sp_line[14]=='unk':
no_cds=True
if sp_line[5].isdigit() and not no_cds:
nb_cds+=1
cds_start=int(sp_line[5])+1
else:
cds_start=None
if sp_line[6].isdigit() and not no_cds:
cds_end=int(sp_line[6])
else:
cds_end=None
if sp_line[8].endswith(','):
exons_starts=sp_line[8][:-1].split(',')
else:
exons_starts=sp_line[8].split(',')
        if sp_line[9].endswith(','):
exons_ends=sp_line[9][:-1].split(',')
else:
exons_ends=sp_line[9].split(',')
transcript = get_transcript(reference=chr, gene_id=gene_id, transcript_id=transcript_id, strand=strand,
transcript_start=transcript_start, transcript_end=transcript_end, cds_start=cds_start, cds_end=cds_end)
for i,exon_start in enumerate(exons_starts):
nb_exon+=1
exon_start=int(exon_start)+1
exon_end=int(exons_ends[i])
list_gene=all_genes_per_chr.get(chr)
if not list_gene:
list_gene=set()
all_genes_per_chr[chr]=list_gene
get_exon(reference=chr, gene_id=gene_id, transcript_id=transcript_id, strand=strand,
exon_start=exon_start, exon_end=exon_end)
transcript._update_from_exons()
gene = transcript.gene
list_gene.add(gene)
line = open_exon.readline()
    logging.info('retrieved %s genes %s transcripts with %s cds and %s exons'%(len(all_genes), nb_transcript, nb_cds,nb_exon))
return all_genes_per_chr
def recognize_format(input_file):
"""Recognize the format of a file in between gff, bed, and exon."""
open_file=utils_logging.open_input_file(input_file, pipe=False)
line = open_file.readline()
while line and (line.startswith("#") or line.startswith("browser") or line.startswith("track")):
line = open_file.readline()
#chr1 16145919 16146865 ASO3599 0 + 16145919 16146865 0 1 946 0
    # bed has 3 mandatory fields
open_file.close()
sp_line = line.strip().split('\t')
    is_bed=False
    if len(sp_line)>=3 and sp_line[1].isdigit() and sp_line[2].isdigit() and int(sp_line[1])<int(sp_line[2]):
#probably bed
is_bed=True
if len(sp_line)>=6 and sp_line[5] not in ['+', '-', '.']:
# not bed
is_bed=False
if len(sp_line)>=12 :
if sp_line[10].endswith(','):
block_sizes=sp_line[10][:-1].split(',')
else:
block_sizes=sp_line[10].split(',')
if sp_line[11].endswith(','):
block_starts=sp_line[11][:-1].split(',')
else:
block_starts=sp_line[11].split(',')
if len(block_sizes)==len(block_starts):
for block_size in block_sizes:
if not block_size.isdigit():
is_bed=False
for block_start in block_starts:
if not block_start.isdigit():
is_bed=False
else:
is_bed=False
if is_bed:
logging.info("Recognize bed file for %s"%input_file)
return read_bed_file
#chr22 TeleGene promoter 10010000 10010100 900 + . TG1
    # gff has 9 mandatory fields
if len(sp_line)==9 and sp_line[3].isdigit() and sp_line[4].isdigit() and int(sp_line[3])<int(sp_line[4]) and sp_line[6] in ['+','-','.']:
#probably gff
#check if gtf
if sp_line[8].find('gene_id')>=0 and sp_line[8].find('transcript_id')>=0:
logging.info("Recognize gtf file for %s"%input_file)
return read_gtf_file
#Now I'm sure it's gff
logging.info("Recognize gff file for %s"%input_file)
return read_gff_file
#uc007aeu.1 chr1 - 3204562 3661579 3206102 3661429 3 3204562,3411782,3660632, 3207049,3411982,3661579, Q5GH67 uc007aeu.1
    # ucsc has at least 10 mandatory fields
if len(sp_line)>9 and sp_line[2] in ['-','+'] and sp_line[3].isdigit() and sp_line[4].isdigit() and int(sp_line[3])<int(sp_line[4]):
#probably ucsc
logging.info("Recognize ucsc file for %s"%input_file)
return read_ucsc_file
elif len(sp_line)>10 and sp_line[3] in ['-','+'] and sp_line[4].isdigit() and sp_line[5].isdigit() and int(sp_line[4])<int(sp_line[5]):
#probably ucsc
logging.info("Recognize ucsc (not known gene) file for %s"%input_file)
return read_ucsc_file_not_knowngene
logging.error("Unrecognize format for file %s"%input_file)
return None
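#Illustrative usage sketch, not part of the original module; the file name is hypothetical:
#    reader = recognize_format('annotation.gff')
#    if reader is not None:
#        all_gene_per_chr = reader('annotation.gff')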
class Annotation_Retriver():
"""This class can read from a from a file.
It's used in Base_annotator to have a common way getting the exon information.
database structure is defined in gene_annotation and exon file format is defined in read_exon_file's doc
"""
def __init__(self, annotation_file=None):
self.all_gene_per_chr=None
self.source=None
if not annotation_file:
logging.error("No annotation resource given")
return
elif annotation_file:
self.source=annotation_file
reader=recognize_format(annotation_file)
if reader is not None:
self.all_gene_per_chr=reader(annotation_file)
def get_annotation_from_chr(self,chr):
if self.all_gene_per_chr:
return self.all_gene_per_chr.get(chr)
def get_chr_names(self):
if self.all_gene_per_chr:
return self.all_gene_per_chr.keys()
def get_source(self):
return self.source
class Exon_annotation_Retriver():
"""This class can read exon information from a database or from a file.
    It's used in Base_annotator to have a common way of getting the exon information.
    The database structure is defined in gene_annotation and the exon file format is defined in read_exon_file's doc.
"""
def __init__(self, annotation_file=None, annotation_database=None):
self.anno_database_retriver=None
if not annotation_file:
logging.error("No annotation resource given")
return
elif annotation_database:
self.source=annotation_database
logging.critical('The functionality of Exon_annotation_Retriver for databases is deprecated and will not work anymore')
sys.exit(0)
elif annotation_file:
self.annotation_reader = Annotation_Retriver(annotation_file)
def get_annotation_from_chr(self,chr):
if self.annotation_reader:
all_exons=[]
all_genes= self.annotation_reader.get_annotation_from_chr(chr)
if all_genes:
for gene in all_genes:
for transcript in gene.transcript_list:
for exon in transcript.exon_list:
all_exons.append((exon.exon_start, exon.exon_end, transcript.transcript_id,
gene.gene_id, transcript.transcript_start, transcript.transcript_end,
transcript.cds_start, transcript.cds_end, gene.reference, transcript.strand))
return all_exons
def get_chr_names(self):
if self.annotation_reader:
return self.annotation_reader.get_chr_names()
def get_source(self):
return self.annotation_reader.get_source()
if __name__=="1__main__":
utils_logging.init_logging()
annotation_file='/home/tcezard/genomes/homo_sapiens/hg19/annotations/refGene_2011_05_27.txt.gz'
#annotation_file='/home/tcezard/projects/2010053_Tom_Little_RNAseq_2/reference/daphmagna_201104m8.pasaupdate.gff.gz'
#annotation_file='/home/tcezard/projects/2010053_Tom_Little_RNAseq_2/for tim/dmag_ep24augmap2an2.gff'
annotation_reader = Annotation_Retriver(annotation_file=annotation_file)
gene_list = annotation_reader.get_annotation_from_chr('chr1')
#annotations = annotation_reader.get_annotation_from_chr('scaffold00002')
for gene in gene_list:
for transcript in gene.transcript_list:
print transcript
if __name__=="1__main__":
from utils.GenomeLoader import GenomeLoader
utils_logging.init_logging()
annotation_file='/home/tcezard/test_annotation.txt'
genome_file='/home/tcezard/genomes/homo_sapiens/hg19/hg19.fa'
genome_loader = GenomeLoader(genome_file)
annotation_reader = Annotation_Retriver(annotation_file=annotation_file)
gene_list = annotation_reader.get_annotation_from_chr('chr1')
header, chr1_sequence = genome_loader.get_chr('chr1')
#annotations = annotation_reader.get_annotation_from_chr('scaffold00002')
for gene in gene_list:
for transcript in gene.transcript_list:
print transcript.transcript_id
cdna_sequence = transcript.get_cdna_sequence(chr1_sequence)
pos_in_cdna = transcript.get_position_in_cdna(894618)
if pos_in_cdna:
print pos_in_cdna, cdna_sequence[pos_in_cdna-1]
pos_in_cdna = transcript.get_position_in_cdna(894617)
if pos_in_cdna:
print pos_in_cdna, cdna_sequence[pos_in_cdna-1]
pos_in_cdna = transcript.get_position_in_cdna(894616)
if pos_in_cdna:
print pos_in_cdna,cdna_sequence[pos_in_cdna-1]
if __name__=="1__main__":
utils_logging.init_logging()
gene_id='NR_024540'
reference='chr1'
transcript_id='NR_024540'
transcript_start='14361'
transcript_end='29370'
cds_start='29370'
cds_end='29370'
strand='-'
exon_number='1'
exon_start='29320'
exon_end='29370'
    print get_exon(reference=reference, gene_id=gene_id, transcript_id=transcript_id, strand=strand, exon_start=exon_start, exon_end=exon_end, transcript_start=transcript_start, transcript_end=transcript_end, cds_start=cds_start, cds_end=cds_end)
if __name__=="__main__":
utils_logging.init_logging()
sys.argv[1]
read_gff_file(sys.argv[1])
read_gff_file_old(sys.argv[1])
|
{
"content_hash": "7089fd51d3a7204d46717785c6b874c3",
"timestamp": "",
"source": "github",
"line_count": 1420,
"max_line_length": 199,
"avg_line_length": 44.8830985915493,
"alnum_prop": 0.5662911475821383,
"repo_name": "tcezard/RADmapper",
"id": "1334b737b5e30b0a4b85b19067741ecb36c3c0aa",
"size": "63734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/IO_interface/annotation_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "609055"
}
],
"symlink_target": ""
}
|