def get_smaller_right(arr):
smaller_right_arr = list()
for i in range(len(arr)):
smaller_count = 0
for j in range(i + 1, len(arr)):
if arr[j] < arr[i]:
smaller_count += 1
smaller_right_arr.append(smaller_count)
return smaller_right_arr
# Tests
assert get_smaller_right([3, 4, 9, 6, 1]) == [1, 1, 2, 1, 0]
|
from typing import List
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
def binarySearch(l: int, r: int, first: bool) -> int:
nonlocal target
while l <= r:
mid = l + (r - l) // 2
if nums[mid] < target:
l = mid + 1
elif nums[mid] > target:
r = mid - 1
else: # == target
if first: # search for the first position
if mid == 0 or nums[mid - 1] != target:
return mid
else:
r = mid - 1
else: # search for the last position
if mid == len(nums) - 1 or nums[mid + 1] != target:
return mid
else:
l = mid + 1
return -1
first = binarySearch(0, len(nums) - 1, True)
if first == -1:
return [-1, -1]
return [first, binarySearch(first, len(nums) - 1, False)]
# TESTS
for nums, target, expected in [
([5, 7, 7, 8, 8, 10], 8, [3, 4]),
([5, 7, 7, 8, 8, 10], 6, [-1, -1]),
([], 0, [-1, -1]),
([5, 7, 7, 8, 8, 10, 10, 10], 5, [0, 0]),
([5, 7, 7, 8, 8, 10, 10, 10], 10, [5, 7]),
]:
sol = Solution()
actual = sol.searchRange(nums, target)
print("Search", target, "in", nums, "->", actual)
assert actual == expected
|
"""
The Narrator for the screenplay, who informs the audience what the actors are
doing. The Narrator's microphone is modular, allowing for any number of
adapters to be applied. Adapters must follow the Adapter protocol outlined in
screenpy.protocols.
"""
from contextlib import contextmanager
from copy import deepcopy
from typing import (
Callable,
ContextManager,
Dict,
Generator,
List,
Optional,
Tuple,
Union,
)
from screenpy.protocols import Adapter
# pylint: disable=stop-iteration-return
# The above pylint warning may be a false-positive since Narrator calls `next`
# directly instead of iterating over the generators.
Kwargs = Union[Callable, str]
BackedUpNarration = Tuple[str, Dict[str, Kwargs], int]
ChainedNarrations = List[Tuple[str, Dict[str, Kwargs], List]]
Entangled = Tuple[Callable, List[Generator]]
# Levels for gravitas
AIRY = "airy"
LIGHT = "light"
NORMAL = "normal"
HEAVY = "heavy"
EXTREME = "extreme"
# Chaining directions
FORWARD = "forward"
BACKWARD = "backward"
def _chainify(narrations: List[BackedUpNarration]) -> ChainedNarrations:
"""Organize backed-up narrations into an encapsulation chain.
This helper function takes a flat list of narrations and exit levels and
organizes it into an encapsulation structure. For example:
[(kwargs1, 1), (kwargs2, 2), (kwargs3, 2), (kwargs4, 3)]
=>
[(kwargs1, [(kwargs2, []), (kwargs3, [(kwargs4, [])])])]
This encapsulation structure can be used by _entangle_chain to correctly
entangle the backed-up narrations, so each adapter handles them properly.
This approach was created with help from @Doctor#7942 on Discord. Thanks!
"""
result: ChainedNarrations = []
stack = [result]
# the first narration will have the relative base exit level
normalizer = (narrations[0][-1] - 1) if narrations else 0
for channel, channel_kwargs, exit_level in narrations:
normalized_exit_level = exit_level - normalizer
if normalized_exit_level == len(stack):
# this function is a sibling of the previous one
stack[-1].append((channel, channel_kwargs, []))
elif normalized_exit_level > len(stack):
# surface the latest function's child list and append to that
child_list = stack[-1][-1][-1]
stack.append(child_list)
stack[-1].append((channel, channel_kwargs, []))
else:
# we've dropped down one or more levels, go back
stack = stack[: -(len(stack) - normalized_exit_level)]
stack[-1].append((channel, channel_kwargs, []))
return result
class Narrator:
"""The narrator conveys the story to the audience."""
def __init__(self, adapters: Optional[List[Adapter]] = None) -> None:
self.adapters: List[Adapter] = adapters or []
self.on_air = True
self.cable_kinked = False
self.backed_up_narrations: List[BackedUpNarration] = []
self.exit_level = 1
self.kink_exit_level = 0
@contextmanager
def off_the_air(self) -> Generator:
"""Turns off narration completely during this context."""
self.on_air = False
yield
self.on_air = True
@contextmanager
def mic_cable_kinked(self) -> Generator:
"""Put a kink in the microphone line, storing narrations.
Once this context is left, all stored narrations will be flushed. You
can call clear_backup to drop all stored narrations, or flush_backup
to log them all (and clear them afterward).
"""
previous_kink_level = self.kink_exit_level
self.cable_kinked = True
self.kink_exit_level = self.exit_level
yield
self.flush_backup()
self.kink_exit_level = previous_kink_level
self.cable_kinked = self.kink_exit_level == 1
def clear_backup(self) -> None:
"""Clear the backed-up narrations from a kinked cable."""
self._pop_backups_from_exit_level(self.kink_exit_level)
@contextmanager
def _increase_exit_level(self) -> Generator:
"""Increase the exit level for kinked narrations."""
self.exit_level += 1
yield
self.exit_level -= 1
def _pop_backups_from_exit_level(self, level: int) -> List[BackedUpNarration]:
"""Pop all backed-up narrations starting at the given level."""
keep_narrations = []
remove_narrations = []
for narration in self.backed_up_narrations:
if narration[-1] >= level:
remove_narrations.append(narration)
else:
keep_narrations.append(narration)
self.backed_up_narrations = keep_narrations
return remove_narrations
def flush_backup(self) -> None:
"""Let all the backed-up narration flow through the kink."""
kinked_narrations = self._pop_backups_from_exit_level(self.kink_exit_level)
narrations = _chainify(kinked_narrations)
for adapter in self.adapters:
full_narration_func = self._entangle_chain(adapter, deepcopy(narrations))
full_narration_func()
self.clear_backup()
@contextmanager
def _dummy_entangle(self, func: Callable) -> Generator:
"""Give back something that looks like an entangled func.
If the narrator's mic cable is kinked or they are off-air, we still
need to give back a context-managed function. We increase the exit
level as well, for a future un-kinking of the mic cable.
"""
with self._increase_exit_level():
yield func
def _entangle_chain(self, adapter: Adapter, chain: ChainedNarrations) -> Callable:
"""Mimic narration entanglement from a backed-up narration chain."""
roots: List[Callable] = []
for channel, channel_kwargs, enclosed in chain:
with self._entangle_func(channel, [adapter], **channel_kwargs) as root:
if enclosed:
for _, enclosed_kwargs, _ in enclosed:
enclosed_kwargs["func"] = root
self._entangle_chain(adapter, enclosed)
roots.append(root)
return lambda: [root() for root in roots]
@contextmanager
def _entangle_func(
self,
channel: str,
adapters: Optional[List[Adapter]] = None,
**channel_kwargs: Kwargs
) -> Generator:
"""Entangle the function in the adapters' contexts, decorations, etc.
Each adapter yields the function back, potentially applying its own
context or decorators. We extract the function with that context still
intact. We will need to close the context as we leave, so we store
each level of entanglement to leave later.
"""
if adapters is None:
adapters = self.adapters
exits = []
enclosed_func = channel_kwargs["func"]
for adapter in adapters:
channel_kwargs["func"] = enclosed_func
closure = getattr(adapter, channel)(**channel_kwargs)
enclosed_func = next(closure)
exits.append(closure)
try:
yield enclosed_func
finally:
for exit_ in exits:
# close the closures
next(exit_, None)
def narrate(self, channel: str, **kwargs: Union[Kwargs, None]) -> ContextManager:
"""Speak the message into the microphone plugged in to all the adapters."""
channel_kws = {key: value for key, value in kwargs.items() if value is not None}
if not callable(channel_kws["func"]):
raise TypeError('Narration "func" is not callable.')
if self.cable_kinked:
enclosed_func = self._dummy_entangle(channel_kws["func"])
channel_kws["func"] = lambda: "overflow"
self.backed_up_narrations.append((channel, channel_kws, self.exit_level))
else:
enclosed_func = self._entangle_func(channel, **channel_kws) # type: ignore
return enclosed_func
def announcing_the_act(
self, func: Callable, line: str, gravitas: Optional[str] = None
) -> ContextManager:
"""Narrate the title of the act."""
if not self.on_air:
return self._dummy_entangle(func)
return self.narrate("act", func=func, line=line, gravitas=gravitas)
def setting_the_scene(
self, func: Callable, line: str, gravitas: Optional[str] = None
) -> ContextManager:
"""Narrate the title of the scene."""
if not self.on_air:
return self._dummy_entangle(func)
return self.narrate("scene", func=func, line=line, gravitas=gravitas)
def stating_a_beat(self, func: Callable, line: str) -> ContextManager:
"""Narrate an emotional beat."""
if not self.on_air:
return self._dummy_entangle(func)
return self.narrate("beat", func=func, line=line)
def whispering_an_aside(self, line: str) -> ContextManager:
"""Narrate a conspiratorial aside (as a stage-whisper)."""
if not self.on_air:
return self._dummy_entangle(lambda: "<static>")
return self.narrate("aside", func=lambda: "ssh", line=line)
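# Hedged usage sketch (illustrative adapter, not part of screenpy itself): each adapter
# channel method is expected to be a generator that yields the wrapped function back,
# since the Narrator advances it with `next`. A minimal logging adapter might look like:
#
#   class PrintAdapter:
#       def act(self, func, line, gravitas=None):
#           print("ACT:", line)
#           yield func
#       def scene(self, func, line, gravitas=None):
#           print("SCENE:", line)
#           yield func
#       def beat(self, func, line):
#           print("BEAT:", line)
#           yield func
#       def aside(self, func, line):
#           print("ASIDE:", line)
#           yield func
#
#   narrator = Narrator(adapters=[PrintAdapter()])
#   with narrator.stating_a_beat(lambda: "hello", "The actor says hello.") as func:
#       func()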
|
from __future__ import annotations
from ctc.protocols import uniswap_v2_utils
from ctc import spec
async def async_get_pool_swaps(
pool_address: spec.Address,
start_block: spec.BlockNumberReference | None = None,
end_block: spec.BlockNumberReference | None = None,
replace_symbols: bool = False,
normalize: bool = True,
) -> spec.DataFrame:
return await uniswap_v2_utils.async_get_pool_swaps(
pool_address=pool_address,
start_block=start_block,
end_block=end_block,
replace_symbols=replace_symbols,
normalize=normalize,
)
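# Hedged usage sketch (the pool address and block range below are placeholders,
# purely illustrative):
#
#   import asyncio
#
#   async def main():
#       swaps = await async_get_pool_swaps(
#           pool_address='0x...',      # a Uniswap V2 pair address
#           start_block=14_000_000,
#           end_block=14_001_000,
#       )
#       print(swaps.head())
#
#   asyncio.run(main())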
|
from flask import Flask
import os
app = Flask(__name__)
import testsite.views
|
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
import psycopg2
import datetime
from database import database
def keep_alive(request):
"""
keep_alive process the keep alive route /keep_alive
:param request:
:return:
"""
if request.method == "OPTIONS":
return Response(status=200, headers=cors_headers)
else:
return Response(status=200, headers=cors_headers,
body=datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S"))
def parking_spots(request):
if request.method == 'GET':
        params = request.params.dict_of_lists()
        # dict_of_lists returns a list per query parameter; take the first value of each
        lat = params['lat'][0]
        long = params['long'][0]
        radius = params['radius'][0]
        # psycopg2 expects the bound values as a single sequence, not positional arguments
        cursor.execute(
            "select rows_to_json from (select * from ridecell.parking_spots where %s < distance(%s, %s, %s, %s));",
            (radius, lat, long))
rows = cursor.fetchall()
return Response(status=200, headers=cors_headers, json_body=rows[0][0])
else:
return Response(status=400, headers=cors_headers)
def reservations(request):
if request.method == 'POST':
        json_data = request.json_body
try:
            # build the reservation time range as a single bound value
            cursor.execute(
                "insert into ridecell.parking_spot_reservation (spot_id, user_id, duration) values (%s, 1, %s);",
                (json_data['spot_id'],
                 '[{},{}]'.format(json_data['start_time'], json_data['end_time'])))
        except psycopg2.Error:
            return Response(status=500, headers=cors_headers)
        return Response(status=200, headers=cors_headers)
    else:
        return Response(status=400, headers=cors_headers)
cursor = None
cors_headers = {"Access-Control-Allow-Origin": "*"}
if __name__ == '__main__':
"""
Connect to the database
Define the routes and supporting views
"""
cursor = database()
config = Configurator()
config.add_route('keep_alive', '/keep_alive')
config.add_view(keep_alive, route_name='keep_alive')
config.add_route('parking_spots', '/parking_spots')
config.add_view(parking_spots, route_name='parking_spots')
config.add_route('reservations', '/reservations')
config.add_view(reservations, route_name='reservations')
app = config.make_wsgi_app()
server = make_server('0.0.0.0', 8081, app)
server.serve_forever()
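# Hedged usage sketch (request shapes inferred from the handlers above, not from docs):
#
#   GET  /keep_alive                                      -> 200 with a timestamp body
#   GET  /parking_spots?lat=37.77&long=-122.41&radius=2   -> 200 with matching rows as JSON
#   POST /reservations {"spot_id": 1,
#                       "start_time": "2020-01-01 10:00",
#                       "end_time": "2020-01-01 12:00"}   -> 200 once the spot is reserved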
|
import numpy as np
from scipy.optimize import fsolve
def Force(E_interp_B1,E_interp_B2,R_B1,R_B2,M,N,delta,n):
'''
Computes the normal force between two contacting, elastically anisotropic bodies
Parameters:
E_interp_B1: interpolant of values of the plane strain modulus for the material
corresponding to body B1
E_interp_B2: interpolant of values of the plane strain modulus for the material
corresponding to body B2
R_B1: rotation matrix describing the orientation of body B1
R_B2: rotation matrix describing the orientation of body B2
M,N: coefficients of the gap function between bodies B1 and B2
delta: overlap distance between bodies B1 and B2
n: components of contact normal direction, given as a 3-by-1 2D array
Returns:
F: normal contact force between bodies B1 and B2
'''
# Get composite plane strain modulus
Ec = CompositePlainStrainModulus(E_interp_B1,E_interp_B2,R_B1,R_B2,n)
# Compute contact force
if np.isclose(M,N):
F = 4/(3*np.sqrt(2))*Ec*M**(-1/2)*delta**(3/2)
else:
e = Eccentricity(M,N)
F = 4*np.pi/3*I1(0,e)**(1/2)/I0(0,e)**(3/2)*Ec*M**(-1/2)*delta**(3/2)
return F
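# Hedged usage sketch (illustrative inputs, not part of the original module): for an
# isotropic material the "interpolants" can simply be constants and the rotations
# identity matrices; with M == N the np.isclose branch of Force() is exercised.
#
#   E_const = lambda a, b: np.array(1.0e9)    # assumed constant plane strain modulus [Pa]
#   R_eye = np.eye(3)                         # identity orientation for both bodies
#   normal = np.array([[0.0], [0.0], [1.0]])  # contact normal along z
#   F = Force(E_const, E_const, R_eye, R_eye, M=50.0, N=50.0, delta=1e-5, n=normal)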
def CompositePlainStrainModulus(E_interp_B1,E_interp_B2,R_B1,R_B2,n):
'''
    Retrieves the composite plane strain modulus for two contacting
bodies B1 and B2 from precomputed interpolants of the plane strain moduli.
This is the algorithm described in Algorithm 3 in Mowlavi & Kamrin (2021),
with the exception that the look-up tables are replaced by interpolants.
Parameters:
E_interp_B1: interpolant of values of the plane strain modulus for the material
corresponding to body B1
E_interp_B2: interpolant of values of the plane strain modulus for the material
corresponding to body B2
R_B1: rotation matrix describing the orientation of body B1
R_B2: rotation matrix describing the orientation of body B2
n: components of contact normal direction, given as a 3-by-1 2D array
Returns:
    Ec: composite plane strain modulus
'''
    # Calculate plane strain modulus for B1 and B2 and store in a Python
# dictionary E = {'B1': E_B1, 'B2': E_B2}
E = {}
for body in ['B1','B2']:
# Retrieve corresponding look-up table and rotation matrix
E_interp,R = (E_interp_B1,R_B1) if body=='B1' else (E_interp_B2,R_B2)
# Ensure that n is unit length
n = n/np.linalg.norm(n)
# Transform the coordinates of n from global to body basis
n = R.T@n
# Convert these coordinates to Euler angles
a = np.arctan2(n[1],n[0])%(2*np.pi)
b = np.arccos(n[2])
# Interpolate corresponding plane strain modulus from look-up table
E[body] = np.asscalar(E_interp(a,b))
    # Compute the composite plane strain modulus from the stored values
return 1/(1/E['B1'] + 1/E['B2'])
def Eccentricity(M,N):
fun = lambda e: N/M - I2(0,e)/I1(0,e)
e0 = 2*np.sqrt(1-M/N)/np.sqrt(3)
e = np.asscalar(fsolve(fun,e0))
return e
def I0(m,e):
tt = np.linspace(0,np.pi,100)
ff = np.cos(2*m*tt)/np.sqrt(1-e**2*np.cos(tt)**2)
return np.trapz(ff,tt)
def I1(m,e):
tt = np.linspace(0,np.pi,100)
ff = np.sin(tt)**2*np.cos(2*m*tt)/(1-e**2*np.cos(tt)**2)**(3/2)
return np.trapz(ff,tt)
def I2(m,e):
tt = np.linspace(0,np.pi,100)
ff = np.cos(tt)**2*np.cos(2*m*tt)/(1-e**2*np.cos(tt)**2)**(3/2)
    return np.trapz(ff,tt)
|
from __future__ import print_function
import os, boto3, json, logging
lambda_client = boto3.client('lambda')
#logging configuration
logger = logging.getLogger()
logger.setLevel(logging.INFO)
#Read environment Variables
gatewayqueue = os.environ.get("GatewayQueue")
vpcid_hub = os.environ.get("HubVPC")
gwsize_spoke = os.environ.get("SpokeGWSize")
gatewaytopic = os.environ.get("GatewayTopic")
spoketag = os.environ.get("SpokeTag")
OtherAccountRoleApp = os.environ.get("OtherAccountRoleApp")
def find_subnets(ec2,region_id,vpc_id):
subnets_with_igw=ec2.describe_route_tables(Filters=[
{ 'Name': 'vpc-id', 'Values':[ vpc_id ]},
{ 'Name': 'route.gateway-id', 'Values': [ 'igw-*' ] }
])
subnetids=[]
for association in subnets_with_igw['RouteTables'][0]['Associations']:
if 'SubnetId' in association:
subnet_temp = {}
subnet_temp['SubnetId'] = association['SubnetId']
subnetids.append(subnet_temp)
for subnet in subnetids:
subnet_info=ec2.describe_subnets(Filters=[
{ 'Name': 'subnet-id', 'Values': [ subnet['SubnetId'] ] }
])
subnet['CidrBlock'] = subnet_info['Subnets'][0]['CidrBlock']
for tag in subnet_info['Subnets'][0]['Tags']:
if tag['Key'] == 'Name':
subnet['Name'] = tag['Value']
return subnetids
def get_credentials(rolearn):
session = boto3.session.Session()
client = session.client('sts')
assume_role_response = client.assume_role(RoleArn=rolearn,
RoleSessionName="aviatrix_poller" )
return assume_role_response
def handler(event, context):
#Gather all the regions:
ec2=boto3.client('ec2',region_name='us-east-1')
regions=ec2.describe_regions()
#Get Access information for OtherAccountRoleApp
if OtherAccountRoleApp:
logger.info('[Other Account]: Secondary aws account found.')
        other_credentials = None
        try:
            other_credentials = get_credentials(OtherAccountRoleApp)
        except:
            # leave other_credentials as None so the secondary-account checks are skipped
            logger.warning('!!!you might not have the right permissions!!!. Moving on...')
else:
logger.info('[Other Account]: Secondary aws account NOT found.')
#Findout if controller is busy:
for region in regions['Regions']:
region_id=region['RegionName']
logger.info('Checking region: %s for VPC that are processing or unpeering',region_id)
ec2=boto3.client('ec2',region_name=region_id)
#Find VPCs with Tag:spoketag = processing
#Create Gateway for it and Peer, when done change the Tag:spoketag = peered
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'processing', 'unpeering' ] }
])
#logger.info('vpcs with tag:spoketag is processing or unpeering: %s:' % str(vpcs))
if vpcs['Vpcs']: # ucc is busy now
logger.info('ucc is busy in adding/removing spoke of %s:' % str(vpcs['Vpcs']))
return {
'Status' : 'SUCCESS'
}
#Findout if controller is busy in OtherAccountRoleApp
if OtherAccountRoleApp:
if other_credentials:
for region in regions['Regions']:
region_id=region['RegionName']
logger.info('[Other Account] Checking region: %s for VPC that are processing or unpeering',region_id)
ec2=boto3.client('ec2',
region_name=region_id,
aws_access_key_id=other_credentials['Credentials']['AccessKeyId'],
aws_secret_access_key=other_credentials['Credentials']['SecretAccessKey'],
aws_session_token=other_credentials['Credentials']['SessionToken'] )
#Find VPCs with Tag:spoketag = processing
#Create Gateway for it and Peer, when done change the Tag:spoketag = peered
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'processing', 'unpeering' ] }
])
#logger.info('vpcs with tag:spoketag is processing or unpeering: %s:' % str(vpcs))
if vpcs['Vpcs']: # ucc is busy now
logger.info('[Other Account] ucc is busy in adding/removing spoke of %s:' % str(vpcs['Vpcs']))
return {
'Status' : 'SUCCESS'
}
#Find Spokes waiting to be peered or unpeered
for region in regions['Regions']:
region_id=region['RegionName']
logger.info('Checking region: %s for VPC tagged %s' % (region_id,spoketag))
ec2=boto3.client('ec2',region_name=region_id)
#Find VPCs with Tag:spoketag = true
#Create Gateway for it and Peer, when done change the Tag:spoketag = peered
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'true', 'True', 'TRUE', 'test' ] }
])
for vpc_peering in vpcs['Vpcs']:
message = {}
message['action'] = 'deploygateway'
message['vpcid_spoke'] = vpc_peering['VpcId']
message['region_spoke'] = region_id
message['gwsize_spoke'] = gwsize_spoke
message['vpcid_hub'] = vpcid_hub
message['primary_account'] = True
if OtherAccountRoleApp:
message['otheraccount'] = True
#Finding the Public Subnet
try:
subnets=find_subnets(ec2, message['region_spoke'],message['vpcid_spoke'])
if subnets:
logger.warning('Subnets found: %s ' % (subnets))
message['subnet_spoke'] = subnets[0]['CidrBlock']
message['subnet_spoke_ha'] = subnets[1]['CidrBlock']
message['subnet_spoke_name'] = subnets[1]['Name']
except:
logger.warning('!!!your spoke vpc subnet is not setup correctly!!!')
continue
message['vpc_cidr_spoke'] = vpc_peering['CidrBlock']
logger.info('Found VPC %s waiting to be peered. Sending SQS message to Queue %s' % (message['vpcid_spoke'],gatewayqueue))
#Add New Gateway to SNS
sns = boto3.client('sns')
sns.publish(
TopicArn=gatewaytopic,
Subject='New Spoke Gateway',
Message=json.dumps(message)
)
# only add one spoke at a time, return now
return {
'Status' : 'SUCCESS'
}
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'false', 'False', 'FALSE' ] }
])
for vpc_peering in vpcs['Vpcs']:
message = {}
message['action'] = 'deletegateway'
message['subnet_spoke'] = vpc_peering['CidrBlock']
message['vpcid_spoke'] = vpc_peering['VpcId']
message['region_spoke'] = region_id
message['gwsize_spoke'] = gwsize_spoke
message['vpcid_hub'] = vpcid_hub
message['primary_account'] = True
if OtherAccountRoleApp:
message['otheraccount'] = True
logger.info('Found VPC %s waiting to be unpeered. Sending SQS message to Queue %s' % (message['vpcid_spoke'],gatewayqueue))
#Add New Gateway to SQS
#sqs = boto3.resource('sqs')
sns = boto3.client('sns')
#queue = sqs.get_queue_by_name(QueueName=gatewayqueue)
#response = queue.send_message(MessageBody=json.dumps(message))
sns.publish(
TopicArn=gatewaytopic,
Subject='Delete Spoke Gateway',
Message=json.dumps(message)
)
return {
'Status' : 'SUCCESS'
}
#Find Spokes waiting to be peered or unpeered in OtherAccountRoleApp
if OtherAccountRoleApp:
if other_credentials:
for region in regions['Regions']:
region_id=region['RegionName']
logger.info('[Other Account] Checking region: %s for VPC tagged %s' % (region_id,spoketag))
ec2=boto3.client('ec2',
region_name=region_id,
aws_access_key_id=other_credentials['Credentials']['AccessKeyId'],
aws_secret_access_key=other_credentials['Credentials']['SecretAccessKey'],
aws_session_token=other_credentials['Credentials']['SessionToken'] )
#Find VPCs with Tag:spoketag = true
#Create Gateway for it and Peer, when done change the Tag:spoketag = peered
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'true', 'True', 'TRUE' ] }
])
for vpc_peering in vpcs['Vpcs']:
message = {}
message['action'] = 'deploygateway'
message['vpcid_spoke'] = vpc_peering['VpcId']
message['region_spoke'] = region_id
message['gwsize_spoke'] = gwsize_spoke
message['vpcid_hub'] = vpcid_hub
message['primary_account'] = False
message['otheraccount'] = True
#Finding the Public Subnet
try:
subnets=find_subnets(ec2,message['region_spoke'],message['vpcid_spoke'])
if subnets:
logger.warning('Subnets found: %s ' % (subnets))
message['subnet_spoke'] = subnets[0]['CidrBlock']
message['subnet_spoke_ha'] = subnets[1]['CidrBlock']
message['subnet_spoke_name'] = subnets[1]['Name']
except:
logger.warning('!!!your spoke vpc subnet is not setup correctly!!!')
continue
message['vpc_cidr_spoke'] = vpc_peering['CidrBlock']
logger.info('Found VPC %s waiting to be peered. Sending SQS message to Queue %s' % (message['vpcid_spoke'],gatewayqueue))
#Add New Gateway to SNS
sns = boto3.client('sns')
sns.publish(
TopicArn=gatewaytopic,
Subject='New Spoke Gateway',
Message=json.dumps(message)
)
# only add one spoke at a time, return now
return {
'Status' : 'SUCCESS'
}
vpcs=ec2.describe_vpcs(Filters=[
{ 'Name': 'state', 'Values': [ 'available' ] },
{ 'Name': 'tag:'+spoketag, 'Values': [ 'false', 'False', 'FALSE' ] }
])
for vpc_peering in vpcs['Vpcs']:
message = {}
message['action'] = 'deletegateway'
message['subnet_spoke'] = vpc_peering['CidrBlock']
message['vpcid_spoke'] = vpc_peering['VpcId']
message['region_spoke'] = region_id
message['gwsize_spoke'] = gwsize_spoke
message['vpcid_hub'] = vpcid_hub
message['otheraccount'] = True
message['primary_account'] = False
logger.info('Found VPC %s waiting to be unpeered. Sending SQS message to Queue %s' % (message['vpcid_spoke'],gatewayqueue))
#Add New Gateway to SQS
#sqs = boto3.resource('sqs')
sns = boto3.client('sns')
#queue = sqs.get_queue_by_name(QueueName=gatewayqueue)
#response = queue.send_message(MessageBody=json.dumps(message))
sns.publish(
TopicArn=gatewaytopic,
Subject='Delete Spoke Gateway',
Message=json.dumps(message)
)
return {
'Status' : 'SUCCESS'
}
return {
'Status' : 'SUCCESS'
}
|
import sys
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 7)
import numpy as np
n, w = map(int, readline().split())
vw = [list(map(int, readline().split())) for i in range(n)]
if n <= 30:
dp = [0]
check = [0]
for vv, ww in vw:
for j in range(len(dp)):
g = ww + check[j]
v = vv + dp[j]
if g > w:
continue
if g in check:
index = check.index(g)
                dp[index] = max(dp[index], v)  # keep the larger value for this total weight
else:
check.append(g)
dp.append(v)
print(max(dp))
elif all(vw[i][1] <= 1000 for i in range(n)):
dp = np.zeros(n * 1000 + 1, dtype=np.int64)
for vv, ww in vw:
dp[ww:] = np.maximum(dp[ww:], dp[:-ww] + vv)
print(dp[:w + 1].max())
else:
dp = np.zeros(n * 1000 + 1, dtype=np.int64)
dp[1:] = 10 ** 18
for vv, ww in vw:
dp[vv:] = np.minimum(dp[vv:], dp[:-vv] + ww)
ans = (dp <= w).nonzero()[0]
print(ans.max())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import hashlib
import re
import os
import json
bdir = "/Users/yuanliqiang/workspace/youtube-dl/"
sdir = "/Users/yuanliqiang/youtube/"
ddir = "/Users/yuanliqiang/tmp/"
lx = 1350
ly = 1010
logo = "/Users/yuanliqiang/workspace/youtube-dl/mengmadianjing.png"
proxy = "socks5://127.0.0.1:1081"
display = {}
def gettitle(url):
# test proxy connection
cmd = "{bin}youtube-dl -F --no-check-certificate --proxy {proxy} \"{url}\"".format(
bin=bdir,
url=url,
proxy=proxy
)
print(cmd)
os.system(cmd)
# get title
shell = "{bin}youtube-dl -f 'best' --buffer-size 16k --retries infinite --no-check-certificate --proxy {proxy} --get-title \"{url}\"".format(
bin=bdir,
proxy=proxy,
url=url
)
print(shell)
p = subprocess.Popen(shell, shell=True, stdout=subprocess.PIPE)
out, err = p.communicate()
return out.strip().decode('utf-8')
def download(url):
title = re.sub(r'[\\\/\:\*\?\"\'\<\>\|\.]', "", gettitle(url))
hl = hashlib.md5()
hl.update(title.encode())
md5str = hl.hexdigest()
spath = sdir+md5str+".mp4"
dpath = ddir+md5str+".mp4"
# download vc
shell = [
"{bin}youtube-dl -f '(bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4])' --buffer-size 16k --write-thumbnail --retries infinite --no-check-certificate".format(
bin=bdir
)]
shell.append("--proxy {proxy} \"{url}\" -o '{spath}'".format(
proxy=proxy,
url=url,
spath=spath
))
print(shell)
os.system(" ".join(shell))
cmd = "ffmpeg -y -hwaccel videotoolbox -threads 4 -i {spath} -c:v h264_videotoolbox -vf \"movie={logo}[watermark];[in][watermark]overlay={lx}:{ly}\" -pix_fmt yuv420p -s hd1080 -b:v 6800K -acodec copy {dpath}".format(
spath=spath,
logo=logo,
lx=lx,
ly=ly,
dpath=dpath
)
os.system(cmd)
os.rename(dpath, ddir+title+".mp4")
if __name__ == "__main__":
with open("/Users/yuanliqiang/workspace/youtube-dl/lst","r") as f:
for line in f.readlines():
            download(line.strip())  # strip the trailing newline from each URL
|
import pytest
from src.get_minimum_cost import get_minimum_cost
cases = [
(3, [2, 5, 6], 13),
(2, [2, 5, 6], 15),
(3, [1, 3, 5, 7, 9], 29),
(2, [1, 3, 5, 7, 9], 35),
]
@pytest.mark.parametrize("friends, flowers, expected", cases)
def test_get_minimum_cost(friends, flowers, expected):
result = get_minimum_cost(friends, flowers)
assert result == expected
|
import numpy as np
a1 = np.ndarray(shape=(2, 2))  # initialized with arbitrary (random) values
print(a1)
print(a1[0])
print(a1[1])
print(a1[0][1])
a2 = np.ndarray(shape=(2, 2, 3))  # initialized with arbitrary (random) values
# two matrices, each with two rows of three values
print(a2)
print(a2[0])  # index the outermost dimension
a2[0][0] = [1, 2, 3]
print(a2[0][1])  # index two levels in: first matrix, second row
a3 = a2.reshape(2, 6)
print(a3)
a4 = np.arange(20).reshape(2, 10)
print(a4)
a5 = a4.reshape(5, 2, 2)
print(a5)
# reshape
a6 = np.ravel(a5, order='F')
print(a6)
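# A quick illustration of the ravel order (the values below are chosen only for
# demonstration): order='C' flattens row by row (last axis fastest), while
# order='F' flattens column by column (first axis fastest).
b = np.arange(6).reshape(2, 3)
print(np.ravel(b, order='C'))  # [0 1 2 3 4 5]
print(np.ravel(b, order='F'))  # [0 3 1 4 2 5]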
# IPython is better suited for this kind of practice.
|
#!/usr/bin/env python
"""
Updates ug and ug-GRADYEAR groups based on the members table.
"""
import argparse
from datetime import date
from donut.pymysql_connection import make_db
today = date.today()
# Compute the start of the current academic year.
# This is the next calendar year if the script is run between July and December.
year_end = today.year + (1 if today.month > 6 else 0)
def update_ug_groups(env):
db = make_db(env)
try:
db.begin()
with db.cursor() as cursor:
ug_group = get_ug_group(cursor)
ug_type = ug_group['type']
ug_pos_id = ug_group['pos_id']
ug_admin_positions = get_ug_admin_positions(cursor)
remove_ug_group_members(cursor, ug_pos_id)
remove_ug_year_group_members(cursor)
add_ug_group_members(cursor, ug_pos_id)
pos_ids = make_ug_year_groups(cursor, ug_type, ug_admin_positions)
add_ug_year_members(cursor, pos_ids)
db.commit()
finally:
db.close()
def get_ug_group(cursor):
query = """
SELECT type, pos_id
FROM groups NATURAL JOIN positions
WHERE group_name = 'ug' AND pos_name = 'Member'
"""
cursor.execute(query)
ug_group = cursor.fetchone()
if ug_group is None:
raise Exception('"ug" group/position not found')
return ug_group
def get_ug_admin_positions(cursor):
query = """
SELECT pos_id_from
FROM position_relations JOIN positions ON pos_id_to = pos_id NATURAL JOIN groups
WHERE group_name = 'ug' AND pos_name = 'Admin'
"""
cursor.execute(query)
return [relation['pos_id_from'] for relation in cursor.fetchall()]
def remove_ug_group_members(cursor, ug_pos_id):
cursor.execute('DELETE FROM position_holders WHERE pos_id = %s', ug_pos_id)
def remove_ug_year_group_members(cursor):
cursor.execute("""
DELETE position_holders
FROM position_holders NATURAL JOIN positions NATURAL JOIN groups
WHERE group_name LIKE 'ug-%' AND pos_name = 'Member'
""")
def add_ug_group_members(cursor, ug_pos_id):
query = """
INSERT INTO position_holders(pos_id, user_id)
SELECT %s, user_id FROM members WHERE graduation_year >= %s
"""
members = cursor.execute(query, (ug_pos_id, year_end))
print('Added', members, 'undergrads to ug')
def make_ug_year_groups(cursor, group_type, ug_admin_positions):
get_years_query = """
SELECT DISTINCT graduation_year
FROM members WHERE graduation_year IS NOT NULL
"""
get_position_query = """
SELECT pos_id
FROM groups NATURAL JOIN positions
WHERE group_name = %s AND pos_name = 'Member'
"""
make_group_query = """
INSERT INTO groups(group_name, group_desc, type, newsgroups)
VALUES (%s, %s, %s, TRUE)
"""
make_position_query = """
INSERT INTO positions(group_id, pos_name)
VALUES (%s, 'Member')
"""
make_admin_position_query = """
INSERT INTO positions(group_id, pos_name, send, control, receive)
VALUES (%s, 'Admin', TRUE, TRUE, FALSE)
"""
add_admin_relation_query = """
INSERT INTO position_relations(pos_id_from, pos_id_to) VALUES (%s, %s)
"""
pos_ids = {}
cursor.execute(get_years_query)
for graduation_year in cursor.fetchall():
year = graduation_year['graduation_year']
year_str = str(year)
group_name = 'ug-' + year_str
cursor.execute(get_position_query, group_name)
position = cursor.fetchone()
if position is not None:
pos_ids[year] = position['pos_id']
continue
group_desc = 'Undergrads graduating in ' + year_str
cursor.execute(make_group_query, (group_name, group_desc, group_type))
group_id = cursor.lastrowid
cursor.execute(make_position_query, group_id)
pos_ids[year] = cursor.lastrowid
cursor.execute(make_admin_position_query, group_id)
admin_pos_id = cursor.lastrowid
for pos_id in ug_admin_positions:
cursor.execute(add_admin_relation_query, (pos_id, admin_pos_id))
print('Created group', group_name)
return pos_ids
def add_ug_year_members(cursor, pos_ids):
query = """
INSERT INTO position_holders(pos_id, user_id)
SELECT %s, user_id FROM members WHERE graduation_year = %s
"""
for year, pos_id in pos_ids.items():
members = cursor.execute(query, (pos_id, year))
print('Added', members, 'undergrads to ug-' + str(year))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=
'Updates ug and ug-GRADYEAR groups based on the members table')
parser.add_argument(
'-e', '--env', default='dev', help='Database to update (default dev)')
args = parser.parse_args()
update_ug_groups(args.env)
|
from django.db import models
class RawCPU(models.Model):
percent = models.FloatField()
class CPU(models.Model):
percent = models.FloatField()
class ArchiveCPU(models.Model):
percent = models.FloatField()
|
from django.contrib import admin
from django.urls import path
from admin_wizard.admin import UpdateAction, UpdateDialog
from testproject.testapp import models, forms
@admin.register(models.MyModel)
class MyModelAdmin(admin.ModelAdmin):
actions = [UpdateAction(form_class=forms.RenameForm)]
def get_urls(self):
urls = [
path('<int:pk>/rename/',
UpdateDialog.as_view(model_admin=self,
model=models.MyModel,
form_class=forms.RenameForm),
name='rename')
]
return urls + super().get_urls()
|
from random import randint
from types import ListType, StringType
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag(takes_context=True)
def dfp_footer(context):
result = """
<script type="text/javascript">
var googletag = googletag || {};
googletag.cmd = googletag.cmd || [];
(function() {
var gads = document.createElement("script");
gads.async = true;
gads.type = "text/javascript";
var useSSL = "https:" == document.location.protocol;
gads.src = (useSSL ? "https:" : "http:") + "//www.googletagservices.com/tag/js/gpt.js";
var node =document.getElementsByTagName("script")[0];
node.parentNode.insertBefore(gads, node);
})();
</script>
<script type="text/javascript">
googletag.cmd.push(function() {
var stack = new Array();
var reserved = ['slot_name', 'id', 'width', 'height', 'style', 'class'];
var arr = document.getElementsByTagName('div');
for (var i=0; i<arr.length; i++) {
if (arr[i].className == 'gpt-ad') {
var slot_name = arr[i].getAttribute('slot_name');
var id = arr[i].getAttribute('id');
var width = parseInt(arr[i].getAttribute('width'));
var height = parseInt(arr[i].getAttribute('height'));
var slot = googletag.defineSlot(slot_name, [width, height], id).addService(googletag.pubads());
for (var j=0; j<arr[i].attributes.length; j++) {
var attr = arr[i].attributes[j];
if (attr.name.indexOf('data-pair-') == 0){
var key = attr.name.slice(10);
var value = attr.value.split('|');
slot.setTargeting(key, value);
}
}
stack.push(slot);
}
}
googletag.pubads().addEventListener("slotRenderEnded", function(e) {
var slotId = e.slot.getSlotId();
var $slot = $("#" + slotId.getDomId());
if($slot.find("iframe:not([id*=hidden])")
.map(function() {
return this.contentWindow.document;
})
.find("body").children().length > 0) {
$slot.closest('.advertisement').show();
}
});
googletag.pubads().collapseEmptyDivs();
googletag.enableServices();
var arr = document.getElementsByTagName('div');
for (var i=0; i<arr.length; i++) {
      if (arr[i].className == 'gpt-ad') {
        // capture the current id in an IIFE so each queued callback displays its own slot
        (function(id) {
          googletag.cmd.push(function() { googletag.display(id); });
        })(arr[i].getAttribute('id'));
      }
}
});
</script>"""
return mark_safe(result)
@register.tag
def dfp_tag(parser, token):
tokens = token.split_contents()[1:]
if len(tokens) < 4:
raise template.TemplateSyntaxError(
'dfp tag requires arguments slot_name width height targeting_key_1="targeting_value_11",..,"targeting_value_12" ...'
)
li = tokens[:3]
keyvals = {}
for l in tokens[3:]:
k, v = l.split('=')
keyvals[k] = v
return DfpTagNode(*li, **keyvals)
class DfpTagNode(template.Node):
def __init__(self, slot_name, width, height, **keyvals):
self.slot_name = template.Variable(slot_name)
self.width = template.Variable(width)
self.height = template.Variable(height)
self.keyvals = {}
for k, v in keyvals.items():
self.keyvals[template.Variable(k)] = template.Variable(v)
def render(self, context):
# Resolve values
slot_name = self.slot_name.resolve(context)
width = self.width.resolve(context)
height = self.height.resolve(context)
pairs = {}
for k, v in self.keyvals.items():
try:
k_resolved = k.resolve(context)
except template.VariableDoesNotExist:
k_resolved = k
try:
v_resolved = v.resolve(context)
except template.VariableDoesNotExist:
continue
if isinstance(v_resolved, StringType):
v_resolved = v_resolved.split(',')
elif not isinstance(v_resolved, ListType):
v_resolved = [v_resolved]
pairs[k_resolved] = v_resolved
# Prepare tag
rand_id = randint(0, 2000000000)
di = {
'rand_id': rand_id,
'slot_name': slot_name,
'width': width,
'height': height
}
result = """
<div id="div-gpt-ad-%(rand_id)s" class="gpt-ad"
style="width: %(width)dpx; height: %(height)dpx;"
slot_name="%(slot_name)s"
width="%(width)s"
height="%(height)s"
""" % di
# Append pairs
for k, v in pairs.items():
result += ' data-pair-%s="%s"' % (k, '|'.join(v))
result += "></div>"
return result
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2016 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# #*** <License> ************************************************************#
# This module is part of the package GTW.RST.TOP.MOM.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# GTW.RST.TOP.MOM.Query_Restriction
#
# Purpose
# Provide query restriction for RESTful TOP.MOM resources
#
# Revision Dates
# 30-Jul-2012 (CT) Creation (factored from GTW.NAV.E_Type.Query_Restriction)
# 11-Oct-2016 (CT) Change `GTW.HTML` to `TFL.HTML`
# ««revision-date»»···
#--
from _TFL.HTML import Styler_Safe
from _GTW._RST._MOM.Query_Restriction import *
import _GTW._RST._TOP._MOM
from _TFL.I18N import _, _T, _Tn
from _TFL.pyk import pyk
_Ancestor = RST_Query_Restriction
class TOP_Query_Restriction (_Ancestor) :
"""Query restriction for RESTful TOP.MOM resources."""
_real_name = "Query_Restriction"
@TFL.Meta.Class_and_Instance_Method
def _qop_desc (soc, qop) :
return TFL.Record \
( desc = _T (qop.desc)
, label = Styler_Safe (_T (qop.op_sym))
)
# end def _qop_desc
Query_Restriction = TOP_Query_Restriction # end class
class TOP_Query_Restriction_Spec (MOM.Attr.Querier.E_Type) :
"""Query restriction spec for a E_Type-specific page."""
_real_name = "Query_Restriction_Spec"
def __init__ (self, E_Type, field_names = None) :
sel = MOM.Attr.Selector.Name (* field_names) if field_names else None
self.__super.__init__ (E_Type, sel)
# end def __init__
@property
def As_Json_Cargo (self) :
result = self.__super.As_Json_Cargo
op_map = result ["op_map"]
for k, v in pyk.iteritems (op_map) :
v ["label"] = Styler_Safe (v ["sym"])
return result
# end def As_Json_Cargo
Query_Restriction_Spec = TOP_Query_Restriction_Spec # end class
if __name__ != "__main__" :
GTW.RST.TOP.MOM._Export ("*")
### __END__ GTW.RST.TOP.MOM.Query_Restriction
|
name = "openEPhys_DACQ"
|
import enum
import json
import unittest
from typing import List, Optional, Dict
from pynlp.utils.config import ConfigDict, _type_check, ConfigDictParser
from pynlp.utils.type_inspection import get_contained_type
class ConfigA(ConfigDict):
# def __init__(self, **kwargs):
# super().__init__(**kwargs)
field_a: float = 0.4
field_b: str = "Test A"
class ConfigB(ConfigA):
required_a: int
field_b: str = "Test AB"
field_c: float = 0.5
field_d: str = "Test B"
class ConfigC(ConfigDict):
a: ConfigA = ConfigA()
field_c: float = 0.5
field_d: str = "Test B"
class ConfigD(ConfigDict):
a_list: List[ConfigA] = [ConfigA()]
a_dict: Dict[str, ConfigA] = {"a_key": ConfigA()}
class Base(ConfigDict):
field_a: float = 1e-2
class VariantA(Base):
field_a: float = 1e-3
special: str = "apple"
class VariantB(Base):
field_a: float = 1e-2
special: str = "oranges"
is_citrus: bool = True
class VariantC(Base):
field_a: float = 1e-1
special: int = 3
bounds: List[int] = [1, 2]
class AnEnum(enum.Enum):
APPLE = 1
BANANA = 2
CARROT = "3"
class WithEnum(ConfigDict):
field: AnEnum = AnEnum.APPLE
class ConfigDictTest(unittest.TestCase):
def test_generic_types(self):
"""
        Test that get_contained_type works: this is an unstable interface, so all the more reason to test.
:return:
"""
self.assertEqual([int, ], get_contained_type(List[int]))
self.assertEqual([ConfigA, ], get_contained_type(List[ConfigA]))
self.assertEqual([int, type(None), ], get_contained_type(Optional[int]))
def test_type_checking(self):
self.assertTrue(_type_check(False, bool))
self.assertFalse(_type_check(0, bool))
self.assertTrue(_type_check(3, float))
self.assertTrue(_type_check(3.0, float))
self.assertFalse(_type_check("3", float))
self.assertTrue(_type_check(3, int))
self.assertFalse(_type_check(3.0, int))
self.assertFalse(_type_check("3", int))
self.assertTrue(_type_check("3", str))
self.assertFalse(_type_check(3, str))
self.assertTrue(_type_check([], List[int]))
self.assertTrue(_type_check([3, 4], List[int]))
self.assertFalse(_type_check("4", List[int]))
self.assertFalse(_type_check([3, "4"], List[int]))
self.assertTrue(_type_check(3, Optional[int]))
self.assertTrue(_type_check(None, Optional[int]))
self.assertFalse(_type_check("4", Optional[int]))
# test type check None
self.assertTrue(_type_check(None, type(None)))
self.assertFalse(_type_check(3, type(None)))
# test type check totally unsupported type
class Unsupported:
pass
with self.assertRaises(ValueError):
self.assertFalse(_type_check(3, Unsupported))
# test config dict type but pass in non dict
self.assertFalse(_type_check(3, ConfigA))
# test dict type but pass in non dict
self.assertFalse(_type_check(3, dict))
# test type check with error inside nested config dict
class BadValidate(ConfigDict):
a: int
def _validate(self):
assert False
self.assertFalse(_type_check({'a': 3}, BadValidate))
def test_default(self):
config = ConfigA()
self.assertAlmostEqual(config.field_a, 0.4)
self.assertEqual(config.field_b, "Test A")
def test_setting(self):
config = ConfigA(field_b="TestTest A", field_a=4)
self.assertAlmostEqual(config.field_a, 4.)
self.assertEqual(config.field_b, "TestTest A")
def test_typing(self):
self.assertRaises(ValueError, lambda: ConfigA(field_b=3.14))
def test_subclassing(self):
config = ConfigB(required_a=3)
self.assertAlmostEqual(config.field_a, 0.4)
self.assertEqual(config.field_b, "Test AB")
self.assertAlmostEqual(config.field_c, 0.5)
self.assertEqual(config.field_d, "Test B")
self.assertEqual(config.required_a, 3)
self.assertRaises(ValueError, lambda: ConfigB(required_a=3, field_b=3.14))
def test_required_properties(self):
self.assertRaises(ValueError, lambda: ConfigB())
def test_serialization(self):
config = ConfigB(required_a=4)
self.assertEqual(config.as_json(), {
"field_a": 0.4,
"field_b": "Test AB",
"field_c": 0.5,
"field_d": "Test B",
"required_a": 4,
})
config_ = ConfigB(**config.as_json())
self.assertEqual(config, config_)
def test_nested_serialization(self):
config = ConfigC()
self.assertEqual(config.as_json(), {
"a": {
"field_a": 0.4,
"field_b": "Test A",
},
"field_c": 0.5,
"field_d": "Test B",
})
config_ = ConfigC(**json.loads(json.dumps(config.as_json())))
self.assertEqual(config, config_)
config = ConfigD()
config_ = ConfigD(**json.loads(json.dumps(config.as_json())))
self.assertEqual(config, config_)
def test_casting(self):
# Without values.
config = Base()
self.assertEqual(config.as_json(), {
"field_a": 1e-2,
})
self.assertEqual({
# Note that this value is not going to be updated because up-casting can't override default values.
"field_a": 1e-2,
"special": "apple",
}, VariantA.cast(config).as_json())
self.assertEqual({
"field_a": 1e-2,
"special": "oranges",
"is_citrus": True,
}, VariantB.cast(config).as_json())
self.assertEqual({
"field_a": 1e-2,
"special": 3,
"bounds": [1, 2],
}, VariantC.cast(config).as_json())
data = {
"special": 5,
"bounds": [4, 6],
}
config = Base(**data)
self.assertEqual({
"field_a": 1e-2,
"special": 5,
"bounds": [4, 6],
}, VariantC.cast(config).as_json())
class Unsupported:
pass
class BadConfig(ConfigDict):
field_a: Unsupported
config = Base(**{'field_a': 1e-2})
with self.assertRaises(ValueError):
BadConfig.cast(config)
def test_enums(self):
self.assertEqual({"field": "APPLE"}, WithEnum().as_json())
self.assertEqual(AnEnum.APPLE, WithEnum(field="APPLE").field)
self.assertEqual(AnEnum.BANANA, WithEnum(field="BANANA").field)
self.assertEqual(AnEnum.CARROT, WithEnum(field="CARROT").field)
def test_parsing(self):
self.assertEqual({}, ConfigDictParser.parse(''))
# String quoting
self.assertEqual({'name': 'the "best" name'},
ConfigDictParser.parse('name="the \\"best\\" name"'))
self.assertEqual({'classes': ["a.b", "b.c", "c.d", "none"]},
ConfigDictParser.parse('classes = [a.b, b.c, c.d, "none"]'))
# Typing
self.assertEqual({'epochs': 10}, ConfigDictParser.parse('epochs=10'))
self.assertEqual({'early_stop': True}, ConfigDictParser.parse('early_stop=true'))
# Lists
self.assertEqual({'ff_dim': [128, 128, 512]},
ConfigDictParser.parse('ff_dim=[128, 128, 512]'))
self.assertEqual({'classes': ["general.yes", "general.no"]}, ConfigDictParser
.parse('classes=[\"general.yes\",\"general.no\"]'))
# Hierarchical 1
self.assertEqual({'optimizer': {"type": "adam"}},
ConfigDictParser.parse('optimizer.type=adam'))
# Hierarchical 2
self.assertEqual({'optimizer': {
"type": "adam", "lr": 1e-4, "betas": [1e-4, 1e-5],
"scheduler": {"type": "ReduceLROnPlateau"}}
}, ConfigDictParser.parse(
'optimizer=(type=adam lr=1e-4 betas=[1e-4, 1e-5] scheduler=(type=ReduceLROnPlateau))')
)
# Dicts
self.assertEqual({'a': {'hi': 1, 'hello': 2}}, ConfigDictParser.parse("a.hi=1 a.hello=2"))
# Booleans
self.assertEqual({'a': True, 'b': False},
ConfigDictParser.parse("a=true b=false"))
self.assertEqual({'a': True, 'b': False},
ConfigDictParser.parse("a=True b=False"))
def test_nested_update(self):
class A(ConfigDict):
field1: str
field2: str
class B(ConfigDict):
a: A
cfg = B(**ConfigDictParser.parse("a.field1=x a.field2=y"))
self.assertEqual({"a": {"field1": "x", "field2": "y"}}, cfg.as_json())
cfg.update(**ConfigDictParser.parse("a.field1=z"))
self.assertEqual({"a": {"field1": "z", "field2": "y"}}, cfg.as_json())
def test_validate(self):
class A(ConfigDict):
field1: str
field2: str
def _validate(self):
assert self.field1 == "correct"
self.assertEqual(
{'field1': 'correct', 'field2': 'something'},
A(**{'field1': 'correct', 'field2': 'something'}).as_json()
)
with self.assertRaises(ValueError):
A(**{'field1': 'wrong', 'field2': 'something'})
def test_nested_configs(self):
class Parent:
class A(ConfigDict):
field1: str
class B(Parent.A):
pass
class D(B):
field2: str = "b"
with self.assertRaises(ValueError):
D()
cfg = D(field1="a")
self.assertEqual({"field1": "a", "field2": "b"}, cfg.as_json())
self.assertEqual("a", cfg.field1)
self.assertEqual("b", cfg.field2)
def test_optional_subconfig(self):
class OptionalSubConfig(ConfigDict):
field: Optional[ConfigA] = None
cfg = OptionalSubConfig()
self.assertEqual({"field": None}, cfg.as_json())
cfg = OptionalSubConfig(field={"field_a": 1.2, "field_b": "b"})
self.assertEqual({"field": {"field_a": 1.2, "field_b": "b"}}, cfg.as_json())
self.assertIsInstance(cfg.field, ConfigA)
def test_list(self):
class A(ConfigDict):
field1: List[float]
cfg = A(field1=[1.1, 1.2, 1.3])
self.assertEqual({"field1": [1.1, 1.2, 1.3]}, cfg.as_json())
self.assertEqual(1.1, cfg.field1[0])
self.assertEqual(1.2, cfg.field1[1])
self.assertEqual(1.3, cfg.field1[2])
with self.assertRaises(ValueError):
A(field1=["1.1", "1.2", "1.3"])
def test_dict(self):
class A(ConfigDict):
field1: Dict[str, int]
cfg = A(field1={"a": 1, "b": 2})
self.assertEqual({"field1": {"a": 1, "b": 2}}, cfg.as_json())
self.assertEqual(1, cfg.field1["a"])
self.assertEqual(2, cfg.field1["b"])
with self.assertRaises(ValueError):
A(field1={"a": 1, "b": "b"})
|
sc.addPyFile('magichour.zip')
from magichour.api.dist.templates.templateGen import gen_tamplate_from_logs, read_logs_from_uri
transforms_URI = 'hdfs://namenode/magichour/simpleTrans'
raw_log_URI = 'hdfs://namenode/magichour/tbird.500k.gz'
template_output_URI = 'hdfs://namenode/magichour/templates'
support = 1000
# Read in log file RDD
# Note: You may want to set persistence to MEMORY_ONLY or
# MEMORY_AND_DISK_SER depending on data size
preprocessed_log_rdd = read_logs_from_uri(
sc,
raw_log_URI,
preprocess_log=True,
transforms_URI=transforms_URI).cache()
# Generate Templates
templates = gen_tamplate_from_logs(sc, preprocessed_log_rdd, support)
# Persist to disk for subsequent Analysis
sc.parallelize(templates, 1).saveAsPickleFile(template_output_URI)
|
import asyncio
import contextlib
from unittest import mock
import datetime
import base64
import pytest
from ircd import IRC, Server
from ircd.irc import SERVER_NAME, SERVER_VERSION
pytestmark = pytest.mark.asyncio
HOST = "localhost"
ADDRESS = "127.0.0.1"
PORT = 9001
@contextlib.asynccontextmanager
async def connect(address=ADDRESS, port=PORT):
reader, writer = await asyncio.open_connection(address, port)
yield reader, writer
writer.close()
@contextlib.asynccontextmanager
async def server_conn(address=ADDRESS, port=PORT):
irc = IRC(HOST)
server = Server(irc, ping_interval=5)
asyncio.create_task(server.run(address, port))
await server.running.wait()
async with connect(address, port) as (reader, writer):
yield irc, reader, writer
await server.shutdown()
async def send(conn, messages):
conn.write(("\r\n".join(messages) + "\r\n").encode())
await conn.drain()
# FIXME nasty hack
async def readall(reader):
lines = []
while True:
try:
b = await asyncio.wait_for(reader.readline(), .1)
if not b:
break
except asyncio.exceptions.TimeoutError:
break
line = b.strip().decode()
assert " PING " not in line
lines.append(line)
return lines
async def ident(reader, writer, irc, nick):
await send(writer, [
"NICK {}".format(nick),
"USER {} 0 * :{}".format(nick, nick)
])
assert await readall(reader) == [
":{}!{}@{} NICK :{}".format(nick, nick, HOST, nick),
":localhost 001 {} :Welcome to the Internet Relay Network {}!{}@{}".format(nick, nick, nick, HOST),
":localhost 002 {} :Your host is {}, running version {}".format(nick, SERVER_NAME, SERVER_VERSION),
":localhost 003 {} :This server was created {}".format(nick, irc.created),
":localhost 004 {} :{} {} abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ".format(nick, SERVER_NAME, SERVER_VERSION),
':localhost 005 {} :AWAYLEN= CASEMAPPING=ascii CHANLIMIT= CHANTYPES=#'.format(nick),
':localhost 251 * :There are {} user(s) on {} server(s)'.format(len(irc.nicknames), len(irc.links) + 1),
':localhost 252 0 :There are 0 operator(s) online',
':localhost 254 {} :There are {} channels(s) formed'.format(len(irc.channels), len(irc.channels)),
':localhost 255 * :I have {} client(s) and {} server(s)'.format(len(irc.clients), len(irc.links) + 1),
':localhost 221 {} :+s'.format(nick),
':localhost 375 {} :- message of the day -'.format(nick),
':localhost 372 {} :hello world'.format(nick),
':localhost 376 {} :- end of message -'.format(nick),
]
async def join(reader, writer, irc, nickname, channel_name):
await send(writer, [
"JOIN {}".format(channel_name)
])
replies = await readall(reader)
channel = irc.get_channel(channel_name)
assert channel
assert irc.get_nickname(nickname) in channel.members
members = sorted([member.nickname for member in channel.members])
assert replies == [
":{}!{}@{} JOIN :{}".format(nickname, nickname, HOST, channel_name),
":localhost 331 {} :{}".format(nickname, channel_name),
":localhost 353 {} = {} :{}".format(nickname, channel_name, " ".join(members)),
":localhost 366 {} {} :End of /NAMES list.".format(nickname, channel_name),
]
async def part(reader, writer, irc, nickname, chan, message=None):
if message:
cmd = "PART {} :{}".format(chan, message)
else:
cmd = "PART {}".format(chan)
await send(writer, [cmd])
if message:
value = ":{}!{}@{} PART {} :{}".format(nickname, nickname, HOST, chan, message)
else:
value = ":{}!{}@{} PART :{}".format(nickname, nickname, HOST, chan)
assert await readall(reader) == [value]
channel = irc.get_channel(chan)
nickname = irc.get_nickname(nickname)
if nickname and channel:
assert nickname not in channel.members
@pytest.mark.asyncio
async def test_ident():
async with server_conn() as (irc, reader, writer):
await ident(reader, writer, irc, "foo")
"foo" in irc.nick_client
"foo" in irc.nicknames
client = irc.lookup_client("foo")
assert client.has_identity
assert client.has_nickname
@pytest.mark.asyncio
async def test_nick():
async with server_conn() as (irc, reader, writer):
await ident(reader, writer, irc, "foo")
client = irc.lookup_client("foo")
assert client.name == "foo"
assert "foo" in irc.nicknames
await send(writer, [
"NICK :bar"
])
assert await readall(reader) == [":foo!foo@localhost NICK :bar"]
assert "foo" not in irc.nicknames
assert client.name == "bar"
assert "bar" in irc.nicknames
@pytest.mark.asyncio
async def test_join_part():
async with server_conn() as (irc, reader, writer):
await ident(reader, writer, irc, "foo")
await join(reader, writer, irc, "foo", "#")
assert "#" in irc.channels
channel = irc.channels["#"]
assert channel.name == "#"
assert [member.nickname for member in channel.members] == ["foo"]
assert channel.owner.nickname == "foo"
nickname = irc.get_nickname("foo")
assert [chan.name for chan in nickname.channels] == ["#"]
await part(reader, writer, irc, "foo", "#", message="byebye")
assert channel.members == []
assert [chan.name for chan in nickname.channels] == []
await join(reader, writer, irc, "foo", "#")
assert "#" not in irc.channels
@pytest.mark.asyncio
async def test_join_key():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await join(reader_a, writer_a, irc, "foo", "#")
await send(writer_a, [
"MODE # +k :sekret"
])
assert await readall(reader_a) == [
":foo!foo@localhost MODE # +k :sekret"
]
assert irc.channels["#"].key == "sekret"
await ident(reader_b, writer_b, irc, "bar")
await send(writer_b, [
"JOIN :#"
])
assert await readall(reader_b) == [
":localhost 475 bar :# :Cannot join channel (+k)"
]
await send(writer_b, [
"JOIN # :sekret"
])
assert (await readall(reader_b))[:1] == [
":bar!bar@localhost JOIN :#"
]
@pytest.mark.asyncio
async def test_privmsg_channel():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
# not joined yet
await send(writer_a, [
"PRIVMSG # :hello world"
])
assert await readall(reader_a) == [
":localhost 403 foo :# No such nick/channel"
]
await join(reader_a, writer_a, irc, "foo", "#")
await ident(reader_b, writer_b, irc, "bar")
await join(reader_b, writer_b, irc, "bar", "#")
channel = irc.channels["#"]
assert [member.nickname for member in channel.members] == ["foo", "bar"]
await send(writer_a, [
"PRIVMSG # :hello world"
])
assert await readall(reader_a) == [
":bar!bar@localhost JOIN :#"
] # no reply to self
assert await readall(reader_b) == [
":foo!foo@localhost PRIVMSG # :hello world"
]
await part(reader_a, writer_a, irc, "foo", "#")
await send(writer_a, [
"PRIVMSG # :hello world"
])
assert await readall(reader_a) == [
":localhost 441 :foo"
]
@pytest.mark.asyncio
async def test_privmsg_client():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await ident(reader_b, writer_b, irc, "bar")
await send(writer_a, [
"PRIVMSG bar :hello world"
])
assert await readall(reader_b) == [
":foo!foo@localhost PRIVMSG bar :hello world"
]
@pytest.mark.asyncio
async def test_notice_client():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await ident(reader_b, writer_b, irc, "bar")
await send(writer_a, [
"NOTICE bar :hello world"
])
assert await readall(reader_b) == [
":foo!foo@localhost NOTICE bar :hello world"
]
@pytest.mark.asyncio
async def test_notice_channel():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await join(reader_a, writer_a, irc, "foo", "#")
await ident(reader_b, writer_b, irc, "bar")
await join(reader_b, writer_b, irc, "bar", "#")
await send(writer_a, [
"NOTICE # :hello world"
])
assert await readall(reader_a) == [
":bar!bar@localhost JOIN :#"
] # no reply to self
assert await readall(reader_b) == [
":foo!foo@localhost NOTICE # :hello world"
]
@pytest.mark.asyncio
async def test_user_mode():
async with server_conn() as (irc, reader, writer):
await ident(reader, writer, irc, "foo")
nickname = irc.nicknames["foo"]
assert nickname.mode.mode == "s"
await send(writer, [
"MODE foo :+i"
])
assert await readall(reader) == [
":foo!foo@localhost MODE foo :+i"
]
assert nickname.is_invisible
assert nickname.mode.mode == "is"
await send(writer, [
"MODE foo :-i"
])
assert await readall(reader) == [
":foo!foo@localhost MODE foo :-i"
]
assert not nickname.is_invisible
assert nickname.mode.mode == "s"
@pytest.mark.asyncio
async def test_channel_mode():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await join(reader_a, writer_a, irc, "foo", "#")
await ident(reader_b, writer_b, irc, "bar")
await join(reader_b, writer_b, irc, "bar", "#")
assert await readall(reader_a) == [
":bar!bar@localhost JOIN :#"
]
channel = irc.channels["#"]
# check operator
await send(writer_b, [
"MODE # :+n"
])
assert await readall(reader_b) == [
":localhost 482 bar :# You're not channel operator"
]
await send(writer_a, [
"MODE # :+n"
])
assert await readall(reader_a) == [
":foo!foo@localhost MODE # :+n"
]
assert await readall(reader_b) == [
":foo!foo@localhost MODE # :+n"
]
assert channel.mode.mode == "n"
await send(writer_a, [
"MODE # :-n"
])
assert await readall(reader_a) == [
":foo!foo@localhost MODE # :-n"
]
assert channel.mode.mode == ""
@pytest.mark.asyncio
async def test_channel_operator():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await join(reader_a, writer_a, irc, "foo", "#")
await ident(reader_b, writer_b, irc, "bar")
await join(reader_b, writer_b, irc, "bar", "#")
assert await readall(reader_a) == [
":bar!bar@localhost JOIN :#"
]
channel = irc.channels["#"]
await send(writer_a, [
"MODE # +o :bar"
])
assert await readall(reader_a) == [
":foo!foo@localhost MODE # +o :bar"
]
assert await readall(reader_b) == [
":foo!foo@localhost MODE # +o :bar"
]
nickname = irc.get_nickname("bar")
assert nickname in channel.operators
await send(writer_a, [
"MODE # -o :bar"
])
assert await readall(reader_a) == [
":foo!foo@localhost MODE # -o :bar"
]
assert await readall(reader_b) == [
":foo!foo@localhost MODE # -o :bar"
]
assert nickname not in channel.operators
@pytest.mark.asyncio
async def test_set_channel_secret():
async with server_conn() as (irc, reader, writer):
await ident(reader, writer, irc, "foo")
await join(reader, writer, irc, "foo", "#")
await send(writer, [
"MODE # :+k"
])
assert await readall(reader) == [
":localhost 461 foo MODE :Not enough parameters"
]
await send(writer, [
"MODE # +k :sekret"
])
assert await readall(reader) == [
":foo!foo@localhost MODE # +k :sekret"
]
assert irc.channels["#"].key == "sekret"
await send(writer, [
"MODE # -k"
])
assert await readall(reader) == [
":foo!foo@localhost MODE # :-k"
]
assert irc.channels["#"].key is None
@pytest.mark.asyncio
async def test_topic():
async with server_conn() as (irc, reader, writer):
with mock.patch("time.time") as time_patch:
time_patch.return_value = 1562815441
await ident(reader, writer, irc, "foo")
await join(reader, writer, irc, "foo", "#")
await send(writer, [
"TOPIC #"
])
assert await readall(reader) == [
":localhost 331 foo :#"
]
await send(writer, [
"TOPIC # :hello world"
])
assert await readall(reader) == [
":localhost 332 foo # :hello world",
":localhost 333 foo # foo :1562815441",
]
channel = irc.get_channel("#")
assert channel.topic == "hello world"
await send(writer, [
"TOPIC #"
])
assert await readall(reader) == [
":localhost 332 foo # :hello world",
]
@pytest.mark.asyncio
async def test_invite():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await join(reader_a, writer_a, irc, "foo", "#")
await ident(reader_b, writer_b, irc, "bar")
await send(writer_a, [
"MODE # +i"
])
assert await readall(reader_a) == [
":foo!foo@localhost MODE # :+i"
]
channel = irc.get_channel("#")
nick_b = irc.get_nickname("bar")
assert channel.is_invite_only
assert not channel.can_join_channel(nick_b)
await send(writer_b, [
"JOIN #"
])
assert await readall(reader_b) == [
":localhost 473 bar :# :Cannot join channel (+i)"
]
await send(writer_a, [
"INVITE bar #"
])
assert await readall(reader_a) == [
":localhost 341 foo # :bar"
]
assert channel.can_join_channel(nick_b)
assert await readall(reader_b) == [
":foo!foo@localhost INVITE bar :#"
]
await send(writer_b, [
"JOIN #"
])
assert (await readall(reader_b))[:1] == [
":bar!bar@localhost JOIN :#"
]
@pytest.mark.asyncio
async def test_ban():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await join(reader_a, writer_a, irc, "foo", "#")
await ident(reader_b, writer_b, irc, "bar")
channel = irc.get_channel("#")
assert not channel.is_banned("bar!bar@localhost")
await send(writer_a, [
"MODE # +b *!*@localhost"
])
assert await readall(reader_a) == [
":foo!foo@localhost MODE # +b :*!*@localhost"
]
assert channel.is_banned("bar!bar@localhost")
await send(writer_b, [
"JOIN #"
])
assert await readall(reader_b) == [
":localhost 474 bar :# :Cannot join channel (+b)"
]
await send(writer_a, [
"MODE # +e *!*@localhost"
])
await readall(reader_a)
assert not channel.is_banned("bar!bar@localhost")
await send(writer_a, [
"MODE # -e *!*@localhost"
])
await readall(reader_a)
assert channel.is_banned("bar!bar@localhost")
await send(writer_a, [
"MODE # -b *!*@localhost"
])
await readall(reader_a)
assert not channel.is_banned("bar!bar@localhost")
@pytest.mark.asyncio
async def test_kick():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await join(reader_a, writer_a, irc, "foo", "#")
await ident(reader_b, writer_b, irc, "bar")
await send(writer_a, [
"MODE # +i"
])
assert await readall(reader_a) == [
":foo!foo@localhost MODE # :+i"
]
channel = irc.get_channel("#")
nick_b = irc.get_nickname("bar")
assert channel.is_invite_only
assert not channel.can_join_channel(nick_b)
await send(writer_a, [
"INVITE bar #"
])
assert await readall(reader_a) == [
":localhost 341 foo # :bar"
]
assert channel.can_join_channel(nick_b)
assert await readall(reader_b) == [
":foo!foo@localhost INVITE bar :#"
]
await join(reader_b, writer_b, irc, "bar", "#")
assert nick_b in channel.members
await send(writer_a, [
"KICK # bar :get out!"
])
assert await readall(reader_b) == [
":foo!foo@localhost KICK # bar :get out!"
]
assert nick_b not in channel.members
assert not channel.can_join_channel(nick_b)
@pytest.mark.asyncio
async def test_names():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await join(reader_a, writer_a, irc, "foo", "#")
await ident(reader_b, writer_b, irc, "bar")
await join(reader_b, writer_b, irc, "bar", "#")
assert await readall(reader_a) == [
":bar!bar@localhost JOIN :#",
]
await send(writer_a, [
"NAMES #"
])
assert await readall(reader_a) == [
":localhost 353 foo = # :bar foo",
":localhost 366 foo # :End of /NAMES list."
]
@pytest.mark.asyncio
async def test_list():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await ident(reader_b, writer_b, irc, "bar")
for name in ["#foo", "#bar", "#baz"]:
await join(reader_a, writer_a, irc, "foo", name)
channel = irc.channels[name]
await join(reader_b, writer_b, irc, "bar", name)
assert await readall(reader_a) == [
":bar!bar@localhost JOIN :" + name,
]
channel.topic = name * 3
await send(writer_a, [
"LIST"
])
assert await readall(reader_a) == [
":localhost 321 foo Channel Users :Name",
":localhost 322 foo #foo 2 :#foo#foo#foo",
":localhost 322 foo #bar 2 :#bar#bar#bar",
":localhost 322 foo #baz 2 :#baz#baz#baz",
":localhost 323 foo :End of /LIST",
]
@pytest.mark.asyncio
async def test_away():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await ident(reader_b, writer_b, irc, "bar")
await send(writer_a, [
"AWAY :gone fishin"
])
assert await readall(reader_a) == [
":localhost 306 foo :You have been marked as being away"
]
nickname = irc.get_nickname("foo")
assert nickname.is_away
assert nickname.away_message == "gone fishin"
await send(writer_b, [
"PRIVMSG foo :hello"
])
assert await readall(reader_b) == [
":foo!foo@localhost 301 bar foo :gone fishin"
]
await send(writer_a, [
"AWAY"
])
assert await readall(reader_a) == [
":localhost 305 foo :You are no longer marked as being away"
]
@pytest.mark.asyncio
async def test_server():
async with server_conn() as (irc, reader, writer):
assert len(irc.links) == 0
await send(writer, [
"SERVER foo 0 abcdef hello"
])
await readall(reader)
assert len(irc.links) == 1
@pytest.mark.asyncio
async def test_capabilities():
async with server_conn() as (irc, reader, writer):
await send(writer, [
"CAP BLAH",
])
resp = await readall(reader)
print(resp)
assert resp == [
':localhost 410 * BLAH :Invalid capability command'
]
await send(writer, [
"CAP LS",
"CAP REQ :foo bar baz",
])
resp = await readall(reader)
print(resp)
assert resp == [
':localhost CAP * LS :message-tags server-time message-ids sasl',
':localhost CAP * NAK :foo bar baz',
]
await ident(reader, writer, irc, "foo")
client = irc.lookup_client("foo")
assert client.capabilities == []
async with server_conn() as (irc, reader, writer):
with mock.patch("ircd.irc.IRC.get_capabilities") as caps_patch:
caps_patch.return_value = ["foo"]
await send(writer, [
"CAP LS",
"CAP REQ :foo",
])
resp = await readall(reader)
print(resp)
assert resp == [
':localhost CAP * LS :foo',
':localhost CAP * ACK :foo',
]
await ident(reader, writer, irc, "foo")
client = irc.lookup_client("foo")
assert client.capabilities == ["foo"]
@pytest.mark.asyncio
async def test_message_tags():
async with server_conn() as (irc, reader, writer):
# no tags cap
await ident(reader, writer, irc, "foo")
await send(writer, [
"@aaa=bbb;ccc;example.com/ddd=eee PRIVMSG foo :Hello",
])
resp = await readall(reader)
assert resp == [
':foo!foo@localhost PRIVMSG foo :Hello'
]
# enable tags cap
await send(writer, [
"CAP REQ :message-tags",
])
resp = await readall(reader)
assert resp == [
':localhost CAP foo ACK :message-tags'
]
await send(writer, [
"@aaa=bbb;ccc;+example.com/ddd=eee PRIVMSG foo :Hello",
])
resp = await readall(reader)
assert resp == [
'@+example.com/ddd=eee :foo!foo@localhost PRIVMSG foo :Hello'
]
@pytest.mark.asyncio
async def test_server_time():
time = datetime.datetime(2019, 12, 27, 1, 2, 3)
async with server_conn() as (irc, reader, writer):
await ident(reader, writer, irc, "foo")
with mock.patch("ircd.message.utcnow") as time_patch:
time_patch.return_value = time
# enable tags cap
await send(writer, [
"CAP REQ :message-tags server-time",
])
resp = await readall(reader)
assert resp == [
'@time=2019-12-27T01:02:03Z :localhost CAP foo ACK :message-tags server-time'
]
await send(writer, [
"@aaa=bbb;ccc;+example.com/ddd=eee PRIVMSG foo :Hello",
])
resp = await readall(reader)
assert resp == [
'@+example.com/ddd=eee;time=2019-12-27T01:02:03Z :foo!foo@localhost PRIVMSG foo :Hello'
]
@pytest.mark.asyncio
async def test_tagmsg_channel():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await join(reader_a, writer_a, irc, "foo", "#")
await send(writer_a, [
"CAP REQ :message-tags",
])
resp = await readall(reader_a)
assert resp == [
':localhost CAP foo ACK :message-tags'
]
await ident(reader_b, writer_b, irc, "bar")
await join(reader_b, writer_b, irc, "bar", "#")
await send(writer_b, [
"CAP REQ :message-tags",
])
resp = await readall(reader_b)
assert resp == [
':localhost CAP bar ACK :message-tags'
]
await send(writer_a, [
"@+example.com/ddd=eee TAGMSG #"
])
resp = await readall(reader_b)
assert resp == [
"@+example.com/ddd=eee :foo!foo@localhost TAGMSG :#"
]
@pytest.mark.asyncio
async def test_tagmsg_client():
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await ident(reader_a, writer_a, irc, "foo")
await send(writer_a, [
"CAP REQ :message-tags",
])
resp = await readall(reader_a)
assert resp == [
':localhost CAP foo ACK :message-tags'
]
await ident(reader_b, writer_b, irc, "bar")
await send(writer_a, [
"@+example.com/ddd=eee TAGMSG bar"
])
# no tagmsg
resp = await readall(reader_b)
assert resp == []
await send(writer_b, [
"CAP REQ :message-tags",
])
resp = await readall(reader_b)
assert resp == [
':localhost CAP bar ACK :message-tags'
]
await send(writer_a, [
"@+example.com/ddd=eee TAGMSG bar"
])
resp = await readall(reader_b)
assert resp == [
"@+example.com/ddd=eee :foo!foo@localhost TAGMSG :bar"
]
@pytest.mark.asyncio
async def test_message_ids():
async with server_conn() as (irc, reader, writer):
await ident(reader, writer, irc, "foo")
with mock.patch("ircd.message.generate_id") as id_patch:
id_patch.return_value = "XXX"
await send(writer, [
"CAP REQ :message-tags message-ids",
])
resp = await readall(reader)
print(resp)
assert resp == [
'@msgid=XXX :localhost CAP foo ACK :message-tags message-ids'
]
@pytest.mark.asyncio
async def test_sasl():
password = base64.b64encode(b"foo \x00 bar \x00 baz ")
bad_pass = base64.b64encode(b"qux \x00 qux \x00 qux ")
async with server_conn() as (irc, reader_a, writer_a), connect() as (reader_b, writer_b):
await send(writer_a, [
"CAP REQ :sasl",
])
resp = await readall(reader_a)
print(resp)
assert resp == [
':localhost CAP * ACK :sasl'
]
await send(writer_a, [
"AUTHENTICATE PLAIN",
])
resp = await readall(reader_a)
print(resp)
assert resp == [
':localhost AUTHENTICATE +'
]
# bad auth
await send(writer_a, [
"AUTHENTICATE xxx"
])
resp = await readall(reader_a)
print(resp)
assert resp == [
':localhost 904 * :SASL authentication failed',
]
# good auth
await send(writer_a, [
"AUTHENTICATE " + password.decode("utf-8"),
])
resp = await readall(reader_a)
print(resp)
assert resp == [
':localhost 900 * :you are now logged in',
':localhost 903 * :SASL authentication successful'
]
await send(writer_b, [
"CAP REQ :sasl",
])
resp = await readall(reader_b)
print(resp)
assert resp == [
':localhost CAP * ACK :sasl'
]
await send(writer_b, [
"AUTHENTICATE PLAIN",
])
resp = await readall(reader_b)
print(resp)
assert resp == [
':localhost AUTHENTICATE +'
]
# bad auth
await send(writer_b, [
"AUTHENTICATE " + bad_pass.decode(),
])
resp = await readall(reader_b)
print(resp)
assert resp == [
':localhost 904 * :SASL authentication failed',
]
@pytest.mark.asyncio
async def test_motd():
async with server_conn() as (irc, reader, writer):
await ident(reader, writer, irc, "foo")
await send(writer, [
"MOTD",
])
resp = await readall(reader)
print(resp)
assert resp == [
':localhost 375 foo :- message of the day -',
':localhost 372 foo :hello world',
':localhost 376 foo :- end of message -',
]
|
"""REST endpoint for the OpenAPI specification YAML file."""
from marshmallow import Schema, fields
import yaml
from .base import BaseResource
from .. import db
class OpenApiSchema(Schema):
info = fields.Dict()
paths = fields.Dict()
tags = fields.List(fields.Str())
openapi = fields.Str()
class OpenApiResource(BaseResource):
path = "/openapi"
schema = OpenApiSchema
operations = {
"get": {
"summary": "Get the OpenAPI specification",
"tags": ["info"],
"responses": {
200: {
"description": "OK",
"content": {"application/json": {"schema": schema}},
}
},
}
}
def get(self):
"""Return OpenAPI spec YAML from root dir."""
with open("openapi_spec.yaml", "r") as stream:
data_loaded = yaml.safe_load(stream)
return data_loaded
|
# Step 1: Import packages, functions, and classes
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib
from yellowbrick.regressor import prediction_error
from sklearn.linear_model import LassoCV
from yellowbrick.regressor.alphas import alphas
# Step 2: Get data
#df = pd.read_csv(r'C:\Users\shubh\PycharmProjects\cs4641\training_data.csv') # read the data
df = pd.read_csv(r'C:\Users\Jujin\Desktop\cs-4641-group-44\training_data.csv')
npdata = df.to_numpy()
x = npdata[:,3:] # x = covid data mtx
x = np.delete(x, 5, 1) # delete death col
ytemp = npdata[:,8] # y = death label data
y = np.zeros(len(ytemp))
for i in range(len(ytemp)): # to fix mixed datatype problem
if ytemp[i] == 1:
y[i] = 1
dftest = pd.read_csv(r'C:\Users\Jujin\Desktop\cs-4641-group-44\test_data.csv') # read the data
npdatatest = dftest.to_numpy()
xtest = npdatatest[:,3:] # x = covid data mtx
xtest = np.delete(xtest, 5, 1) # delete death col
ytemptest = npdatatest[:,8] # y = death label data
ytest = np.zeros(len(ytemptest))
for i in range(len(ytemptest)): # to fix mixed datatype problem
if ytemptest[i] == 1:
ytest[i] = 1
# Step 3: Create a model and train it
model = LogisticRegression(solver='liblinear', random_state=0)
model2 = LogisticRegression(solver='liblinear', random_state=0, class_weight='balanced', C = 1.0)
model.fit(x, y)
model2.fit(x, y)
# Step 4: Evaluate the model
p_pred = model.predict_proba(x)
y_pred = model.predict(x)
score_ = model.score(x, y)
conf_m = confusion_matrix(y, y_pred)
report = classification_report(y, y_pred)
# Print stuff
print('p_pred:', p_pred, sep='\n', end='\n\n')
print('y_pred:', y_pred, end='\n\n')
print('score_:', score_, end='\n\n')
print('conf_m:', conf_m, sep='\n', end='\n\n')
print('report:', report, sep='\n')
print(model.coef_)
print(model.intercept_)
#plt.plot(x[:,0], y)
#plt.show()
print(model.score(xtest,ytest))
alphas(LassoCV(random_state=0), x, y)
lasso_model = Lasso()
visualizer = prediction_error(lasso_model, x, y, xtest, ytest)
#ROC,AUC
y_score2 = model.predict_proba(xtest)[:,1]
false_positive_rate2, true_positive_rate2, threshold2 = roc_curve(ytest, y_score2)
print('roc_auc_score for Logistic Regression: ', roc_auc_score(ytest, y_score2))
plt.subplots(1, figsize=(10,10))
plt.title('Receiver Operating Characteristic - Logistic regression')
plt.plot(false_positive_rate2, true_positive_rate2)
plt.plot([0, 1], ls="--")
plt.plot([0, 0], [1, 0] , c=".7"), plt.plot([1, 1] , c=".7")
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
|
class Vehicle:
DEFAULT_FUEL_CONSUMPTION = 1.25
def __init__(self, fuel, horse_power):
self.fuel = fuel
self.horse_power = horse_power
self.fuel_consumption = self.set_default_fuel_consumption()
@classmethod
def set_default_fuel_consumption(cls):
return cls.DEFAULT_FUEL_CONSUMPTION
def drive(self, kilometers):
needed_fuel = kilometers * self.fuel_consumption
if needed_fuel <= self.fuel and not self.fuel == 0:
self.fuel -= needed_fuel
|
import os
import tempfile
from unittest import TestCase
from sampleProcess import SampleProcess
"""
This is an integration test
"""
class ITTestSampleProcess(TestCase):
def test_run(self):
# Arrange
sut = SampleProcess()
tmpout = tempfile.mkdtemp()
# Act
actual = sut.run(output_dir=tmpout)
# Assert
        # Check that the downloaded file exists and is greater than zero bytes
self.assertTrue(os.path.getsize(actual) > 0)
|
from django.apps import AppConfig
class ValidationcwappConfig(AppConfig):
name = 'validationcwApp'
|
#!/usr/bin/env python
# Copyright (c) 2014 Pier Carlo Chiodi - http://www.pierky.com
# Licensed under The MIT License (MIT) - http://opensource.org/licenses/MIT
import json
import datetime
import smtplib
from core import *
from config import *
# returns None or failure reason
def DoesScheduleMatch( DateTime, Schedule ):
if "StartDate" in Schedule:
if DateTime.date() < datetime.datetime.strptime( Schedule["StartDate"], "%Y-%m-%d" ).date():
return "StartDate"
if "StopDate" in Schedule:
if DateTime.date() > datetime.datetime.strptime( Schedule["StopDate"], "%Y-%m-%d" ).date():
return "StopDate"
if "StartTime" in Schedule:
StartTime = datetime.datetime.strptime( Schedule["StartTime"], "%H:%M" ).time()
else:
StartTime = None
if "StopTime" in Schedule:
StopTime = datetime.datetime.strptime( Schedule["StopTime"], "%H:%M" ).time()
else:
StopTime = None
if StartTime and StopTime:
if StartTime < StopTime:
if DateTime.time() < StartTime or DateTime.time() > StopTime:
return "StartTime/StopTime"
else:
if DateTime.time() > StopTime and DateTime.time() < StartTime:
return "StopTime/StartTime"
elif StartTime:
if DateTime.time() < StartTime:
return "StartTime"
elif StopTime:
if DateTime.time() > StopTime:
return "StopTime"
if "DoW" in Schedule:
if not ( DateTime.date().isoweekday() in Schedule["DoW"] ):
return "DoW"
return None
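# Example (illustrative): a schedule limited to weekday office hours.
# DoesScheduleMatch returns None when DateTime is allowed, otherwise the name
# of the first failing criterion:
#   Schedule = { "StartTime": "09:00", "StopTime": "18:00", "DoW": [ 1, 2, 3, 4, 5 ] }
#   DoesScheduleMatch( datetime.datetime( 2014, 6, 2, 10, 30 ), Schedule )   # -> None (Monday 10:30)
#   DoesScheduleMatch( datetime.datetime( 2014, 6, 1, 10, 30 ), Schedule )   # -> "DoW" (Sunday)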
def ProcessGraph( Graph ):
Error = None
Debug( "Processing schedules for graph '%s'" % Graph["Title"] )
if not ( "Schedules" in Graph["Scheduler"] ):
Error = "No schedules for this graph"
return Error
Query = Graph["Query"]
Error = NormalizeQuery( Query )
if not Error is None:
return Error
MaxCache = Graph["Scheduler"]["MaxCache"] if "MaxCache" in Graph["Scheduler"] else MAX_CACHE_NFDATA
Now = datetime.datetime.now()
Interval = datetime.timedelta( seconds = Query["SourceFiles"]["Interval"] or NETFLOW_INTERVAL )
Errors = []
StartDateTime = None
StopDateTime = None
SchedulesMatchFound = 0
# test 2 intervals before the current one + the current one
for DateTime in [ Now - Interval - Interval,
Now - Interval,
Now ]:
for Schedule in Graph["Scheduler"]["Schedules"]:
FailureReason = DoesScheduleMatch( DateTime, Schedule )
if FailureReason is None:
Debug( "DateTime %s OK for %s!" % ( DateTime, Schedule ) )
break
else:
Debug( "DateTime %s does not match for %s because of %s!" % ( DateTime, Schedule, FailureReason ) )
if FailureReason is None:
SchedulesMatchFound += 1
Error = FilesExist( DateTime, DateTime, Query )
if Error is None:
if StartDateTime is None:
StartDateTime = DateTime
StopDateTime = DateTime
else:
StopDateTime = DateTime
else:
Debug( Error )
if not StartDateTime is None:
Error = ProcessFiles( StartDateTime, StopDateTime, Query, MaxCache )[0]
if not Error is None:
Errors.append( Error )
else:
# error only if more than one datetime have been tested and none could be processed
if SchedulesMatchFound > 1:
Errors.append( "Can't find netflow data files to process for this graph" )
if Errors != []:
return ", ".join( Errors )
else:
return None
def SchedulerMain():
LogFilePath = "%s/%s" % ( VAR_DIR, "scheduler.log" )
if "SCHEDULER_LOG_FILEPATH" in globals():
LogFilePath = SCHEDULER_LOG_FILEPATH
SetupLogging( LogFilePath )
if os.path.isfile( "%s/%s" % ( VAR_DIR, GRAPHS_FILENAME ) ):
try:
JSON_Data = open( "%s/%s" % ( VAR_DIR, GRAPHS_FILENAME ) )
Graphs = json.load(JSON_Data)
JSON_Data.close()
except:
Error( "Scheduler error while reading graphs from %s" % ( "%s/%s" % ( VAR_DIR, GRAPHS_FILENAME ) ) )
for GraphID in Graphs:
Graph = Graphs[GraphID]
if "Scheduler" in Graph:
                GraphError = ProcessGraph( Graph )
                if not GraphError is None:
                    Error( "Scheduler error while processing graph '%s'\n\n%s" % ( Graph["Title"], GraphError ) )
SchedulerMain()
|
from DAPI import *
from ctypes import *
import LAPI
import sys
rcmap = dict(zip("ACGTacgtNn-","TGCATGCANN-"))
def rc(seq):
return "".join([rcmap[c] for c in seq[::-1]])
ovl_data = LAPI.get_ovl_data(sys.argv[1])
db = DAZZ_DB()
open_DB(sys.argv[2], db)
trim_DB(db)
aln = LAPI.Alignment()
aln.aseq = LAPI.new_read_buffer(db)
aln.bseq = LAPI.new_read_buffer(db)
count = 0
for aread in ovl_data:
LAPI.load_read(db, aread, aln.aseq, 2)
aseq = cast( aln.aseq, c_char_p)
aseq = aseq.value
    print("%08d" % aread, aseq)
for aln_data in ovl_data[aread]:
aread, bread, acc, abpos, aepos, alen, comp, bbpos, bepos, blen = aln_data
LAPI.load_read(db, bread, aln.bseq, 2)
bseq = cast(aln.bseq, c_char_p)
bseq = bseq.value
bseq = bseq[bbpos:bepos]
#load_read(db, ovl.bread, aln.bseq, 2)
if comp == 1:
bseq = rc(bseq)
        print(bread, bseq)
        print("+ +")
count += 1
    print("- -")
close_DB(db)
|
import numpy as np
import matplotlib.pyplot as plt

# NOTE: the helpers sum_ab, division_ab, symbolic_error_prop and the symbolic
# variables a, b are assumed to be defined earlier in the original script.
my_a = np.array([2, 3, 5, 7, 10])
my_sigma_a = np.array([0.2, 0.3, 0.4, 0.7, 0.9])
my_b = np.array([2, 3, 6, 4, 8])
my_sigma_b = np.array([0.3, 0.3, 0.5, 0.5, 0.5])
# errors propagated using custom functions
my_sum_ab_l, my_sigma_sum_ab_l = sum_ab(a=my_a, b=my_b, sigma_a=my_sigma_a, sigma_b=my_sigma_b)
my_division_ab_l, my_sigma_division_ab_l = division_ab(a=my_a, b=my_b, sigma_a=my_sigma_a, sigma_b=my_sigma_b)
# errors propagated using the symbolic approach
my_sum_ab_s, my_sigma_sum_ab_s = symbolic_error_prop(func=a+b, val_a=my_a, val_sigma_a=my_sigma_a, val_b=my_b, val_sigma_b=my_sigma_b)
my_division_ab_s, my_sigma_division_ab_s = symbolic_error_prop(func=a/b, val_a=my_a, val_sigma_a=my_sigma_a, val_b=my_b, val_sigma_b=my_sigma_b)
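# For reference, first-order propagation of independent errors gives
# sigma_(a+b) = sqrt(sigma_a**2 + sigma_b**2) and
# sigma_(a/b) = |a/b| * sqrt((sigma_a/a)**2 + (sigma_b/b)**2).
# A hypothetical helper in the spirit of sum_ab above could look like this
# (illustrative sketch only; the project's real helpers are defined elsewhere):
def _example_sum_ab(a, b, sigma_a, sigma_b):
    return a + b, np.sqrt(sigma_a ** 2 + sigma_b ** 2)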
fig = plt.figure(figsize=(8, 8))
ax1 = fig.add_subplot(2, 2, 1)
ax1.errorbar(x=my_a, y=my_sum_ab_l, xerr=my_sigma_a, yerr=my_sigma_sum_ab_l, linestyle='', marker='o', ecolor='k', elinewidth=0.5, capsize=1, label='Errors by custom functions')
ax1.set_xlabel('a')
ax1.set_ylabel('a + b')
ax1.legend()
ax2 = fig.add_subplot(2, 2, 2)
ax2.errorbar(x=my_a, y=my_sum_ab_s, xerr=my_sigma_a, yerr=my_sigma_sum_ab_s, linestyle='', marker='o', ecolor='k', elinewidth=0.5, capsize=1, label='Errors by the symbolic approach')
ax2.set_xlabel('a')
ax2.set_ylabel('a + b')
ax2.legend()
ax3 = fig.add_subplot(2, 2, 3)
ax3.errorbar(x=my_a, y=my_division_ab_l, xerr=my_sigma_a, yerr=my_sigma_division_ab_l, linestyle='', marker='o', ecolor='k', elinewidth=0.5, capsize=1, label='Errors by custom functions')
ax3.set_xlabel('a')
ax3.set_ylabel('a / b')
ax3.legend()
ax4 = fig.add_subplot(2,2,4)
ax4.errorbar(x=my_a, y=my_division_ab_s, xerr=my_sigma_a, yerr=my_sigma_division_ab_s, linestyle='', marker ='o', ecolor='k', elinewidth=0.5, capsize=1, label='Errors by the symbolic approach')
ax4.set_xlabel('a')
ax4.set_ylabel('a / b')
ax4.legend()
fig.tight_layout() |
"""
[2016-04-29] Challenge #264 [Hard] Detecting Poetry Forms
https://www.reddit.com/r/dailyprogrammer/comments/4gzeze/20160429_challenge_264_hard_detecting_poetry_forms/
# Description
This is a variant of last week's intermediate challenge and was inspired by a comment from /u/Agent_Epsilon. In short,
given a piece of poetry can you write a program to tell you what [rhyme
scheme](https://en.wikipedia.org/wiki/Rhyme_scheme) it has?
From that challenge: we'll use the [SPHINX project](http://cmusphinx.sourceforge.net/) from Carnegie Mellon University
to detect if words rhyme. Use [this pronouncing
dictionary](http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b) in conjunction with [this phoneme
description](http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b.phones) to find rhyming words. Note
that the dictionary uses the [ARPAbet](https://en.wikipedia.org/wiki/Arpabet) phonetic transcription code and includes
stress indicators for the vowel sounds.
# Input Description
You'll be given a poem in plain text, with line breaks as expected. Example:
A bather whose clothing was strewed
By winds that left her quite nude
Saw a man come along
And unless we are wrong
You expected this line to be lewd.
# Output Description
Your program should emit the rhyme scheme found in the poem. From the above example:
aabba
(It's a Limerick.)
# Challenge Input
There once was a young lady named bright
Whose speed was much faster than light
She set out one day
In a relative way
And returned on the previous night.
--
Once upon a midnight dreary, while I pondered, weak and weary,
Over many a quaint and curious volume of forgotten lore—
While I nodded, nearly napping, suddenly there came a tapping,
As of some one gently rapping, rapping at my chamber door.
"'Tis some visiter," I muttered, "tapping at my chamber door—
Only this and nothing more."
--
Brothers, who when the sirens roar
From office, shop and factory pour
'Neath evening sky;
By cops directed to the fug
Of talkie-houses for a drug,
Or down canals to find a hug
--
Two roads diverged in a yellow wood,
And sorry I could not travel both
And be one traveler, long I stood
And looked down one as far as I could
To where it bent in the undergrowth;
# Challenge Output
aabba
abcbbb
aabccc
abaab
"""
def main():
pass
if __name__ == "__main__":
main()
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
def get_loss_function(loss_name):
if loss_name == 'ce':
return nn.CrossEntropyLoss()
elif loss_name == 'bce':
return nn.BCEWithLogitsLoss()
else:
raise ValueError
def perform_epoch(model, loader, optimizer=None, max_batch_count=None, device='cpu', loss_name='ce'):
loss_function = get_loss_function(loss_name)
cum_loss = 0
cum_acc = 0
cum_batch_size = 0
batch_count = 0
with torch.no_grad() if optimizer is None else torch.enable_grad():
#for X, y in tqdm(loader):
for X, y in loader:
X = X.to(device)
y = y.to(device)
batch_size = X.shape[0]
logits = model(X)
if loss_name == 'bce':
logits = logits.view(-1)
acc = torch.mean((logits * (y*2-1) > 0).float())
loss = loss_function(logits, y.float())
else:
acc = torch.mean((torch.max(logits, dim=-1)[1] == y).float())
loss = loss_function(logits, y)
if optimizer is not None:
optimizer.zero_grad()
loss.backward()
optimizer.step()
cum_loss += loss.item() * batch_size
cum_acc += acc.item() * batch_size
cum_batch_size += batch_size
batch_count += 1
if max_batch_count is not None and batch_count >= max_batch_count:
break
mean_loss = cum_loss / cum_batch_size
mean_acc = cum_acc / cum_batch_size
return mean_loss, mean_acc
def get_tangent_kernels(model, loader, optimizer, batch_size=None, device='cpu'):
for hid_layer in model.hidden_layers:
if hasattr(hid_layer, 'weight'):
            raise NotImplementedError
sample_count = 0
stop_flag = False
input_ntk_diag = []
output_ntk_diag = []
for X, _ in loader:
X = X.to(device)
logits = model(X)
if logits.shape[-1] > 1:
            raise NotImplementedError
else:
logits = logits.view(-1)
for logit in logits:
grads_at_x = torch.autograd.grad([logit], [model.input_layer.weight, model.output_layer.weight], retain_graph=True)
input_ntk_diag.append(torch.sum(grads_at_x[0] ** 2) * optimizer.param_groups[0]['lr'])
output_ntk_diag.append(torch.sum(grads_at_x[1] ** 2) * optimizer.param_groups[2]['lr'])
sample_count += 1
if batch_size is not None and sample_count >= batch_size:
stop_flag = True
break
if stop_flag:
break
input_ntk_diag = torch.stack(input_ntk_diag).cpu().numpy()
output_ntk_diag = torch.stack(output_ntk_diag).cpu().numpy()
return input_ntk_diag, None, output_ntk_diag
def get_logits(model, loader, max_batch_count=None, device='cpu'):
logits = []
batch_count = 0
with torch.no_grad():
for X, _ in loader:
X = X.to(device)
logits.append(model(X).cpu())
batch_count += 1
if max_batch_count is not None and batch_count >= max_batch_count:
break
logits = torch.cat(logits, dim=0)
logits = logits.numpy()
return logits |
from functools import reduce
from parsy import string, regex, generate, fail
from chr.ast import *
'''
integer ::= [0-9]+
string ::= '"' .* '"'
key_value ::= term ':' term
dict ::= '{' key_value { ',' key_value }* '}'
list ::= '[' term { ',' term }* ']'
symbol ::= [a-z][a-zA-Z0-9_-]*
variable ::= [A-Z_][a-zA-Z0-9_-]*
functor ::= symbol { '(' term { ',' term }* ')' }
operator ::= [+*/%-]
term ::= variable | functor | integer | string | list | dict
rule ::= { symbol @ } ( simplification | propagation | simpagation )
constraints ::= functor { ',' functor }*
guard ::= constraints '|'
simplification ::= constraints '<=>' { guard } constraints '.'
propagation ::= constraints '==>' { guard } constraints '.'
simpagation ::= constraints '\' constraints '<=>' { guard } constraints '.'
signature ::= symbol '/' [0-9]+
decl ::= 'constraints' signature { ',' signature }* '.'
'''
lit_symbol = regex(r'[a-z][a-zA-Z0-9_]*')
lit_operator = regex(r'\'[^\t\n\r ]+\'')
lit_variable = regex(r'[a-zA-Z_][a-zA-Z0-9_]*')
lit_number = regex(r'[0-9]+')
lit_string = regex(r'\"[^\"]*\"')
lit_white = regex(r'[\n\t ]*')
lit_signature = regex(r'[a-z][a-zA-Z0-9_-]*/[0-9]+')
lit_class_name = regex(r'[A-Za-z_][A-Za-z_0-9]*')
def token(s):
if type(s) is not str:
raise TypeError(f'{s}: {str} expected; got {type(s)}')
@generate
def fun():
nonlocal s
t = yield lit_white >> string(s) << lit_white
return t
return fun
comma = token(',')
infix_term_ops = [
[("-", 1)],
[("*", 2), ("/", 2), ("%", 2)],
[("+", 2), ("-", 2)],
[("not", 1)],
[("not in", 2), ("in", 2),
("is not", 2), ("is", 2),
("==", 2), ("!=", 2),
("<=", 2), ("<", 2), (">=", 2), (">", 2)
],
[("and", 2), ("or", 2)],
[("=", 2)]
]
def mk_infix_term_parser(term_parser, operators):
un_ops = [token(op) for op, ar in operators if ar == 1]
bin_ops = [token(op) for op, ar in operators if ar == 2]
un_op = reduce(lambda l, r: l | r, un_ops) if un_ops else None
bin_op = reduce(lambda l, r: l | r, bin_ops) if bin_ops else None
@generate
def fun():
u = None
if un_op:
u = yield un_op.optional()
left = yield term_parser
if u:
left = Term(u, params=[left])
chained = []
if bin_op:
while True:
op = yield bin_op.optional()
if not op:
break
v = None
if un_op:
v = yield un_op.optional()
right = yield term_parser
if v:
right = Term(v, params=[right])
chained.append((op, right))
return reduce(lambda l, r: Term(r[0], params=[l, r[1]]), chained, left)
return fun
@generate
def parse_prefix_term():
symbol = yield lit_symbol | (token("'") >> regex("[^\n\t ']+") << token("'"))
args = []
br_open = yield string('(').optional()
if br_open:
t = yield lit_white >> parse_term
args.append(t)
while True:
c = yield lit_white >> string(',').optional()
if not c:
break
t = yield lit_white >> parse_term
args.append(t)
yield lit_white >> string(')')
return Term(symbol, args)
@generate
def parse_variable():
varname = yield lit_white >> string('$') >> lit_variable
return Var(varname)
@generate
def parse_integer():
number = yield lit_white >> lit_number
return int(number)
@generate
def parse_string():
s = yield lit_white >> lit_string
return s[1:-1]
@generate
def parse_bool():
s = yield lit_white >> (string('False') | string('True'))
return s == "True"
@generate
def parse_empty_list():
yield token('[') + lit_white + token(']')
return []
@generate
def parse_non_empty_list():
yield token('[')
ts = yield (lit_white >> parse_term << comma).many()
t = yield parse_term.optional()
yield token(']')
    return ts + ([t] if t is not None else [])
@generate
def parse_list():
ts = yield parse_empty_list | parse_non_empty_list
return ts
@generate
def parse_key_value():
k = yield parse_term
if not is_ground(k):
        yield fail(f'{k} not ground')
yield token(':')
v = yield parse_term
return k, v
@generate
def parse_dict():
yield token('{')
items = yield (parse_key_value << comma).many()
last = yield parse_key_value.optional()
yield token('}')
return dict(items + ([last] if last else []))
@generate
def parse_tuple():
yield token('(')
es = yield (lit_white >> parse_term << comma).at_least(1)
last = yield parse_term.optional()
yield token(')')
    return tuple(es + ([last] if last is not None else []))
@generate
def parse_atom():
result = yield lit_white >> (
parse_integer |
parse_bool |
parse_string |
parse_variable |
parse_list |
parse_tuple |
parse_dict |
parse_prefix_term |
token('(') >> parse_term << token(')')
)
return result
@generate
def parse_term():
result = yield lit_white >> (
parse_infix_term(infix_term_ops) |
parse_atom
)
return result
def parse_infix_term(op_table, acc=parse_atom):
if not op_table:
return acc
return parse_infix_term(
op_table[1:],
acc=mk_infix_term_parser(acc, op_table[0])
)
@generate
def parse_constraints():
c = yield lit_white >> parse_term
args = [c]
while True:
sep = yield lit_white >> string(',').optional()
if not sep:
break
c1 = yield lit_white >> parse_term
args.append(c1)
return args
@generate
def parse_guard():
cs = yield lit_white >> parse_constraints
yield lit_white >> string('|')
return cs
@generate
def parse_body():
gs = yield lit_white >> parse_guard.optional()
cs = yield lit_white >> parse_constraints
return gs, cs
@generate
def parse_rule_name():
name = yield lit_white >> lit_symbol
yield lit_white >> string('@')
return name
def parse_rule(rule_name_generator):
@generate
def fun():
name = yield lit_white >> parse_rule_name.optional()
if not name:
name = rule_name_generator()
kept, removed, guard, body = yield \
parse_simplification \
| parse_propagation \
| parse_simpagation
yield lit_white >> string('.')
return Rule(name, kept, removed, guard if guard else [], body)
return fun
@generate
def parse_simplification():
hs = yield lit_white >> parse_constraints
yield lit_white >> string('<=>')
gs, bs = yield lit_white >> parse_body
return [], hs, gs, bs
@generate
def parse_propagation():
hs = yield lit_white >> parse_constraints
yield lit_white >> string('==>')
gs, bs = yield lit_white >> parse_body
return hs, [], gs, bs
@generate
def parse_simpagation():
ks = yield lit_white >> parse_constraints
yield lit_white >> string('\\')
rs = yield lit_white >> parse_constraints
yield lit_white >> string('<=>')
gs, bs = yield lit_white >> parse_body
return ks, rs, gs, bs
def parse_rules(rule_name_generator):
@generate
def fun():
rules = []
while True:
rule = yield lit_white >> parse_rule(rule_name_generator).optional()
if not rule:
break
rules.append(rule)
return rules
return fun
@generate
def parse_signature():
signature = yield lit_white >> lit_signature
return signature
@generate
def parse_declaration():
yield lit_white >> string("constraints")
c = yield lit_white >> parse_signature
cs = [c]
while True:
comma = yield lit_white >> (string(',') | string('.'))
if comma == '.':
break
c1 = yield lit_white >> parse_signature
cs.append(c1)
return cs
@generate
def parse_class_name():
yield lit_white >> string("class")
c = yield lit_white >> lit_class_name
yield token(".")
return c
def parse_program():
next_rule_id = 0
def rule_name_gen():
nonlocal next_rule_id
next_rule_id += 1
return f'rule_{next_rule_id - 1}'
@generate
def fun():
class_name = yield parse_class_name
decls = yield parse_declaration
rules = yield parse_rules(rule_name_gen)
return Program(class_name, decls, rules)
return fun
def chr_parse(source):
return parse_program().parse(source)
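# Minimal usage sketch (a sketch only; it assumes the chr.ast classes imported
# above behave as referenced in this module). The source text below follows
# the grammar documented at the top of the file.
if __name__ == '__main__':
    example_source = (
        "class Greeter.\n"
        "constraints hello/1, world/0.\n"
        "hello($X) <=> world."
    )
    print(chr_parse(example_source))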
|
from eth_keys import keys
from eth_utils import decode_hex
from tests.core.integration_test_helpers import FUNDED_ACCT, load_mining_chain
from .churn_state.builder import (
delete_churn,
deploy_storage_churn_contract,
update_churn,
)
RECEIVER = keys.PrivateKey(
decode_hex("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"))
def build_pow_fixture(write_db, num_blocks=20):
chain = load_mining_chain(write_db)
recipient_address = RECEIVER.public_key.to_canonical_address()
for i in range(num_blocks):
tx = chain.create_unsigned_transaction(
nonce=i,
gas_price=1234,
gas=123400,
to=recipient_address,
value=i,
data=b'',
)
chain.apply_transaction(tx.as_signed_transaction(FUNDED_ACCT))
chain.mine_block()
return chain.chaindb
def build_pow_churning_fixture(write_db, num_blocks=40):
chain = load_mining_chain(write_db)
contract_addr = deploy_storage_churn_contract(chain)
half_blocks = num_blocks // 2
nymph_contracts, nonce = update_churn(chain, contract_addr, num_blocks=half_blocks)
delete_churn(chain, nymph_contracts, contract_addr, start_nonce=nonce, num_blocks=half_blocks)
return chain, contract_addr
|
# To create dummy dates (used in case of unavailability of data)
import random
start = [11, 5 ,2006]
end = [4, 12, 2010]
interval = [20, 26, 23, 19, 29]
def giveDate(previous):
inter = random.choice(interval)
previous[0] = previous[0] + inter
if previous[0] > 30:
previous[0] = previous[0] - 30
previous[1] = previous[1] + 1
if previous[1] > 12:
previous[1] = previous[1] - 12
previous[2] = previous[2] + 1
if previous[0] <= 0:
previous[0] = 1
if previous[1] <= 0:
previous[1] = 1
return previous
with open("Date.csv", "a") as file:
while start[2] <= 2012:
start = giveDate(start)
file.writelines(f"{start[0]},{start[1]},{start[2]}\n")
file.close() |
'''
Module specific config
'''
import configparser
import os.path
import json
from simplesensor.shared import ThreadsafeLogger
def load(loggingQueue, name):
""" Load module specific config into dictionary, return it"""
logger = ThreadsafeLogger(loggingQueue, '{0}-{1}'.format(name, 'ConfigLoader'))
thisConfig = {}
configParser = configparser.ConfigParser()
thisConfig = load_secrets(thisConfig, logger, configParser)
thisConfig = load_module(thisConfig, logger, configParser)
return thisConfig
def load_secrets(thisConfig, logger, configParser):
""" Load module specific secrets """
try:
with open("./config/secrets.conf") as f:
            configParser.read_file(f)
except IOError:
configParser.read(os.path.join(os.path.dirname(__file__),"./config/secrets.conf"))
exit
return thisConfig
def load_module(thisConfig, logger, configParser):
""" Load module config """
try:
with open("./config/module.conf") as f:
            configParser.read_file(f)
except IOError:
configParser.read(os.path.join(os.path.dirname(__file__),"./config/module.conf"))
exit
"""websocket host"""
try:
configValue=configParser.get('ModuleConfig','websocket_host')
except:
configValue = '127.0.0.1'
logger.info("Websocket server host : %s" % configValue)
thisConfig['WebsocketHost'] = configValue
"""websocket port"""
try:
configValue=configParser.getint('ModuleConfig','websocket_port')
except:
configValue = 13254
logger.info("Websocket server port : %s" % configValue)
thisConfig['WebsocketPort'] = configValue
return thisConfig |
#!/usr/bin/env python
# -*- noplot -*-
from __future__ import print_function
import os
import matplotlib.pyplot as plt
import numpy as np
files = []
fig, ax = plt.subplots(figsize=(5, 5))
for i in range(50): # 50 frames
plt.cla()
plt.imshow(np.random.rand(5, 5), interpolation='nearest')
fname = '_tmp%03d.png' % i
print('Saving frame', fname)
plt.savefig(fname)
files.append(fname)
print('Making movie animation.mpg - this make take a while')
os.system("mencoder 'mf://_tmp*.png' -mf type=png:fps=10 -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o animation.mpg")
#os.system("convert _tmp*.png animation.mng")
# cleanup
for fname in files: os.remove(fname)
|
'''
Pychecko is a microframework to compose
the methods of an instance at runtime.
'''
import collections.abc
import inspect
import types
from abc import (
ABCMeta,
abstractmethod,
)
class PyCheckoException(Exception):
    '''Most generic exception in Pychecko.'''
pass
class InvalidRuleError(PyCheckoException):
'''
This exception will be thrown when the rule sent to
Pychecko is not a Boolean condition.
'''
def __init__(self, msgError=None):
if msgError:
self.message = msgError
class InvalidMethodError(PyCheckoException):
'''
This exception will be thrown when the method sent to
Pychecko is not a real method.
'''
def __init__(self, msgError=None):
if msgError:
self.message = msgError
class InvalidSignatureInputTypeError(PyCheckoException):
'''
    This exception will be thrown when the signature passed to
    Pychecko is not a tuple or list of strings.
'''
def __init__(self, msgError=None):
if msgError:
self.message = msgError
class InvalidSignatureClassError(PyCheckoException):
'''
    This exception will be thrown when the expected signature has not been
    completely satisfied by the methods applied to the instance.
'''
def __init__(self, msgError=None):
if msgError:
self.message = msgError
class InvalidInputError(PyCheckoException):
'''
    This exception will be thrown when an input passed to Pychecko
    is not valid (for example, not an iterable or not a PycheckoComponent).
'''
def __init__(self, msgError=None):
if msgError:
self.message = msgError
class Pychecko:
def __init__(self, instance, signature=None):
self.__methods_to_add = list()
self.__instance = instance
if (
signature and
(
not isinstance(signature, (tuple, list)) or
not all(isinstance(s, str) for s in signature)
)
):
raise InvalidSignatureInputTypeError(
"Type is: {}. The type should be tuple or list"
.format(type(signature))
)
self.__signature = signature
def add(self, method, rules=[]):
'''
        Register a method used to compose the instance.
        :param method: the method that will be applied to the instance
        :param rules: (optional) a list of boolean conditions; the method is
        only added if all of them are True at execution time.
'''
self.__inputs_validate(method, rules)
if all(rules):
self.__methods_to_add.append(method)
def bulk_add(self, methods, rules=[]):
self.__validate_iterable(methods)
for method in methods:
self.add(method, rules)
def __inputs_validate(self, method, rules=[]):
'''
        Method responsible for validating that the inputs can be
        applied to the instance.
'''
if not hasattr(method, '__call__'):
raise InvalidMethodError(
"The {method} isn't a valid method".format(
method=method,
)
)
self.__validate_iterable(rules)
for rule in rules:
if not isinstance(rule, bool):
raise InvalidRuleError(
"The {rule} isn't a boolean condition".format(
rule=rule,
)
)
def __validate_iterable(self, iterable):
        if not isinstance(iterable, collections.abc.Iterable):
raise InvalidInputError('The input is not an Iterable')
@property
def class_signature(self):
return self.__instance
@property
def execute(self):
'''
Property responsible to apply all method that the rules match
and prepare the instance
:return: modified instance
'''
for method in self.__methods_to_add:
setattr(
self.__instance,
method.__name__,
types.MethodType(method, self.__instance),
)
if (
self.__signature and not
set(self.__signature).issubset(self.__instance.__dict__)
):
raise InvalidSignatureClassError()
return self.__instance
class PycheckoComponent:
__metaclass__ = ABCMeta
@abstractmethod
def is_applied(self):
pass
class PycheckoClassModifier:
"""
    Class responsible for modifying an instance and creating the composition.
    The class must be given an instance and the list of components that
    will be applied over that instance.
    * The Pychecko framework manipulates only instances and methods.
      No attributes, properties or bare class signatures.
      All the params sent to the framework must be instantiated.
    * The component list has precedence order priority
      from the last to the first item in the list.
    * All the components in the component list must be a
      PycheckoComponent (by inheritance) and must implement the method
      **is_applied**, which must return a boolean condition.
    * If a method in the component list already exists on the instance,
      the method on the instance will be overwritten.
    * If a method in the component list does not exist on the instance,
      it will be added.
    * If some instance in the component list is not a PycheckoComponent,
      an InvalidInputError will be thrown.
    * If, after reading all the items in the component list, no methods
      are found, an InvalidMethodError will be thrown.
    * After processing, use the **execute** property
      to get the modified instance.
:param: instance\n
:param: components (optional)
"""
def __init__(self, instance, components=[]):
self.__instance = instance
for component in components:
if not issubclass(component.__class__, PycheckoComponent):
raise InvalidInputError(
'{} are not a Pychecko Component'.format(
component.__class__.__name__
)
)
self.__components = components
@property
def execute(self):
"""
Property responsible to apply all method that the rules match
and prepare the instance
:return: modified instance
"""
for component in self.__components:
methods = inspect.getmembers(component, inspect.ismethod)
methods = [
m
for m in methods
if not m[0].startswith('_') and
not m[0] == 'is_applied'
]
if not methods:
raise InvalidMethodError(
'There are no methods in the component {}'.format(
component.__class__.__name__
)
)
if component.is_applied():
for method in methods:
method_name = method[0]
setattr(
self.__instance,
method_name,
types.MethodType(
component.__class__.__dict__[method_name],
self.__instance
),
)
return self.__instance
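# Minimal usage sketch of PycheckoClassModifier (Greeter and LoudGreeting are
# hypothetical example classes, not part of the framework): the component is
# applied because is_applied() returns True, so the modified instance gains
# its greet() method.
if __name__ == '__main__':
    class Greeter:
        def __init__(self, name):
            self.name = name

    class LoudGreeting(PycheckoComponent):
        def is_applied(self):
            return True

        def greet(self):
            return 'HELLO, {}!'.format(self.name.upper())

    modified = PycheckoClassModifier(Greeter('world'), [LoudGreeting()]).execute
    print(modified.greet())  # prints: HELLO, WORLD!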
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 3 21:56:51 2021
@author: giuseppeperonato
"""
import csv
import json
class Recipe():
def __init__(self, name):
self.name = name
self.ingredients = {}
self.content = {}
self.cooking_steps = []
self.db = []
self.energy_ef= []
self.intake= []
with open('data/food/data.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
self.db.append(row)
with open('data/energy/data.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
self.energy_ef.append(row)
with open('data/food/intake/data.json', "r") as f:
self.intake = json.loads(f.read())
self.total_content = {}
for key in list(self.db[0].keys()):
if key != "LCI Name":
self.total_content[key.split(" (")[0]] = {"value":0,
"unit":key.split(" (")[1].split("/")[0]}
def addIngredient(self,name,quantity):
self.ingredients[name] = {"quantity": quantity}
def removeIngredient(self,name):
del self.ingredients[name]
def addIngredients(self,ingredients):
for ingredient in ingredients:
self.addIngredient(ingredient[0],ingredient[1])
def addCookingStep(self,energy_source,duration,power):
self.cooking_steps.append({"duration": duration,
"energy_source": energy_source,
"power": power})
def cook(self):
for step in self.cooking_steps:
energy = step["duration"]/60. * step["power"] / 1000.
print(self.find_EF(step["energy_source"]))
step["CO2e"] = energy * self.find_EF(step["energy_source"])
self.total_content["Carbon footprint"]["value"] += step["CO2e"]
self.compare()
def mise_en_place(self):
self.weight = 0
for name in self.ingredients.keys():
self.content[name] = {}
entries = self.add_values(name)
for key, value in entries.items():
if key == "Carbon footprint (kgCO2e/kg)":
self.content[name][key] = value * self.ingredients[name]["quantity"]/1000.
else:
try:
self.content[name][key] = value * (self.ingredients[name]["quantity"]/100.)
except:
self.content[name][key] = 0
self.total_content[key.split(" (")[0]]["value"] += self.content[name][key]
self.weight += self.ingredients[name]["quantity"]
self.compare()
def add_values(self,name):
entries = {}
for entry in self.db:
if entry["LCI Name"] == name:
for key, value in entry.items():
try:
entries[key] = float(value)
except:
pass
return entries
def find_EF(self,energy):
for entry in self.energy_ef:
if entry["Name_Location"] == energy:
return float(entry["EF"])
def compare(self,meal="Hamburger, from fast foods restaurant",quantity=220):
reference = self.add_values(meal)
for key, value in reference.items():
name = key.split(" (")[0]
comparison = ""
recommended = ""
if value > 0:
if key == "Carbon footprint (kgCO2e/kg)":
comparison = round(self.total_content[name]["value"] / (value*(quantity/1000.))*100,2)
else:
comparison = round(self.total_content[name]["value"] / (value*(quantity/100.)) * 100,2)
recommended = round(self.total_content[name]["value"]/self.intake[name]["value"] * 100,2)
self.total_content[name]["benchmark"] = {"name":meal,"weight (g)": quantity, "value": comparison}
self.total_content[name]["recommended"] = recommended
my_recipe = Recipe("Pasta broccoli e aggiughe")
# my_recipe.addIngredient("Dried pasta, wholemeal, raw", 100)
# my_recipe.addIngredient("Olive oil, extra virgin", 2)
# my_recipe.addIngredient("Anchovy, in salt (semi-preserved)", 15)
# my_recipe.addIngredient("Romanesco cauliflower or romanesco broccoli, raw", 200)
my_recipe.addIngredients([("Dried pasta, wholemeal, raw", 100),
("Olive oil, extra virgin", 2),
("Anchovy, in salt (semi-preserved)", 12),
("Romanesco cauliflower or romanesco broccoli, raw", 250)])
my_recipe.mise_en_place()
print(my_recipe.name)
print(my_recipe.ingredients)
print(my_recipe.content)
print("Weight",my_recipe.weight,"g")
print(my_recipe.total_content)
my_recipe.addCookingStep("Electricity (cooking) - France continentale", 15, 2500)
print(my_recipe.cooking_steps)
print(my_recipe.total_content)
my_recipe.cook()
my_recipe2 = Recipe("Tagliatelle al ragù")
my_recipe2.addIngredients([("Dried egg pasta, raw", 100),
("Olive oil, extra virgin", 2),
("Beef, minced steak, 20% fat, cooked", 75),
("Carrot, raw", 12),
("Celery stalk, raw", 12),
("Onion, raw", 12),
("Tomato coulis, canned (tomato puree semi-reduced 11%)", 75)])
my_recipe2.mise_en_place()
print(my_recipe2.name)
print(my_recipe2.ingredients)
print("Weight",my_recipe2.weight,"g")
print(my_recipe2.total_content)
print("")
|
# -*- coding: utf-8 -*-
from functools import wraps
from .error import (UnknowResponse, LoginRequired, AuthenticationFailed,
DetailedHTTPError, UnknownShow, UnknownEpisode)
from .httpclient import HTTPClient, HTTPClientBase
from .models import Show, ShowDetails, EpisodeDetails
from .constants import ShowTypes, SortBy, SortOrder
__all__ = ['FunimationLater']
def require_login(func):
"""Decorator that throws an error when user isn't logged in.
Args:
func: The function to wrap
"""
@wraps(func)
def wrapper(*args, **kwargs):
# args[0] will always be self
if not args[0].logged_in:
raise LoginRequired('must be logged in')
return func(*args, **kwargs)
return wrapper
class FunimationLater(object):
host = 'api-funimation.dadcdigital.com'
base_path = '/xml'
protocol = 'https'
default_limit = 20
def __init__(self, username=None, password=None, http_client=None):
"""
Args:
username (str): The users email address
password (str): The password
http_client: Must be an instance of HTTPClient
"""
full_url = '{}://{}{}'.format(self.protocol, self.host, self.base_path)
if http_client is None:
self.client = HTTPClient(full_url)
else:
# NOTE(Sinap): using assert instead of raise here because we should
# never use a client that is not a subclass of HTTPClientBase.
assert issubclass(http_client, HTTPClientBase), \
'http_client must be a subclass of HTTPClientBase'
self.client = http_client(full_url)
self.logged_in = False
if username and password:
self.login(username, password)
def login(self, username, password):
"""Login and set the authentication headers
Args:
username (str): The users email address
password (str): The password
"""
resp = self.client.post('/auth/login/?',
{'username': username, 'password': password})
if 'error' in resp['authentication']:
raise AuthenticationFailed('username or password is incorrect')
# the API returns what headers should be set
self.client.add_headers(resp['authentication']['parameters']['header'])
self.logged_in = True
@require_login
def get_my_queue(self):
"""Get the list of shows in the current users queue
Returns:
list[funimationlater.models.Show]:
"""
resp = self.client.get('/myqueue/get-items/?')
if resp['watchlist']['items'] is not None:
return [Show(x['item'], self.client) for x in
resp['watchlist']['items']['item']]
else:
return []
@require_login
def add_to_queue(self, show_stub):
"""Add a show to the current users queue.
Args:
show_stub (str): This is a 3 letter code for the show to add.
"""
self.client.get('myqueue/add/', {'id': show_stub})
@require_login
def remove_from_queue(self, show_stub):
"""Remove a show from the current users queue.
Args:
show_stub (str): This is a 3 letter code for the show to remove.
"""
self.client.get('myqueue/remove/', {'id': show_stub})
@require_login
def get_history(self):
"""Get the history for the current user.
Returns:
list[funimationlater.models.Show]
Raises:
funimationlater.error.UnknowResponse:
"""
resp = self.client.get('/history/get-items/?')
if 'watchlist' in resp:
return [Show(x['item'], self.client) for x in
resp['watchlist']['items']['historyitem']]
raise UnknowResponse(resp)
def get_shows(self, show_type, sort_by=SortBy.TITLE,
sort_order=SortOrder.DESC, limit=default_limit, offset=0,
**kwargs):
"""
Args:
show_type (str): simulcasts, broadcast-dubs, genre
sort_by (str):
sort_order (str):
offset (int):
limit (int):
Returns:
list[funimationlater.models.Show]:
"""
resp = self._get_content(
id=show_type,
sort=sort_by,
sort_direction=sort_order,
itemThemes='dateAddedShow',
territory='US',
role='g',
offset=offset,
limit=limit,
**kwargs
)
return resp
def get_show(self, show_id):
"""Get the :class:`funimationlater.models.ShowDetails` for `show_id`.
Args:
show_id (int): The shows numeric ID.
Returns:
funimationlater.models.ShowDetails: Returns None if no show exists.
Raises:
funimationlater.error.UnknownShow:
"""
try:
resp = self.client.get('detail/', {'pk': show_id})
if resp:
return ShowDetails(resp['list2d'], self.client)
except DetailedHTTPError:
# So we don't need the same code twice, just pass on HTTPError
# then raise UnknownShow. If it's a different error it should
# raise that error instead.
pass
raise UnknownShow('Show with ID {} not found'.format(show_id))
def get_episode(self, show_id, episode_id, audio_type=None):
"""Get a specific episodes details.
Args:
show_id (int):
episode_id (int):
audio_type (Optional[str]):
Returns:
funimationlater.models.EpisodeDetails:
Raises:
funimationlater.error.UnknownEpisode:
"""
params = {'id': episode_id, 'show': show_id}
if audio_type is not None:
params['audio'] = audio_type
try:
resp = self.client.get('player/', params)
if resp:
return EpisodeDetails(resp['player'], self.client)
except DetailedHTTPError:
# See get_episodes note.
pass
raise UnknownEpisode("Episode ID {} for show {} doesn't exist".format(
episode_id, show_id))
def search(self, query):
"""Perform a search using the API.
Args:
query (str): The query string
Returns:
list[funimationlater.models.Show]: a list of results
"""
resp = self.get_shows(ShowTypes.SEARCH, q=query)
return resp
def get_all_shows(self):
"""Get a list of all shows.
Returns:
List[funimationlater.models.Show]:
"""
# limit=-1 appears to return all shows
shows = self.get_shows(ShowTypes.SHOWS, limit=-1)
if shows is None:
return []
else:
return shows
def get_simulcasts(self):
"""Get a list of all shows being simulcasted.
NOTE(Sinap): This doesn't appear to be working. You always get the same
20 shows even if you set the limit or offset higher, the
shows are also old.
Args:
Returns:
list[funimationlater.models.Show]
"""
shows = self.get_shows(ShowTypes.SIMULCAST)
return shows
def _get_content(self, **kwargs):
resp = self.client.get('/longlist/content/page/', kwargs)['items']
if not resp or isinstance(resp, (tuple, str)):
return None
resp = resp['item']
if isinstance(resp, list):
return [Show(x, self.client) for x in resp]
else:
return [Show(resp, self.client)]
def __iter__(self):
"""
Returns:
funimationlater.models.Show:
"""
offset = 0
limit = self.default_limit
while True:
            shows = self.get_shows(ShowTypes.SHOWS, limit=limit, offset=offset)
            if not shows:
                break
            for show in shows:
yield show
if len(shows) < limit:
break
offset += limit
def __getitem__(self, item):
"""
Args:
item (int): Show ID to get
Returns:
funimationlater.models.ShowDetails:
"""
if isinstance(item, int):
try:
return self.get_show(item)
except DetailedHTTPError:
return None
# NOTE(Sinap): This would allow you to do api['Cowboy Bebop'] but I'm
# not sure if I should do that because it could be
# confusing if you can use both the integer index and
# a string
# elif isinstance(item, str):
# return self.search(item)
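    # Illustrative usage sketch (comment only; the client class these methods
    # belong to is defined earlier in this module, so its name is assumed here):
    #
    #   api = <ApiClient>(username, password)
    #   for show in api:           # __iter__ pages through every show
    #       print(show)
    #   details = api[12345]       # __getitem__ -> get_show(12345), None on HTTP error
    #   results = api.search('cowboy bebop')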
|
"""Basic serializer."""
import re
from rest_framework import serializers
class MessageSerializer(serializers.Serializer):
"""Message serializer."""
message = serializers.CharField(required=True)
class ErrorSerializer(MessageSerializer):
"""Error serializer."""
errors = serializers.ListField(child=serializers.DictField(), required=False)
class PermissionDeniedSerializer(serializers.Serializer):
"""Permission denied serializer."""
detail = serializers.CharField(required=True)
def char_regex_validator_string(value: str):
"""Validate regex string without special characters."""
if not re.match(r"^[a-zA-Z0-9_-]*$", value):
raise serializers.ValidationError(
f"{value} is not in the allowed characters: a-zA-Z0-9_-"
)
def char_regex_validator_string_space(value: str):
"""Validate regex string with space."""
if not re.match(r"^[ a-zA-Z0-9_-]*$", value):
raise serializers.ValidationError(
            f"{value} is not in the allowed characters: a-zA-Z0-9_- and space"
)
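# Illustrative sketch (not part of the original module): the validators above are
# plain callables, so they can be attached to serializer fields through the
# ``validators`` argument. The serializer and field names below are hypothetical.
class ExampleSlugSerializer(serializers.Serializer):
    """Hypothetical serializer demonstrating the regex validators."""

    slug = serializers.CharField(required=True, validators=[char_regex_validator_string])
    title = serializers.CharField(required=False, validators=[char_regex_validator_string_space])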
|
import argparse
import json
import os
import os.path as osp
import pickle
import math
from datetime import datetime
from pdb import set_trace as st
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torch import optim
from torchvision.datasets.folder import ImageFolder, IMG_EXTENSIONS, default_loader
import knockoff.adversary.resnet10_protect_model as resnet10_protect_model
import knockoff.adversary.resnet18_protect_model as resnet18_protect_model
valid_select_modes = [
"posnegweight_large", "posnegweight_small",
"posonlyweight_large", "posonlyweight_small",
"posnegchannel_large", "posnegchannel_small",
# "posonly_large", "posonly_small",
"edgeweight_large", "edgeweight_small",
"channelweight_large", "channelweight_small",
"randomweight_large", "randomweight_small",
"randomchannel_large", "randomchannel_small",
"edgeweight_small_avg",
"posnegweight_small_all", "posnegweight_large_all",
"posnegweight_small_subset", "posnegweight_large_subset",
"posonlyweight_small_subset", "posonlyweight_large_subset",
"posonlyweight_small_all",
]
def clear_conv(
conv,
index,
select_mode,
):
if (("posnegweight" in select_mode) or
("edgeweight" in select_mode) or
("randomweight" in select_mode) or
("posonlyweight" in select_mode)):
shape = conv.weight.shape
weight = conv.weight.flatten()
weight[index] = .0
weight = weight.reshape(shape)
if (("posnegchannel" in select_mode) or
("channelweight" in select_mode) or
("randomchannel" in select_mode)):
conv.weight.data[index] = .0
# conv.weight = weight
return conv
def select_by_ratio(
class_logics,
layer_name,
target_conv,
select_mode = "posneg_large",
select_ratio = 0.1,
):
assert select_mode in valid_select_modes
if (("posnegweight" in select_mode) or ("posonlyweight" in select_mode)):
weight_cnt = class_logics[layer_name]
shape = weight_cnt.shape
total_num = int(np.prod(shape) * select_ratio)
largest_index = (-weight_cnt).flatten().argsort()[:total_num]
smallest_index = weight_cnt.flatten().argsort()[:total_num]
if "large" in select_mode:
return largest_index
elif "small" in select_mode:
return smallest_index
else:
            raise RuntimeError("Not implemented")
if ("posnegchannel" in select_mode):
feature = class_logics["initial_conv" if layer_name == "conv2d" else layer_name]
size = feature.shape[0]
select_number = int(size * select_ratio)
if "posneg" in select_mode:
feature = feature[int(size/2):] + feature[:int(size/2)]
largest_index = (-feature).argsort()[:select_number]
smallest_index = feature.argsort()[:select_number]
if "large" in select_mode:
# print(f"select result {largest_index}")
return largest_index
elif "small" in select_mode:
# print(f"select result {smallest_index}")
return smallest_index
else:
            raise RuntimeError("Not implemented")
if "channelweight" in select_mode:
size = target_conv.weight.shape[0]
select_number = int(size * select_ratio)
weight_count = target_conv.weight.abs().sum(-1).sum(-1).sum(-1)
largest_index = (-weight_count).flatten().argsort()[:select_number]
smallest_index = weight_count.flatten().argsort()[:select_number]
if "large" in select_mode:
return largest_index
elif "small" in select_mode:
return smallest_index
else:
            raise RuntimeError("Not implemented")
if ("edgeweight" in select_mode):
weight = target_conv.weight.abs()
shape = weight.shape
total_num = int(np.prod(shape) * select_ratio)
largest_index = (-weight).flatten().argsort()[:total_num]
smallest_index = weight.flatten().argsort()[:total_num]
if "large" in select_mode:
return largest_index
elif "small" in select_mode:
return smallest_index
else:
            raise RuntimeError("Not implemented")
if "randomweight" in select_mode:
weight = target_conv.weight
shape = weight.shape
total_num = int(np.prod(shape) * select_ratio)
# indexes = np.random.choice(weight.flatten().shape[0], total_num)
index = np.random.choice(np.prod(weight.shape), total_num)
return index
if "randomchannel" in select_mode:
weight = target_conv.weight
shape = weight.shape
total_num = int(select_ratio * shape[0])
# indexes = np.random.choice(weight.flatten().shape[0], total_num)
indexes = np.random.choice(shape[0], total_num)
return indexes
else:
        raise RuntimeError("Not implemented")
def clear_model_fc(
model,
arch = "resnet10",
class_id = "all",
layer_name = "fc",
select_mode = "posnegweight_large",
select_ratio = 0,
):
if arch == "resnet10":
load_class_logics = resnet10_protect_model.load_class_logics
load_multi_class_logics = resnet10_protect_model.load_multi_class_logics
layer_tf_to_torch = resnet10_protect_model.layer_tf_to_torch
refresh_model_conv = resnet10_protect_model.refresh_model_conv
elif arch == "resnet18":
load_class_logics = resnet18_protect_model.load_class_logics
load_multi_class_logics = resnet18_protect_model.load_multi_class_logics
layer_tf_to_torch = resnet18_protect_model.layer_tf_to_torch
refresh_model_conv = resnet18_protect_model.refresh_model_conv
else:
raise NotImplementedError
if "channel" in select_mode and layer_name == "dense":
return model
if "posneg" in select_mode or "posonly" in select_mode:
if "posnegweight" in select_mode:
logic_name = "posnegweight"
elif "posnegchannel" in select_mode:
logic_name = "posneg"
elif "posonlyweight" in select_mode:
logic_name = "posonlyweight"
else:
            raise RuntimeError("Not implemented")
if isinstance(class_id, int) or class_id == "all":
class_logics = load_class_logics(
logic_name,
class_id
)
elif isinstance(class_id, tuple):
class_logics = load_multi_class_logics(
logic_name,
class_id,
)
else:
class_logics = None
conv = layer_tf_to_torch[layer_name](model)
channel_index = select_by_ratio(
class_logics,
layer_name,
conv,
select_ratio=select_ratio,
select_mode=select_mode,
)
conv = clear_conv(
conv,
channel_index,
select_mode,
)
model = refresh_model_conv(model, layer_name, conv)
# new_weight = layer_tf_to_torch[layer_name](model).weight.data
# diff = raw_weight != new_weight
return model |
from __future__ import print_function
import itertools
import numpy as np
import sys
sys.path.append('..')
from lib_inhomogeneous_gutzwiller import Gutzwiller
def test_lattice_definition_2D():
xy = np.empty(2, dtype=np.int32)
for L in [2, 3, 10, 11]:
G = Gutzwiller(D=2, L=L)
for i_site in range(G.N_sites):
x, y = G.site_coords[i_site, :]
assert i_site == G.xy2i(x, y)
for x, y in itertools.product(range(L), repeat=2):
i_site = G.xy2i(x, y)
G.i2xy(i_site, xy)
assert x == xy[0]
assert y == xy[1]
def test_neighbors_reciprocity():
for OBC in [0, 1]:
for D in [1, 2]:
for L in [5, 10]:
G = Gutzwiller(D=D, L=L, OBC=OBC)
for i_site in range(G.N_sites):
for j_nbr in range(G.N_nbr[i_site]):
j_site = G.nbr[i_site, j_nbr]
assert i_site in list(G.nbr[j_site, :])
if __name__ == '__main__':
test_lattice_definition_2D()
test_neighbors_reciprocity()
|
import math
num = float(input("Enter a number: "))
print()
raiz = math.sqrt(num)
print("The integer closest to the square root of", num, "is", round(raiz))
print("The square root of", num, "is {}".format(round(raiz, 2))) |
# Copyright (c) 2019 CommerceBlock Team
# Use of this source code is governed by an MIT
# license that can be found in the LICENSE file.
import sys
import json
import argparse
import binascii
import io
import logging
import appdirs
import os
import time
import requests
import threading
import hashlib
import math
import base64
import git
from binascii import hexlify
import mst
import mst.rpchost as rpc
from mst.verify import verify_commitment, verify_unspent, verify_addition_proof
from mst.ecc import key_gen, ECPrivkey, Hash
APPDIRS = appdirs.AppDirs('msc','mainstay')
def sha256sum(filename):
h = hashlib.sha256()
b = bytearray(128*1024)
mv = memoryview(b)
with open(filename, 'rb', buffering=0) as f:
for n in iter(lambda : f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
dropbox_chunk_size = 4 * 1024 * 1024
dropbox_readbuf_size = 64 * 1024
def dropbox_checksum(filename):
with open(filename, 'rb') as f:
sums = []
go_on = True
while go_on:
h = hashlib.sha256()
n = 0
while n < dropbox_chunk_size:
r = f.read(min(dropbox_readbuf_size, dropbox_chunk_size - n))
if not r:
go_on = False
break
n += len(r)
h.update(r)
if n:
sums.append(h.digest())
return hashlib.sha256(bytes().join(sums)).hexdigest()
def md5_checksum(filename):
with open(filename, 'rb') as f:
h = hashlib.md5()
for chunk in iter(lambda: f.read(4096), b''):
h.update(chunk)
md5sum = h.hexdigest()
return hashlib.sha256(md5sum.encode('utf-8')).hexdigest()
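def _checksum_demo(path):
    """Illustrative helper (not part of the original tool): compute the three
    commitment digests defined above for a local file path. Each call returns
    a 64-character hex string suitable for use as a slot commitment."""
    return {
        "sha256": sha256sum(path),          # straight SHA256 of the file contents
        "dropbox": dropbox_checksum(path),  # SHA256 over Dropbox-style 4 MiB block hashes
        "md5_sha256": md5_checksum(path),   # SHA256 of the hex MD5 digest
    }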
def is_hex(s):
try:
int(s, 16)
return True
except ValueError:
return False
def get_settings(args):
filename = APPDIRS.user_data_dir + '/config.json'
try:
with open(filename,'r') as file:
config = json.load(file)
logging.info("Reading parameters from config file")
except:
logging.info("No configuration file found")
config = {}
return config
def save_settings(settings):
if not os.path.exists(APPDIRS.user_data_dir):
os.makedirs(APPDIRS.user_data_dir)
filename = APPDIRS.user_data_dir + '/config.json'
try:
with open(filename, 'w') as f:
json.dump(settings,f, indent=2, sort_keys=True)
except:
logging.error("Write config error")
def load_proofseq(slot):
filename = APPDIRS.user_data_dir + "/slot_" + str(slot) + "_sequence.msp"
try:
with open(filename, 'r') as f:
seq = json.loads(f.read())
except:
seq = []
return seq
def save_proofseq(slot,seq):
if not os.path.exists(APPDIRS.user_data_dir):
os.makedirs(APPDIRS.user_data_dir)
filename = APPDIRS.user_data_dir + "/slot_" + str(slot) + "_sequence.msp"
try:
with open(filename, 'w') as f:
json.dump(seq,f, indent=2, sort_keys=True)
except:
logging.error("Write proof sequence error")
def get_proof_from_commit(slot,commit):
filename = APPDIRS.user_data_dir + "/slot_" + str(slot) + "_sequence.msp"
try:
with open(filename, 'r') as f:
seq = json.loads(f.read())
except:
return None
for addproof in seq:
if addproof["commitment"] == commit:
return addproof
logging.info("Commitment not in saved sequence proof")
return None
def add_to_proofseq(seq,sproof):
addproof = {"txid":sproof["response"]["attestation"]["txid"],
"commitment":sproof["response"]["merkleproof"]["commitment"],
"merkle_root":sproof["response"]["merkleproof"]["merkle_root"],
"ops":sproof["response"]["merkleproof"]["ops"],
"date":sproof["response"]["attestation"]["inserted_at"],
"height":'0'}
seq.insert(0,addproof)
return seq
def writetofile(output,filename):
filename = os.getcwd() + '/' + filename
try:
with open(filename,'w') as file:
json.dump(output,file)
except:
logging.error("write output to file error")
def readfromfile(filename):
filename = os.getcwd() + '/' + filename
try:
with open(filename) as file:
seq = json.load(file)
return seq
except:
logging.error("Error reading from file")
sys.exit(1)
def get_mainstay_api(url,rstring):
try:
r = requests.request('GET', url+rstring, timeout=2)
r.raise_for_status()
proof = r.json()
return proof
except:
logging.error("Get mainstay proof http error")
return False
def update_proofseq(service_url,seq,slot,txid):
# add all recent slot proofs to the proof sequence
try:
top_txid = seq[0]["txid"]
except:
top_txid = " "
rstring = "/api/v1/position?position="+str(slot)
pp = get_mainstay_api(service_url,rstring)
    try:
        total = pp["total"]
        np = math.ceil(pp["pages"])
except:
logging.error("ERROR: get position proofs http error")
return False
ip = 0
fbase = False
for page in range(np):
logging.debug("Reading page "+str(page+1)+" of "+str(np))
if page > 0:
try:
rstring = "/api/v1/position?position="+str(slot)+"&page="+str(page+1)
pp = get_mainstay_api(service_url,rstring)
except:
logging.error("ERROR: get position proofs page http error")
sys.exit(1)
for sproof in pp["data"]:
try:
logging.debug("TxID: "+sproof["txid"])
if sproof["txid"] != top_txid:
addproof = {"txid":sproof["txid"],
"commitment":sproof["commitment"],
"merkle_root":sproof["merkle_root"],
"ops":sproof["ops"],
"date":sproof["date"],
"height":'0'}
if "additions" in sproof:
addproof["additions"] = sproof["additions"]
if sproof["confirmed"]:
seq.insert(ip,addproof)
ip = ip + 1
else:
fbase = True
break
except:
logging.error("ERROR: get commit proof error")
sys.exit(1)
if sproof["txid"] == txid:
fbase = True
break
if fbase: break
# check total
rstring = "/api/v1/position?position="+str(slot)
pp = get_mainstay_api(service_url,rstring)
if total == pp["total"]:
return seq
else:
logging.error("ERROR: pages updated during retrieval - please re-run fetch.")
sys.exit(1)
def attest_command(args):
settings = get_settings(args)
if args.addition:
addition = True
else:
addition = False
if args.dropbox_checksum:
proof_checksum = dropbox_checksum
else:
proof_checksum = sha256sum
if args.slot:
slot = str(args.slot)
else:
try:
slot = str(settings["slot"])
except:
logging.error("Missing slot ID in config and argument")
return False
if args.api_token:
token = args.api_token
else:
try:
token = settings["api_token"]
except:
logging.error("Missing API token in config and argument")
return False
if args.privkey:
privkey = args.privkey
else:
try:
privkey = settings["privkey"]
except:
privkey = None
logging.info("No private key: unsigned commitment")
if args.commitment:
if len(args.commitment) != 64:
logging.error("Invalid commitment string: incorrect length")
return False
if not is_hex(args.commitment):
logging.error("Invalid commitment string: not hex")
return False
commitment = args.commitment
if args.filename or args.md5_checksum:
if args.filename:
arg = args.filename
message = "SHA256"
proof_checksum = sha256sum
if args.md5_checksum:
arg = args.md5_checksum
message = "SHA256 derived from MD5 hash"
proof_checksum = md5_checksum
if arg[0] == '/':
filename = arg
else:
filename = os.getcwd() + '/' + arg
try:
commitment = proof_checksum(filename)
except:
logging.error("ERROR: could not open specified file")
return False
logging.info(message+"("+arg+"): "+commitment)
if args.git:
if args.git == '0':
try:
git_path = str(settings["git_path"])
except:
logging.error("Missing Git repo path in config and argument")
return False
else:
git_path = args.git
try:
repo = git.Repo(git_path)
line = repo.git.log('--pretty=oneline','-1')
except:
logging.error("Invalid Git repository")
return False
padding = '0'*24
commitment = line[0:40] + padding
logging.info('HEAD: '+line[0:40])
if args.directory:
if args.directory == '0':
try:
dir_path = str(settings["directory"])
except:
logging.error("Missing directory path in config and argument")
return False
else:
dir_path = args.directory
try:
filelist = os.listdir(dir_path)
except:
logging.error("ERROR: Invalid directory path.")
return False
filelist.sort()
if dir_path[-1] != '/':
dir_path += '/'
time = 0
cstream = ''
nfiles = 0
for file in filelist:
mtime = os.path.getmtime(dir_path+file)
if mtime <= time:
logging.warning("WARNING: modification times out of order with name sequence")
if os.path.isfile(dir_path+file):
try:
filehash = proof_checksum(dir_path+file)
cstream += filehash
time = mtime
nfiles += 1
logging.debug("File "+file)
logging.debug("SHA256 "+filehash)
logging.debug("Time "+str(time))
except:
logging.error("ERROR: could not open file: "+file)
return False
#create commitment from hash list
preimage = bytes.fromhex(cstream)
commitment = Hash(preimage).hex()
logging.info("Hash sequence: "+str(nfiles)+" files")
logging.info("Commitment: "+commitment)
headers = {'Content-Type': 'application/json'}
payload = {"commitment":commitment,"position":slot,"token":token}
payload_enc = str(base64.b64encode(json.dumps(payload).encode('utf-8')).decode('ascii'))
if privkey:
key = ECPrivkey(bytes.fromhex(privkey))
message = bytes.fromhex(commitment)
sig = key.sign_message(message, True)
sig_string = str(base64.b64encode(sig).decode('ascii'))
else:
sig_string = ""
data = {"X-MAINSTAY-PAYLOAD":payload_enc,"X-MAINSTAY-SIGNATURE":sig_string}
if addition:
api_path = '/api/v1/commitment/add'
else:
api_path = '/api/v1/commitment/send'
try:
response = requests.post(args.service_url+api_path, headers=headers, data=json.dumps(data))
rdata = response.json()
except Exception as error:
logging.error("ERROR: could not send request")
logging.error(error)
return False
if 'error' in rdata:
logging.error("Mainstay service error: "+rdata["error"])
else:
logging.info("Attestation sent")
return rdata
def fetch_command(args):
settings = get_settings(args)
if args.slot and not args.gitpath:
slot = args.slot
else:
try:
slot = settings["slot"]
except:
logging.error("Missing slot ID in config and argument")
return False
if args.txid:
if args.txid == '0':
try:
txid = settings["txid"]
except:
txid = None
else:
txid = args.txid
# proof type
if args.commitment:
        if args.commitment == '0':
rstring = "/api/v1/commitment/latestproof?position="+str(slot)
sproof = get_mainstay_api(args.service_url,rstring)
if args.filename and sproof:
writetofile(sproof,args.filename)
if args.output and sproof:
logging.info(sproof)
if hasattr(args, 'save_object'):
args.save_object = sproof
return True
if len(args.commitment) != 64:
logging.error("Invalid commitment string: incorrect length")
return False
if not is_hex(args.commitment):
logging.error("Invalid commitment string: not hex")
return False
rstring = "/api/v1/commitment/commitment?commitment="+args.commitment
sproof = get_mainstay_api(args.service_url,rstring)
if args.filename and sproof:
writetofile(sproof,args.filename)
if args.output and sproof:
logging.info(json.dumps(sproof, indent=2, sort_keys=True))
if hasattr(args, 'save_object'):
args.save_object = sproof
return True
if args.list:
commitment_list = [item for item in args.list.split(',')]
for commitment in commitment_list:
            if len(commitment) != 64:
                logging.error("Invalid commitment string: incorrect length")
                return False
            if not is_hex(commitment):
                logging.error("Invalid commitment string: not hex")
                return False
        seq = []
        for commitment in commitment_list:
            rstring = "/api/v1/commitment/commitment?commitment="+commitment
sproof = get_mainstay_api(args.service_url,rstring)
seq = add_to_proofseq(seq,sproof)
if args.filename and sproof:
writetofile(seq,args.filename)
if args.output and sproof:
logging.info(json.dumps(seq, indent=2, sort_keys=True))
if hasattr(args, 'save_object'):
args.save_object = seq
return True
if args.gitpath:
if args.gitpath == '0':
try:
git_path = str(settings["git_path"])
except:
logging.error("Missing Git repo path in config and argument")
return False
else:
git_path = args.gitpath
try:
repo = git.Repo(git_path)
gitlog = repo.git.log('--pretty=oneline')
except:
logging.error("Invalid Git repository")
return False
clist = gitlog.splitlines()
try:
init_txid = clist[-1][41:105]
init_slot = clist[-1][106:]
slotint = int(init_slot)
except:
logging.error("Initial Git commit not valid staychain ID")
return False
if not is_hex(init_txid):
            logging.error("Invalid Git commit staychain ID: not hex")
            return False
seq = load_proofseq(slot)
seq = update_proofseq(args.service_url,seq,init_slot,init_txid)
if seq:
if args.filename and seq:
writetofile(seq,args.filename)
if args.output and seq:
logging.info(json.dumps(seq, indent=2, sort_keys=True))
if hasattr(args, 'save_object'):
args.save_object = seq
save_proofseq(slot,seq)
logging.info("Git repo initial commit ID: "+init_txid+":"+init_slot)
logging.info("Sequence length: "+str(len(seq)))
logging.info(" Start: "+seq[-1]["date"])
logging.info(" End: "+seq[0]["date"])
return True
else:
logging.info("Empty sequence")
return False
if args.txid:
if len(txid) != 64:
logging.error("Invalid TxID string: incorrect length")
return False
elif not is_hex(txid):
logging.error("Invalid TxID string: not hex")
return False
seq = load_proofseq(slot)
seq = update_proofseq(args.service_url,seq,slot,txid)
if seq:
if args.filename and seq:
writetofile(seq,args.filename)
if args.output and seq:
logging.info(json.dumps(seq, indent=2, sort_keys=True))
if hasattr(args, 'save_object'):
args.save_object = seq
save_proofseq(slot,seq)
logging.info("Sequence length: "+str(len(seq)))
logging.info(" Start: "+seq[-1]["date"])
logging.info(" End: "+seq[0]["date"])
return True
else:
logging.info("Empty sequence")
return False
if args.update:
txid = None
seq = load_proofseq(slot)
if seq:
olen = len(seq)
else:
olen = 0
if olen < 1:
logging.error("No proof sequence to update. Run -i first.")
return False
seq = update_proofseq(args.service_url,seq,slot,txid)
save_proofseq(slot,seq)
if args.filename and seq:
writetofile(seq[0:-olen],args.filename)
if args.output and seq:
logging.info(json.dumps(seq[0:-olen], indent=2, sort_keys=True))
if hasattr(args, 'save_object'):
args.save_object = seq[0:-olen]
logging.info("Added "+str(len(seq)-olen)+" proofs")
logging.info("Sequence length: "+str(len(seq)))
logging.info(" Start: "+seq[-1]["date"])
logging.info(" End: "+seq[0]["date"])
return True
logging.info("Please specify a fetch option (fetch -h for details).")
def verify_command(args):
settings = get_settings(args)
if args.dropbox_checksum:
proof_checksum = dropbox_checksum
elif args.md5_checksum:
proof_checksum = md5_checksum
else:
proof_checksum = sha256sum
if args.bitcoin_node:
bitcoin_node = args.bitcoin_node
else:
try:
            bitcoin_node = settings["bitcoin_node"]
except:
logging.error("Missing bitcoin node connection details in config and argument")
return False
if args.slot:
slot = args.slot
else:
try:
slot = settings["slot"]
except:
logging.error("Missing slot ID in config and argument")
return False
if args.txid:
txid_base = args.txid
else:
try:
txid_base = settings["txid"]
except:
txid_base = None
if args.commitment:
if len(args.commitment) != 64:
logging.error("Invalid commitment string: incorrect length")
return False
if not is_hex(args.commitment):
logging.error("Invalid commitment string: not hex")
return False
addproof = get_proof_from_commit(slot,args.commitment)
if not addproof:
logging.info("Retrieving slot proof from "+args.service_url)
rstring = "/api/v1/commitment/commitment?commitment="+args.commitment
sproof = get_mainstay_api(args.service_url,rstring)
if 'response' not in sproof:
logging.info("Status: "+sproof["error"])
return False, sproof["error"]
addproof = {"txid":sproof["response"]["attestation"]["txid"],
"commitment":sproof["response"]["merkleproof"]["commitment"],
"merkle_root":sproof["response"]["merkleproof"]["merkle_root"],
"ops":sproof["response"]["merkleproof"]["ops"],
"date":sproof["response"]["attestation"]["inserted_at"]}
if sproof["response"]["attestation"]["confirmed"]:
ver,_ = verify_commitment(slot,addproof,bitcoin_node)
ver_com = "Verified commitment "+ver[0]+" in slot "+str(slot)+" in TxID "+ver[1]
ver_block = "In Bitcoin block "+ver[2]+" height "+ver[3]+" at "+ver[4]
if args.commitment != ver[0]:
try:
if verify_addition_proof(sproof["response"]["addproof"],sproof["response"]["merkleproof"]["commitment"]):
logging.info("Verified addition "+sproof["response"]["addproof"]["addition"]+" in commitment")
else:
                    logging.info("ERROR: Not verified addition "+sproof["response"]["addproof"]["addition"]+" in commitment")
except:
logging.error("ERROR: Commitment addition verification failure")
logging.info(ver_com+"\n"+ver_block)
return True, ver_com, ver_block
else:
logging.info("Status: Awaiting Confirmation")
return False, "Awaiting Confirmation"
if args.unspent:
if len(args.unspent) != 64:
logging.error("Invalid commitment string: incorrect length")
return False
if not is_hex(args.unspent):
logging.error("Invalid commitment string: not hex")
return False
try:
rstring = "/api/v1/commitment/latestproof?position="+str(slot)
sproof = get_mainstay_api(args.service_url,rstring)
except:
logging.error("ERROR: Mainstay API request error.")
return False
addproof = {"txid":sproof["response"]["txid"],
"commitment":sproof["response"]["commitment"],
"merkle_root":sproof["response"]["merkle_root"],
"ops":sproof["response"]["ops"]}
ver,_ = verify_commitment(slot,addproof,bitcoin_node)
usp = verify_unspent(addproof["txid"],bitcoin_node)
if ver[0] != args.unspent:
logging.info(args.unspent+" not latest commitment")
return False
if usp:
ver_com = "Latest commitment "+ver[0]+" in slot "+str(slot)+"\nUnspent TxID "+ver[1]
ver_block = "In Bitcoin block "+ver[2]+" height "+ver[3]+" at "+ver[4]
logging.info(ver_com+"\n"+ver_block)
return True
else:
ver_com = "Commitment "+ver[0]+" in slot "+str(slot)+"\nSpent TxID "+ver[1]
ver_block = "In Bitcoin block "+ver[2]+" height "+ver[3]+" at "+ver[4]
logging.info(ver_com+"\n"+ver_block)
return False
if args.filename:
seq = readfromfile(args.filename)
elif args.proof:
if args.proof == '0':
seq = load_proofseq(slot)
else:
try:
seq = json.loads(args.proof)
except:
logging.error("Invalid JSON for proof sequence")
return False
else:
logging.error("No proof sequence to verify: use option -p or -f to specify proof")
return False
if len(seq) < 1:
logging.error("No proof sequence to verify")
return False
if args.list:
commitment_list = [item for item in args.list.split(',')]
for commitment in commitment_list:
if len(commitment) != 64:
logging.error("Invalid commitment string: incorrect length")
return False
if not is_hex(commitment):
logging.error("Invalid commitment string: not hex")
return False
itr = 0
#loop over all slot proofs in sequence
for sproof in seq:
# zero commits are null and skipped
if sproof["commitment"] == '0'*64: continue
if commitment_list[itr] == sproof["commitment"]:
logging.debug("Commitment "+commitment_list[itr])
logging.debug("In TxID "+sproof["txid"])
logging.debug("Block height "+sproof["height"])
continue
else:
itr += 1
if commitment_list[itr] == sproof["commitment"]:
logging.debug("Commitment "+commitment_list[itr])
logging.debug("In TxID "+sproof["txid"])
logging.debug("Block height "+sproof["height"])
continue
else:
logging.error("Verification failed. Commitments not matched.")
return False
if itr != len(commitment_list)-1:
logging.error("Verification failed. Additional commitments on list not in proof.")
return False
logging.info("Verified proof sequence against commitment list.")
return True
if args.gitpath:
if args.gitpath == '0':
try:
git_path = str(settings["git_path"])
except:
logging.error("Missing Git repo path in config and argument")
return False
else:
git_path = args.gitpath
try:
repo = git.Repo(git_path)
gitlog = repo.git.log('--pretty=oneline')
except:
logging.error("Invalid Git repository")
return False
padding = '0'*24
ptr = 0
clist = gitlog.splitlines()
matched = []
#loop over all slot proofs in sequence
for sproof in seq:
# zero commits are null and skipped
if sproof["commitment"] == '0'*64: continue
#loop over all commits
found = False
for itr in range(ptr,len(clist)):
if clist[itr][0:40]+padding == sproof["commitment"]:
ptr = itr
found = True
matched.append(sproof["commitment"])
logging.debug("Commitment "+sproof["commitment"])
logging.debug("In TxID "+sproof["txid"])
logging.debug("Block height "+sproof["height"])
break
if not found:
logging.error("Verification failed. Commitment "+sproof["commitment"][0:40]+" not in repo.")
return False
logging.info("Verified proof sequence against commit history to "+matched[0][0:40])
        if seq[0]["commitment"][0:40] != clist[0][0:40]:
            ncom = 0
            for commit in clist:
                if commit[0:40] == seq[0]["commitment"][0:40]: break
ncom += 1
logging.warning("WARNING: last "+str(ncom)+" commits not attested.")
        try:
            init_txid = clist[-1][41:105]
            init_slot = clist[-1][106:]
            int(init_slot)  # validate that the slot field is numeric
        except:
            logging.info("Initial Git commit not valid staychain ID")
            return True
if init_txid in seq[-1]["txid"] and int(slot) == int(init_slot):
logging.info("Verified Git commit history unique")
logging.info("Base txid: "+init_txid+" slot: "+str(init_slot))
else:
logging.info("Staychain ID not committed to Git history")
return True
if args.directory:
if args.directory == '0':
try:
dir_path = str(settings["directory"])
except:
logging.error("Missing directory path in config and argument")
return False
else:
dir_path = args.directory
try:
filelist = os.listdir(dir_path)
except:
logging.error("ERROR: Invalid directory path.")
return False
filelist.sort()
if dir_path[-1] != '/':
dir_path += '/'
time = 0
cstream = ''
chash = []
flist = []
#create list of cumulative file hashes
for file in filelist:
mtime = os.path.getmtime(dir_path+file)
if mtime <= time:
logging.warning("WARNING: modification times out of order with name sequence")
if os.path.isfile(dir_path+file):
try:
filehash = proof_checksum(dir_path+file)
flist.insert(0,file)
cstream += filehash
time = mtime
preimage = bytes.fromhex(cstream)
commitment = Hash(preimage).hex()
chash.insert(0,commitment)
except:
logging.error("ERROR: could not open file: "+file)
return False
#loop over all slot proofs in sequence
ptr = 0
fmatch = []
for sproof in seq:
# zero commits are null and skipped
if sproof["commitment"] == '0'*64: continue
#loop over all commits
found = False
for itr in range(ptr,len(chash)):
if chash[itr] == sproof["commitment"]:
ptr = itr
found = True
logging.debug("Commitment "+sproof["commitment"])
logging.debug("Latest file "+flist[itr])
logging.debug("In TxID "+sproof["txid"])
logging.debug("Block height "+sproof["height"])
fmatch.append(flist[itr])
break
if not found:
logging.error("Verification failed. Commitment "+sproof["commitment"]+" not in directory hash chain. ")
return False
logging.info("Verified proof sequence against directory hash chain.")
if seq[0]["commitment"] != chash[0]:
ncom = 0
for commit in chash:
if commit == seq[0]["commitment"]: break
ncom += 1
logging.warning("WARNING: last "+str(ncom)+" files not attested.")
logging.warning("Last file attested: "+fmatch[0])
return True
verout = []
nseq = []
txin = None
stxid = None
schain = []
#verify proof sequence against bitcoin staychain
for sproof in seq:
if txin:
if sproof["txid"] not in txin:
logging.error("Verification failed")
logging.error("TxID "+sproof["txid"]+" not input to "+stxid)
return False
ver,txin = verify_commitment(slot,sproof,bitcoin_node)
stxid = sproof["txid"]
verout.append(ver)
logging.debug("Verified commitment "+ver[0]+" in slot "+str(slot)+" in TxID "+ver[1])
logging.debug("In Bitcoin block "+ver[2]+" height "+ver[3]+" at "+ver[4])
sproof["height"] = ver[3]
nseq.append(sproof)
schain.append(sproof["txid"])
if args.proof:
if args.proof == '0':
save_proofseq(slot,nseq)
# verify staychain txid
if txid_base:
if txid_base in schain or txid_base in txin:
logging.info("Verified proof sequence against staychain base "+txid_base+" slot "+str(slot)+"\n")
else:
logging.error("Proof sequence verified but not on specified staychain base")
return False
else:
logging.info("Verified proof sequence\n")
logging.info("Start commitment in block "+verout[-1][2]+" height "+verout[-1][3]+" at "+verout[-1][4])
logging.info("End commitment in block "+verout[0][2]+" height "+verout[0][3]+" at "+verout[0][4])
usp = verify_unspent(seq[0]["txid"],bitcoin_node)
if usp:
logging.info("End commitment txout unspent")
else:
logging.info("End commitment txout spent")
return True
def sync_command(args):
settings = get_settings(args)
if args.bitcoin_node:
bitcoin_node = args.bitcoin_node
else:
try:
            bitcoin_node = settings["bitcoin_node"]
except:
logging.error("Missing bitcoin node connection details in config and argument")
return False
if args.sidechain_node:
sidechain_node = args.sidechain_node
else:
try:
            sidechain_node = settings["sidechain_node"]
except:
logging.error("Missing sidechain node connection details in config and argument")
return False
if args.slot:
slot = args.slot
else:
try:
slot = settings["slot"]
except:
logging.error("Missing slot ID in config and argument")
return False
#get the staychain base txid from the sidechain genesis block
    if not sidechain_node.startswith('http://'):
        sidechain_node = 'http://' + sidechain_node
    connection = rpc.RPCHost(sidechain_node)
try:
gbh = connection.call('getblockhash',0)
gb = connection.call('getblock',gbh)
except:
logging.error('ERROR: sidechain getblock RPC failure')
return False
txid_base = gb["attestationhash"]
if args.slot:
slot = args.slot
else:
try:
            slot = settings["slot"]
except:
slot = int(gb['mappinghash'],16)
if slot > 999999:
logging.error('ERROR: invalid slot position in sidechain header')
return False
seq = load_proofseq(slot)
    seq = update_proofseq(args.service_url,seq,slot,txid_base)
verout = []
nseq = []
txin = None
stxid = None
schain = []
#verify proof sequence against bitcoin staychain
for sproof in seq:
if txin:
if sproof["txid"] not in txin:
                logging.error("TxID "+sproof["txid"]+" not input to "+stxid)
return False
ver,txin = verify_commitment(slot,sproof,bitcoin_node)
stxid = sproof["txid"]
verout.append(ver)
logging.debug("Verified commitment "+ver[0]+" in slot "+str(slot)+" in TxID "+ver[1])
logging.debug("In Bitcoin block "+ver[2]+" height "+ver[3]+" at "+ver[4])
sproof["height"] = ver[3]
nseq.append(sproof)
schain.append(sproof["txid"])
if args.proof:
if args.proof == '0':
save_proofseq(slot,nseq)
# verify staychain txid
if txid_base:
if txid_base in schain:
logging.info("Verified proof sequence against staychain "+txid_base+" slot "+str(slot))
logging.info("Staychain base "+txid_base+" committed to sidechain genesis")
else:
logging.error("Proof sequence not on committed staychain")
return False
else:
logging.info("Verified proof sequence\n")
#verify commitment sequence against sidechain
prevh = 0
sblocks = []
for sproof in nseq:
        if sproof["commitment"] == '0'*64: continue
try:
block = connection.call('getblock',sproof["commitment"])
except:
logging.error("Verification failure: "+sproof["commitment"]+" not a sidechain block hash")
return False
if prevh != 0:
if prevh < block["height"]:
logging.error("Verification failure: block "+sproof["commitment"]+" out of sequence")
return False
prevh = block["height"]
sblocks.append(prevh)
logging.info("Verified sidechain attestation sequence")
    logging.info("Latest attested sidechain block: "+nseq[0]["commitment"]+" height "+str(sblocks[0]))
if args.filename and nseq:
writetofile(nseq,args.filename)
save_proofseq(slot,nseq)
def config_command(args):
settings = get_settings(args)
flag = False
if args.slot:
settings["slot"] = args.slot
flag = True
if args.txid:
settings["txid"] = args.txid
flag = True
if args.bitcoin_node:
settings["bitcoin_node"] = args.bitcoin_node
flag = True
if args.sidechain_node:
settings["sidechain_node"] = args.sidechain_node
flag = True
if args.api_token:
settings["api_token"] = args.api_token
flag = True
if args.privkey:
settings["privkey"] = args.privkey
flag = True
if args.gitpath:
settings["git_path"] = args.gitpath
flag = True
if args.directory:
settings["directory"] = args.directory
flag = True
if not flag:
logging.info(json.dumps(settings, indent=2, sort_keys=True))
logging.info("Data directory: "+APPDIRS.user_data_dir)
return True
logging.info("Set new config")
save_settings(settings)
def keygen_command(args):
settings = get_settings(args)
if args.gen:
entropy = args.gen
privkey = key_gen(entropy)
settings["privkey"] = privkey
logging.info("Generated key: "+str(privkey))
save_settings(settings)
return True
if args.public:
if args.public == '0':
try:
privkey = settings["privkey"]
except:
logging.error("Privkey not present in config file")
return False
else:
if len(args.public) != 64:
logging.error("Invalid private key: incorrect length")
return False
if not is_hex(args.public):
logging.error("Invalid private key: not hex")
return False
privkey = args.public
public_key = ECPrivkey(bytes.fromhex(privkey)).get_public_key_hex(compressed=True)
logging.info("Public key: "+str(public_key))
return public_key
if args.sign:
try:
privkey = settings["privkey"]
except:
logging.error("Privkey not present in config file")
return False
key = ECPrivkey(bytes.fromhex(privkey))
if len(args.sign) != 64:
logging.error("Invalid commitment: incorrect length")
return False
if not is_hex(args.sign):
logging.error("Invalid commitment: not hex")
return False
message = bytes.fromhex(args.sign)
sig = key.sign_message(message, True)
logging.info("Signature: "+str(base64.b64encode(sig).decode('ascii')))
return str(base64.b64encode(sig).decode('ascii'))
logging.info("Please specify a keygen option (keygen -h for details).")
def info_command(args):
settings = get_settings(args)
if args.slot:
slot = args.slot
else:
try:
slot = settings["slot"]
except:
logging.error("Missing slot ID in config and argument")
return False
try:
rstring = "/api/v1/commitment/latestproof?position="+str(slot)
sproof = get_mainstay_api(args.service_url,rstring)
except:
        logging.error("ERROR: Mainstay API request error.")
        return False
if "error" in sproof.keys():
logging.info("Slot "+str(slot)+" not active.")
return False
logging.info("Slot "+str(slot)+" last commitment: "+sproof["response"]["commitment"])
logging.info("Base ID: "+sproof["response"]["txid"]+":"+str(slot))
if args.config:
settings["txid"] = sproof["response"]["txid"]
logging.info("Set new config for base TxID")
save_settings(settings)
return sproof["response"]["txid"]
|
from API import SolvedAPI
from make_table import Table
import json
import argparse
import os
config = dict()
solution_list = dict()
changeLevel_list = list()
api = None
table = None
def getFolder(path, EXCEPT=list()):
return [ folder for folder in os.listdir(path) \
if os.path.isdir(f"{path}/{folder}") and folder not in EXCEPT ]
# New Problem Update
def updateProblems():
print("update start")
table.run()
print("update end")
# Solution Update
def updateSolution():
rootFolder = "./solution"
tagFolder = getFolder(rootFolder) # in ./solution
for tag in tagFolder:
solution_list[tag] = set()
problemPath = f"{rootFolder}/{tag}"
problems = getFolder(problemPath)
for problem in problems:
solution_list[tag].add(problem)
# Organize list.md (Solution Link)
def updateList():
solutionRPATH = "./../solution"
rootFolder = "./"
tagFolder = config.get('tags')
for tag in tagFolder:
currentPath = f"{rootFolder}/{tag}"
INFO = None
with open(f"{currentPath}/list.md", 'r') as f:
INFO = f.readlines()
f.close()
update = False
NEWINFO = list()
for line in INFO:
split_line = line.split(",")
problemId = split_line[-2]
if tag in solution_list and problemId in solution_list[tag]:
split_line[-1] = f"{solutionRPATH}/{tag}/{problemId}\n"
update = True
line = ",".join(split_line)
NEWINFO.append(line)
if update:
with open(f"{currentPath}/list.md", 'w') as f:
f.writelines(NEWINFO)
f.close()
def updateStatus():
os.system('python3 ./scripts/arrange.py > status.md')
def updateLevel():
table.run(force = True)
if __name__=="__main__":
# Read Config
with open('./scripts/config.json', 'r') as f:
config = json.load(f)
f.close()
api = SolvedAPI(config.get('API'))
table = Table(api, config)
parser = argparse.ArgumentParser('Auto Update')
arg = parser.add_argument
arg('--all', dest='updateAll', action='store_true')
parser.set_defaults(updateAll=False)
arg('--solution', dest='updateSolution', action='store_true')
parser.set_defaults(updateSolution=False)
arg('--level', dest='updateLevel', action='store_true')
parser.set_defaults(updateLevel=False)
arg('--problem-update', dest='updateProblem', action='store_true')
parser.set_defaults(updateProblem=False)
arg('--push', dest='pushEvent', action='store_true')
parser.set_defaults(pushEvent=False)
args = parser.parse_args()
print("START")
if args.pushEvent:
args.updateSolution = True
args.updateProblem = True
if args.pushEvent or args.updateAll or args.updateSolution:
os.system('python3 ./scripts/make_main_readme.py')
if args.updateAll or args.updateLevel:
updateLevel()
if args.updateAll or args.updateSolution:
updateSolution()
updateList()
if args.updateProblem:
updateProblems()
updateStatus()
|
from __future__ import print_function
import json
import sys
class ErrorLog(object):
NOTE = "notice"
WARN = "warning"
FAIL = "error"
REPORT_FILE = "docsgen_report.json"
def __init__(self):
self.errlist = []
self.has_errors = False
self.badfiles = {}
def add_entry(self, file, line, msg, level):
self.errlist.append( (file, line, msg, level) )
self.badfiles[file] = 1
print("!! {} at {}:{}: {}".format(level.upper(), file, line, msg) , file=sys.stderr)
sys.stderr.flush()
if level == self.FAIL:
self.has_errors = True
def write_report(self):
report = [
{
"file": file,
"line": line,
"title": "DocsGen {}".format(level),
"message": msg,
"annotation_level": level
}
for file, line, msg, level in self.errlist
]
with open(self.REPORT_FILE, "w") as f:
f.write(json.dumps(report, sort_keys=False, indent=4))
def file_has_errors(self, file):
return file in self.badfiles
errorlog = ErrorLog()
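# Illustrative usage sketch (hypothetical file name and line number):
#
#   errorlog.add_entry("example.scad", 42, "Missing documentation block", ErrorLog.WARN)
#   if errorlog.file_has_errors("example.scad"):
#       errorlog.write_report()   # writes docsgen_report.json as annotation entries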
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
|
from unittest import TestCase
import pandas as pd
from pytz import UTC
from test_trading_calendar import ExchangeCalendarTestBase
from trading_calendars.exchange_calendar_xwbo import XWBOExchangeCalendar
class XWBOCalendarTestCase(ExchangeCalendarTestBase, TestCase):
answer_key_filename = 'xwbo'
calendar_class = XWBOExchangeCalendar
# The XWBO is open from 9:00 am to 5:30 pm.
MAX_SESSION_HOURS = 8.5
HAVE_EARLY_CLOSES = False
DAYLIGHT_SAVINGS_DATES = ['2018-03-12', '2018-11-05']
def test_normal_holidays(self):
expected_holidays = [
pd.Timestamp('2014-01-01', tz=UTC), # New Year's Day
pd.Timestamp('2014-01-06', tz=UTC), # Epiphany
pd.Timestamp('2014-04-18', tz=UTC), # Good Friday
pd.Timestamp('2014-04-21', tz=UTC), # Easter Monday
pd.Timestamp('2014-05-01', tz=UTC), # Labour Day
pd.Timestamp('2014-05-29', tz=UTC), # Ascension Day
pd.Timestamp('2014-06-09', tz=UTC), # Whit Monday
pd.Timestamp('2014-06-19', tz=UTC), # Corpus Christi
pd.Timestamp('2014-08-15', tz=UTC), # Assumption Day
pd.Timestamp('2014-10-26', tz=UTC), # National Day (Weekend)
pd.Timestamp('2015-10-26', tz=UTC), # National Day (Weekday)
pd.Timestamp('2013-11-01', tz=UTC), # All Saints Day (Weekday)
pd.Timestamp('2014-11-01', tz=UTC), # All Saints Day (Weekend)
pd.Timestamp('2014-12-08', tz=UTC), # Immaculate Conception
pd.Timestamp('2014-12-24', tz=UTC), # Christmas Eve
pd.Timestamp('2014-12-25', tz=UTC), # Christmas Day
pd.Timestamp('2014-12-26', tz=UTC), # St. Stephen's Day
pd.Timestamp('2014-12-31', tz=UTC), # New Year's Eve
]
for session_label in expected_holidays:
self.assertNotIn(session_label, self.calendar.all_sessions)
def test_normal_holidays_after_2018(self):
expected_holidays = [
pd.Timestamp('2019-01-01', tz=UTC), # New Year's Day
pd.Timestamp('2019-04-19', tz=UTC), # Good Friday
pd.Timestamp('2019-04-22', tz=UTC), # Easter Monday
pd.Timestamp('2019-05-01', tz=UTC), # Labour Day
pd.Timestamp('2019-06-10', tz=UTC), # Whit Monday
pd.Timestamp('2019-10-26', tz=UTC), # National Day (Weekend)
pd.Timestamp('2019-12-24', tz=UTC), # Christmas Eve
pd.Timestamp('2019-12-25', tz=UTC), # Christmas Day
pd.Timestamp('2019-12-26', tz=UTC), # St. Stephen's Day
pd.Timestamp('2019-12-31', tz=UTC), # New Year's Eve
]
for session_label in expected_holidays:
self.assertNotIn(session_label, self.calendar.all_sessions)
def test_holidays_fall_on_weekend(self):
# Holidays falling on a weekend should generally not be made up
# during the week, so test that the Fridays and Mondays surrounding
# them are open market days.
expected_sessions = [
# Epiphany (January 6th) on a Saturday.
pd.Timestamp('2018-01-05', tz=UTC),
pd.Timestamp('2018-01-08', tz=UTC),
# Assumption Day (August 15th) on a Saturday.
pd.Timestamp('2015-08-14', tz=UTC),
pd.Timestamp('2015-08-17', tz=UTC),
# Labour Day (May 1st) on a Saturday.
pd.Timestamp('2010-04-30', tz=UTC),
pd.Timestamp('2010-05-03', tz=UTC),
# National Day (October 26th) on a Sunday.
pd.Timestamp('2014-10-24', tz=UTC),
pd.Timestamp('2014-10-27', tz=UTC),
# All Saints Day (November 1st) on a Sunday.
pd.Timestamp('2015-10-30', tz=UTC),
pd.Timestamp('2015-11-02', tz=UTC),
# Immaculate Conception (December 8th) on a Saturday.
pd.Timestamp('2018-12-07', tz=UTC),
pd.Timestamp('2018-12-10', tz=UTC),
# Christmas Eve on a Saturday and Christmas on a Sunday. This means
# that the market should be open on the previous Friday, closed on
# Monday for St. Stephen's Day, and open again on Tuesday.
pd.Timestamp('2011-12-23', tz=UTC),
pd.Timestamp('2011-12-27', tz=UTC),
]
for session_label in expected_sessions:
self.assertIn(session_label, self.calendar.all_sessions)
def test_new_years_eve_falls_on_weekend(self):
# Prior to 2016, when New Year's Eve fell on the weekend, it was
# observed on the preceding Friday.
expected_holidays = [
# New Year's Eve on a Saturday, observed on Friday 12/30.
pd.Timestamp('2011-12-30', tz=UTC),
# New Year's Eve on a Sunday, observed on Friday 12/29.
pd.Timestamp('2006-12-29', tz=UTC),
]
for holiday_label in expected_holidays:
self.assertNotIn(holiday_label, self.calendar.all_sessions)
# In 2016 and after, it is not made up.
expected_sessions = [
# New Year's Eve on a Saturday, Friday 12/30 is a trading day.
pd.Timestamp('2016-12-30', tz=UTC),
# New Year's Eve on a Sunday, Friday 12/29 is a trading day.
pd.Timestamp('2017-12-29', tz=UTC),
]
for session_label in expected_sessions:
self.assertIn(session_label, self.calendar.all_sessions)
|
{"akina saegusa": "nijisanji", "ange katrina": "nijisanji", "ars almal": "nijisanji", "debidebi debiru": "nijisanji", "dola": "nijisanji", "fuwa minato": "nijisanji", "fuyuki hakase": "nijisanji", "haru kaida": "nijisanji", "hayato kagami": "nijisanji", "kai mayuzumi": "nijisanji", "kana sukoya": "nijisanji", "kanae": "nijisanji", "kokoro amamiya": "nijisanji", "kuzuha": "nijisanji", "lize helesta": "nijisanji", "lulu suzuhara": "nijisanji", "maimoto keisuke": "nijisanji", "mirei gundo": "nijisanji", "nui sorciere": "nijisanji", "rena yorumi": "nijisanji", "roa yuzuki": "nijisanji", "ryushen": "nijisanji", "saku sasaki": "nijisanji", "sara hoshikawa": "nijisanji", "toko inui": "nijisanji", "tomoe shirayuki": "nijisanji", "yuika shiina": "nijisanji", "yuki chihiro": "nijisanji", "alice mononobe": "nijisanji", "elu": "nijisanji", "honma himawari": "nijisanji", "kaede higuchi": "nijisanji", "mikoto rindou": "nijisanji", "mito tsukino": "nijisanji", "mugi ienaga": "nijisanji", "rin shizuka": "nijisanji", "sister claire": "nijisanji", "touya kenmochi": "nijisanji", "yashiro kizuku": "nijisanji", "youko akabane": "nijisanji"} |
import os
import requests
import json
import csv
import pandas as pd
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ.get("API_KEY")
def get_zipcode(csv_file_path):
    # utf-8-sig strips the BOM so the first header reads as plain 'Street'
    with open(csv_file_path, 'r', encoding='utf-8-sig') as csv_file:
        reader = csv.DictReader(csv_file)
        for row in reader:
            street = row['Street']
street_new = street.replace(' ','%20')
city = row['District']
city_new = city.replace(' ','%20')
state = row['State']
url = f'https://api.zip-codes.com/ZipCodesAPI.svc/1.0/ZipCodeOfAddress?address={street_new}&address1=&city={city_new}&state={state}&zipcode=&key={apikey}'
#print(url)
response = requests.get(url)
parsed_response = json.loads(response.text)
            if 'Result' in parsed_response:
                zipcode = parsed_response['Result']['Address']['Zip5']
            elif 'Error' in parsed_response:
                zipcode = 'Wrong'
            else:
                zipcode = 'Other problems'
print(street,zipcode)
with open('All.csv','a+') as csv_file2:
writer = csv.DictWriter(csv_file2,fieldnames=['Street','Zipcode'])
writer.writerow({'Street':street,'Zipcode':zipcode}) |
import sys
from _functools import reduce
import configurations as conf
import pagerank_utils as pru
import topic_specific_pagerank as tspr
import WebIR_HW_2_part_2 as part2
def _print_usage():
usage='''
Usage:
python WebIR_HW_2_part_4.py <graph_path> <user_ratings> <user_group_weights> [--verbose]
Example:
python WebIR_HW_2_part_4.py ./datasets/movie_graph.txt ./datasets/user_movie_rating.txt 1683_1__1684_2 --verbose
'''
print(usage)
def main():
if len(sys.argv)>=4 and len(sys.argv)<=5:
movie_graph_path = sys.argv[1]
user_ratings_path = sys.argv[2]
users_weights_pairs = dict([(int(user.split('_')[0]), int(user.split('_')[1])) for user in sys.argv[3].split('__')])
        verbose = len(sys.argv) == 5 and sys.argv[4] == "--verbose"
else:
_print_usage()
exit(1)
# read the whole movie graph from its adjacency list
movie_graph = pru.read_movie_graph(movie_graph_path)
# retrieve ALL user ratings (we can improve it reading only those line of our userid):
user_ratings = pru.read_user_movie_rating(user_ratings_path)
# we have a dictionary {userid: {movie_id: rating}}
# get the list of ratings of the group, i.e. from every user.
# at the end we have a dict: {userid: {movie_id: rate}}
group_ratings = dict([(userid, user_ratings[userid]) for userid in users_weights_pairs])
# compute the pagerank of the group from the group ratings
filtered_group_pagerank = _pagerank_from_group_ratings(movie_graph, group_ratings, users_weights_pairs)
# sort by score
sorted_and_filtered_group_pagerank = sorted(filtered_group_pagerank, key=lambda x: -x[1])
pru.print_pagerank_list(sorted_and_filtered_group_pagerank)
return sorted_and_filtered_group_pagerank
def _pagerank_from_group_ratings(movie_graph, group_ratings, users_weights_pairs, verbose=False):
all_movies = movie_graph.nodes()
merged_teleporting_distribution = {}
users_distributions = []
for userid in group_ratings:
cur_userrating = group_ratings[userid]
users_distributions.append(part2._compute_teleport_distribution_from_ratings(cur_userrating, all_movies))
# list comprehension on user_weights_pairs, instead of .values(), for preserve order (?)
merged_teleporting_distribution = pru.merge_distributions(users_distributions, [w for _, w in users_weights_pairs.items()])
# compute pagerank from the resulting distribution
group_pagerank = tspr.compute_topic_specific_pagerank(movie_graph, merged_teleporting_distribution)
# compute all the movies seen from the group: it is used for filtering
all_seen_movies_from_group = set([m for seen_movies_one_user in group_ratings.values() for m in seen_movies_one_user.keys()])
# filter the result with the already seen film
filtered_final_pagerank = filter(lambda x: x[0] not in all_seen_movies_from_group, group_pagerank.items())
return filtered_final_pagerank
if __name__ == '__main__':
main()
|
# Generated by Django 2.2.1 on 2020-03-25 13:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Kabupaten',
fields=[
('id_kabupaten', models.AutoField(primary_key=True, serialize=False)),
('nama', models.CharField(max_length=100)),
('lon', models.CharField(max_length=128)),
('lat', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='Penderita',
fields=[
('id_penderita', models.AutoField(primary_key=True, serialize=False)),
('nama_lengkap', models.CharField(max_length=250)),
('gender', models.CharField(choices=[('laki-laki', 'LAKI-LAKI'), ('perempuan', 'PEREMPUAN')], default='laki-laki', max_length=15)),
('status', models.CharField(choices=[('sembuh', 'SEMBUH'), ('meninggal', 'MENINGGAL')], default='sembuh', max_length=15)),
('lokasi', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='appbackend.Kabupaten')),
],
),
]
|
"""
Filename / URL patterns.
"""
import inspect
from dataclasses import dataclass, field, replace
from enum import Enum
from itertools import product
from typing import (
Any,
Callable,
ClassVar,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
)
class CombineOp(Enum):
"""Used to uniquely identify different combine operations across Pangeo Forge Recipes.
"""
MERGE = 1
CONCAT = 2
SUBSET = 3
@dataclass(frozen=True)
class ConcatDim:
"""Represents a concatenation operation across a dimension of a FilePattern.
:param name: The name of the dimension we are concatenating over. For
files with labeled dimensions, this should match the dimension name
within the file. The most common value is ``"time"``.
:param keys: The keys used to represent each individual item along this
dimension. This will be used by a ``FilePattern`` object to evaluate
the file name.
:param nitems_per_file: If each file contains the exact same known number of
items in each file along the concat dimension, this can be set to
provide a fast path for recipes.
"""
name: str # should match the actual dimension name
keys: Sequence[Any] = field(repr=False)
nitems_per_file: Optional[int] = None
operation: ClassVar[CombineOp] = CombineOp.CONCAT
@dataclass(frozen=True)
class MergeDim:
"""Represents a merge operation across a dimension of a FilePattern.
    :param name: The name of the dimension we are merging over. The actual
value is not used by most recipes. The most common value is
``"variable"``.
:param keys: The keys used to represent each individual item along this
dimension. This will be used by a ``FilePattern`` object to evaluate
the file name.
"""
name: str
keys: Sequence[Any] = field(repr=False)
operation: ClassVar[CombineOp] = CombineOp.MERGE
@dataclass(frozen=True)
class DimIndex:
"""Object used to index a single dimension of a FilePattern or Recipe Chunks.
:param name: The name of the dimension.
:param index: The position of the item within the sequence.
:param sequence_len: The total length of the sequence.
:param operation: What type of Combine Operation does this dimension represent.
"""
name: str
index: int
sequence_len: int
operation: CombineOp
def __str__(self):
return f"{self.name}-{self.index}"
def __post_init__(self):
assert self.sequence_len > 0
assert self.index >= 0
assert self.index < self.sequence_len
class Index(tuple):
"""A tuple of ``DimIndex`` objects.
    The order of the indexes doesn't matter for comparison."""
def __new__(self, args: Iterable[DimIndex]):
# This validation really slows things down because we call Index a lot!
# if not all((isinstance(a, DimIndex) for a in args)):
# raise ValueError("All arguments must be DimIndex.")
# args_set = set(args)
# if len(set(args_set)) < len(tuple(args)):
# raise ValueError("Duplicate argument detected.")
return tuple.__new__(Index, args)
def __str__(self):
return ",".join(str(dim) for dim in self)
def __eq__(self, other):
return (set(self) == set(other)) and (len(self) == len(other))
def __hash__(self):
return hash(frozenset(self))
CombineDim = Union[MergeDim, ConcatDim]
FilePatternIndex = Index
class FilePattern:
"""Represents an n-dimensional matrix of individual files to be combined
through a combination of merge and concat operations. Each operation generates
a new dimension to the matrix.
:param format_function: A function that takes one argument for each
combine_op and returns a string representing the filename / url paths.
Each argument name should correspond to a ``name`` in the ``combine_dims``
list.
:param combine_dims: A sequence of either concat or merge dimensions. The outer
product of the keys is used to generate the full list of file paths.
:param fsspec_open_kwargs: Extra options for opening the inputs with fsspec.
May include ``block_size``, ``username``, ``password``, etc.
:param query_string_secrets: If provided, these key/value pairs are appended to
the query string of each ``file_pattern`` url at runtime.
:param is_opendap: If True, assume all input fnames represent opendap endpoints.
Cannot be used with caching.
"""
def __init__(
self,
format_function: Callable,
*combine_dims: CombineDim,
fsspec_open_kwargs: Optional[Dict[str, Any]] = None,
query_string_secrets: Optional[Dict[str, str]] = None,
is_opendap: bool = False,
):
self.format_function = format_function
self.combine_dims = combine_dims
self.fsspec_open_kwargs = fsspec_open_kwargs if fsspec_open_kwargs else {}
self.query_string_secrets = query_string_secrets if query_string_secrets else {}
self.is_opendap = is_opendap
if self.fsspec_open_kwargs and self.is_opendap:
raise ValueError(
"OPeNDAP inputs are not opened with `fsspec`. "
"`is_opendap` must be `False` when passing `fsspec_open_kwargs`."
)
def __repr__(self):
return f"<FilePattern {self.dims}>"
@property
def dims(self) -> Dict[str, int]:
"""Dictionary representing the dimensions of the FilePattern. Keys are
dimension names, values are the number of items along each dimension."""
return {op.name: len(op.keys) for op in self.combine_dims}
@property
def shape(self) -> Tuple[int, ...]:
"""Shape of the filename matrix."""
return tuple([len(op.keys) for op in self.combine_dims])
@property
def merge_dims(self) -> List[str]:
"""List of dims that are merge operations"""
return [op.name for op in self.combine_dims if op.operation == CombineOp.MERGE]
@property
def concat_dims(self) -> List[str]:
"""List of dims that are concat operations"""
return [op.name for op in self.combine_dims if op.operation == CombineOp.CONCAT]
@property
def nitems_per_input(self) -> Dict[str, Union[int, None]]:
"""Dictionary mapping concat dims to number of items per file."""
nitems = {} # type: Dict[str, Union[int, None]]
for op in self.combine_dims:
if isinstance(op, ConcatDim):
if op.nitems_per_file:
nitems[op.name] = op.nitems_per_file
else:
nitems[op.name] = None
return nitems
@property
def concat_sequence_lens(self) -> Dict[str, Optional[int]]:
"""Dictionary mapping concat dims to sequence lengths.
Only available if ``nitems_per_input`` is set on the dimension."""
return {
dim_name: (nitems * self.dims[dim_name] if nitems is not None else None)
for dim_name, nitems in self.nitems_per_input.items()
}
def __getitem__(self, indexer: FilePatternIndex) -> str:
"""Get a filename path for a particular key. """
assert len(indexer) == len(self.combine_dims)
format_function_kwargs = {}
for idx in indexer:
dims = [
dim
for dim in self.combine_dims
if dim.name == idx.name and dim.operation == idx.operation
]
if len(dims) != 1:
                raise KeyError(f"Could not find a valid combine_dim for indexer {idx}")
dim = dims[0]
format_function_kwargs[dim.name] = dim.keys[idx.index]
fname = self.format_function(**format_function_kwargs)
return fname
def __iter__(self) -> Iterator[FilePatternIndex]:
"""Iterate over all keys in the pattern. """
for val in product(*[range(n) for n in self.shape]):
index = Index(
(
DimIndex(op.name, v, len(op.keys), op.operation)
for op, v in zip(self.combine_dims, val)
)
)
yield index
def items(self):
"""Iterate over key, filename pairs."""
for key in self:
yield key, self[key]
def pattern_from_file_sequence(file_list, concat_dim, nitems_per_file=None, **kwargs):
"""Convenience function for creating a FilePattern from a list of files."""
keys = list(range(len(file_list)))
concat = ConcatDim(name=concat_dim, keys=keys, nitems_per_file=nitems_per_file)
def format_function(**kwargs):
return file_list[kwargs[concat_dim]]
return FilePattern(format_function, concat, **kwargs)
def prune_pattern(fp: FilePattern, nkeep: int = 2) -> FilePattern:
"""
Create a smaller pattern from a full pattern.
Keeps all MergeDims but only the first `nkeep` items from each ConcatDim
:param fp: The original pattern.
:param nkeep: The number of items to keep from each ConcatDim sequence.
"""
new_combine_dims = [] # type: List[CombineDim]
for cdim in fp.combine_dims:
if isinstance(cdim, MergeDim):
new_combine_dims.append(cdim)
elif isinstance(cdim, ConcatDim):
new_keys = cdim.keys[:nkeep]
new_cdim = replace(cdim, keys=new_keys)
new_combine_dims.append(new_cdim)
else: # pragma: no cover
            assert False, "Should never happen"
sig = inspect.signature(fp.__init__) # type: ignore
kwargs = {
param: getattr(fp, param)
for param in sig.parameters.keys()
if param not in ["format_function", "combine_dims"]
}
return FilePattern(fp.format_function, *new_combine_dims, **kwargs)
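# Illustrative usage sketch: assumes ``ConcatDim`` (defined earlier in this
# module) takes ``name``, ``keys`` and ``nitems_per_file`` exactly as used in
# ``pattern_from_file_sequence`` above; the URL scheme below is hypothetical.
if __name__ == "__main__":  # pragma: no cover
    def make_url(variable, time):
        return f"https://example.com/data/{variable}/{time}.nc"
    pattern = FilePattern(
        make_url,
        MergeDim(name="variable", keys=["temperature", "salinity"]),
        ConcatDim(name="time", keys=["2020-01", "2020-02", "2020-03"], nitems_per_file=1),
    )
    print(pattern.dims)   # {'variable': 2, 'time': 3}
    print(pattern.shape)  # (2, 3)
    for index, url in pattern.items():
        print(index, "->", url)
    # keep only the first two keys of every ConcatDim for quick testing
    print(prune_pattern(pattern, nkeep=2).dims)  # {'variable': 2, 'time': 2}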
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This module contains class MRT_Metadata_Parser
The purpose of this class is to prepare MRT files for extrapolator.
This is done through a series of steps.
Read README for in depth explanation.
"""
__authors__ = ["Justin Furuness"]
__credits__ = ["Justin Furuness"]
__license__ = "BSD"
__maintainer__ = "Justin Furuness"
__email__ = "jfuruness@gmail.com"
__status__ = "Production"
import bisect
import datetime
import logging
import os
import warnings
import psycopg2
import requests
from ....utils.base_classes import Parser
from ...roas.tables import ROAs_Table
from ..mrt_base.mrt_file import MRT_File
from ..mrt_base.mrt_installer import MRT_Installer
from ..mrt_base.mrt_sources import MRT_Sources
from ..mrt_base.tables import MRT_Announcements_Table
from .tables import Distinct_Prefix_Origins_Table
from .tables import Prefix_IDs_Table
from .tables import Origin_IDs_Table
from .tables import Prefix_Origin_IDs_Table
from .tables import Distinct_Prefix_Origins_W_IDs_Table
from .tables import Blocks_Table
from .tables import ROA_Known_Validity_Table
from .tables import ROA_Validity_Table
from .tables import Prefix_Origin_Blocks_Metadata_Table
from .tables import Prefix_Origin_Metadata_Table
from .tables import MRT_W_Metadata_Table
from ....utils import utils
class MRT_Metadata_Parser(Parser):
"""This class downloads, parses, and deletes files from Caida.
In depth explanation at the top of module.
"""
__slots__ = []
    def _run(self, *args, max_block_size=400):
        """Adds metadata to MRT files and prepares for EXR insertion
        1. Adds ROA state
        2. Adds prefix_id (id unique to each prefix)
        3. Adds monitor_asn (last ASN in the path)
        4. Adds block_id (block id for insertion into exr;
           needed or else it won't fit in RAM)
        5. Adds block_prefix_id (the prefix id within a block. Used inside the
           extrapolator to compare prefixes within the same block when
           determining the best path, since it can serve as a list index
           instead of a hashmap key)
        6. JK, add as many indexes as you can think of. Used in Forecast,
           verification, full path, etc, so just add them all.
        """
self._validate()
self._add_prefix_origin_index()
logging.info(f"Creating {Distinct_Prefix_Origins_Table.name}")
self._get_p_o_table_w_indexes(Distinct_Prefix_Origins_Table)
# If you were a real cool cat, you would have done a compressed
# trie, finding common ancestors, to get prefix groupings
# def way faster than all this. Also more difficult.
for Table in [Origin_IDs_Table,
Prefix_IDs_Table,
Prefix_Origin_IDs_Table,
Distinct_Prefix_Origins_W_IDs_Table]:
logging.info(f"Creating {Table.__name__}")
self._get_p_o_table_w_indexes(Table)
self._create_block_table(max_block_size)
self._add_roas_index()
for Table in [ROA_Known_Validity_Table,
ROA_Validity_Table,
Prefix_Origin_Blocks_Metadata_Table,
Prefix_Origin_Metadata_Table]:
self._get_p_o_table_w_indexes(Table)
self._add_metadata()
def _validate(self):
"""Asserts that tables are filled"""
for Table in [MRT_Announcements_Table, ROAs_Table]:
with Table() as db:
err = f"{db.name} not filled"
sql = f"SELECT * FROM {db.name} LIMIT 2"
assert len(db.execute(sql)) > 0, err
def _add_prefix_origin_index(self):
"""Adds index to prefix and origin for combining with ROAs table"""
with MRT_Announcements_Table() as db:
sql = f"""CREATE INDEX IF NOT EXISTS {db.name}_po_index ON
{db.name} USING GIST(prefix inet_ops, origin)"""
self._create_index(sql, db)
sql = f"""CREATE INDEX IF NOT EXISTS {db.name}_po_btree_i ON
{db.name}(prefix inet_ops, origin);"""
self._create_index(sql, db)
def _get_p_o_table_w_indexes(self, Table):
"""Prefix origin table with indexes"""
with Table(clear=True) as db:
db.fill_table()
index_sqls = [
f"""CREATE INDEX IF NOT EXISTS {db.name}_dpo_index
ON {db.name} USING GIST(prefix inet_ops, origin)""",
f"""CREATE INDEX IF NOT EXISTS {db.name}_dist_p_index
ON {db.name} USING GIST(prefix inet_ops)""",
f"""CREATE INDEX IF NOT EXISTS {db.name}_dist_o_index
ON {db.name}(origin)""",
f"""CREATE INDEX IF NOT EXISTS {db.name}_g_index
ON {db.name}(prefix_group_id);""",
f"""CREATE INDEX IF NOT EXISTS {db.name}_pbtree_index
ON {db.name}(prefix)""",
f"""CREATE INDEX IF NOT EXISTS {db.name}_po_btree_index
ON {db.name}(prefix, origin);"""
]
for sql in index_sqls:
try:
self._create_index(sql, db)
except psycopg2.errors.UndefinedColumn:
pass
    def _create_block_table(self, max_block_size):
        """Creates iteration blocks that are as balanced as possible
        Balancing is based on each prefix and the total # of anns for that prefix.
        A custom algorithm was needed here, which is fine since bin packing
        is NP hard anyway.
        First the correct number of bins is determined, since the prefix
        count per bin matters most. Then each prefix is assigned to a bin:
        largest ann_count first, into the bin with the smallest total weight.
        (A standalone sketch of this heuristic is included at the bottom of
        this module.)
        """
class Bin:
def __init__(self, bin_id):
self.bin_id = bin_id
self.prefixes = []
self.total_weight = 0
def add_prefix(self, prefix, ann_count):
if len(self.prefixes) + 1 <= max_block_size:
self.prefixes.append(prefix)
self.total_weight += ann_count
return True
else:
return False
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.total_weight < other.total_weight
@property
def rows(self):
return [[self.bin_id, x] for x in self.prefixes]
logging.info("Getting prefix blocks")
with Prefix_IDs_Table() as db:
group_counts = [[x["prefix"], x["ann_count"]]
for x in db.get_all()]
group_counts = sorted(group_counts, key=lambda x: x[1], reverse=True)
bin_count = (len(group_counts) // max_block_size) + 1
bins = list(sorted([Bin(i) for i in range(bin_count)]))
# tbh, is this the same as just doing it in order?
# Should check this...
for i, (prefix, ann_count) in enumerate(group_counts):
for b_index, b in enumerate(bins):
if b.add_prefix(prefix, ann_count):
current_index = b_index
break
# Inserts item in sorted list correctly
# MUCH faster than sort
# https://stackoverflow.com/a/38346428/8903959
bisect.insort_left(bins, bins.pop(b_index))
block_table_rows = []
for current_bin in bins:
block_table_rows.extend(current_bin.rows)
csv_path = os.path.join(self.csv_dir, "block_table.csv")
utils.rows_to_db(block_table_rows, csv_path, Blocks_Table)
for _id in ["block_id", "prefix"]:
sql = f"""CREATE INDEX IF NOT EXISTS
{Blocks_Table.name}_{_id}
ON {Blocks_Table.name}({_id})
;"""
self._create_index(sql, db)
def _add_roas_index(self):
"""Creates an index on the roas table"""
with ROAs_Table() as db:
sql = f"""CREATE INDEX IF NOT EXISTS roas_index
ON {db.name} USING GIST(prefix inet_ops, asn);"""
self._create_index(sql, db)
def _add_metadata(self):
"""Joins prefix origin metadata with MRT Anns"""
logging.info("Adding metadata to the MRT announcements")
with MRT_W_Metadata_Table(clear=True) as db:
db.fill_table()
sql = f"""CREATE INDEX {db.name}_block_index
ON {db.name}(block_id);"""
self._create_index(sql, db)
# NOTE: you probably need other indexes on this table
# Depending on what application is being run
def _create_index(self, sql, db):
logging.info(f"Creating index on {db.name}")
db.execute(sql)
logging.info("Index complete")
################
### Old Code ###
################
def _create_block_table_w_prefix_groups(self, max_block_size):
"""Legacy code now
This can be used for creating blocks with groups
We didn't need to tackle this problem for our phd
We leave it for the next runner up.
Creates blocks for the extrapolator
1. Counts number of prefixes per group
2. Packs them into blocks with a fixed max size
3. Creates the block id table
-contains group_id, block_id
"""
logging.info("Getting prefix blocks")
with Distinct_Prefix_Origins_W_IDs_Table() as db:
sql = f"""SELECT prefix_group_id, COUNT(prefix_group_id) AS total
FROM {db.name}
GROUP BY prefix_group_id;"""
group_counts = db.execute(sql)
group_counts_dict = {x["prefix_group_id"]: x["total"]
for x in group_counts}
            # binpacking.to_constant_volume returns a list of dicts that map
            # group_id -> count (requires the third-party ``binpacking`` package)
            bins = binpacking.to_constant_volume(group_counts_dict, max_block_size)
block_table_rows = []
for block_id, current_bin in enumerate(bins):
for group_id in current_bin:
block_table_rows.append([block_id, group_id])
csv_path = os.path.join(self.csv_dir, "block_table.csv")
utils.rows_to_db(block_table_rows, csv_path, Blocks_Table)
for _id in ["block_id", "prefix_group_id"]:
sql = f"""CREATE INDEX IF NOT EXISTS
{Blocks_Table.name}_{_id} ON {Blocks_Table.name}({_id})
;"""
self._create_index(sql, db)
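################
# Illustrative sketch, not called by the parser above: a minimal, table-free
# demo of the block balancing heuristic documented in _create_block_table.
# Prefixes are sorted by announcement count (largest first) and each one is
# dropped into the currently lightest bin, with bisect.insort_left keeping
# the bin list sorted instead of re-sorting after every insertion. The real
# method also caps the number of prefixes per bin (max_block_size), which is
# omitted here for brevity.
def _demo_block_balancing(prefix_counts, n_bins):
    class DemoBin:
        def __init__(self):
            self.prefixes = []
            self.total_weight = 0
        def __lt__(self, other):
            return self.total_weight < other.total_weight
    bins = [DemoBin() for _ in range(n_bins)]
    for prefix, count in sorted(prefix_counts, key=lambda x: x[1], reverse=True):
        lightest = bins.pop(0)  # list stays sorted ascending, so index 0 is lightest
        lightest.prefixes.append(prefix)
        lightest.total_weight += count
        bisect.insort_left(bins, lightest)
    # e.g. _demo_block_balancing([("1.0.0.0/8", 50), ("2.0.0.0/8", 30),
    #                             ("3.0.0.0/8", 20), ("4.0.0.0/8", 20)], 2)
    # -> two bins with total weights 50 and 70
    return [(b.prefixes, b.total_weight) for b in bins]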
|
"""
Template App - custom class template to efficiently handle objects
"""
# general imports
import numpy as np
# bokeh imports
from bokeh.models import ColumnDataSource
# internal imports
from TA_constants import xsl, ysl, xsr, ysr
# latex integration
#---------------------------------------------------------------------#
# put the intials of the app before the class name
class TA_example_class():
# this function is called when an object of this class is built
# you need at least "self" as an argument
    # set default assignments if your function should also be callable with fewer parameters
    def __init__(self, some_num, initial_string="default"):
# define class variables here
# all class internal variables need the self to be used in other parts of the class
self.some_num = some_num
        self.test_string = initial_string
# you can also build ColumnDataSources here
self.test_cds = ColumnDataSource(data=dict(x=[], y=[]))
# optional - called when print(my_object) is used
# nice to have for debugging
def __str__(self):
tmp_str = "Example Object:\n"
tmp_str += " num: " + str(self.some_num) + "\n"
tmp_str += " string: " + self.test_string + "\n"
return tmp_str
    # for other optional Python-specific class functions see the Python documentation
# like e.g. __add__ or __truediv__
# use getters and setters instead of directly accessing the class variables from outside
# this makes the code more flexible, especially for more complex constructs
def set_string(self, new_string):
self.test_string = new_string
def get_string(self):
return self.test_string
# plot function using a figure handle
    def plot_line(self, fig):
self._update_cds()
fig.line(x='x', y='y', source=self.test_cds)
# use an underscore _ at the beginning of the function name to indicate that
# this function is only to be used inside the class (there is no private keyword in Python)
def _update_cds(self):
self.test_cds.data = dict(x=[1,3], y=[1,0.5]) # direct update
#self.test_cds.stream(dict(x=[0,4], y=[1,1]),rollover=2) # or stream and rollover with size of the columns
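#---------------------------------------------------------------------#
# usage sketch (illustration only, not required by the template):
# how an app could build and use such an object with a bokeh figure
if __name__ == "__main__":
    from bokeh.plotting import figure
    example_object = TA_example_class(42, "hello")
    print(example_object)             # calls __str__
    example_object.set_string("new text")
    my_fig = figure()
    example_object.plot_line(my_fig)  # fills the ColumnDataSource and adds a line glyph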
# you can of course add more classes here
# depending on your structure and complexity it could make sense to have several classes in one file,
# or creating a new file for each class |
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
# standard libraries
import pytest
import unittest.mock as mock
# external packages
import matplotlib
matplotlib.use('Qt5Agg')
from PyQt5.QtGui import QCloseEvent
import matplotlib.pyplot as plt
from skyfield.api import EarthSatellite
from skyfield.api import Angle
from skyfield.api import load
import numpy as np
# local import
from tests.unit_tests.unitTestAddOns.baseTestSetupExtWindows import App
from gui.utilities.toolsQtWidget import MWidget
from gui.extWindows.satelliteW import SatelliteWindow
@pytest.fixture(autouse=True, scope='module')
def ts(qapp):
ts = load.timescale(builtin=True)
yield ts
@pytest.fixture(autouse=True, scope='function')
def function(ts):
window = SatelliteWindow(app=App())
window.app.mount.obsSite.ts = ts
yield window
def test_initConfig_1(function):
suc = function.initConfig()
assert suc
def test_initConfig_2(function):
suc = function.initConfig()
assert suc
function.app.config['satelliteW'] = {'winPosX': 10000}
suc = function.initConfig()
assert suc
def test_initConfig_3(function):
suc = function.initConfig()
assert suc
function.app.config['satelliteW'] = {'winPosY': 10000}
suc = function.initConfig()
assert suc
def test_initConfig_4(function):
function.app.config['satelliteW'] = {}
function.app.config['satelliteW']['winPosX'] = 100
function.app.config['satelliteW']['winPosY'] = 100
suc = function.initConfig()
assert suc
def test_storeConfig_1(function):
if 'satelliteW' in function.app.config:
del function.app.config['satelliteW']
suc = function.storeConfig()
assert suc
def test_storeConfig_2(function):
function.app.config['satelliteW'] = {}
suc = function.storeConfig()
assert suc
def test_closeEvent_1(function):
function.app.mount.signals.pointDone.connect(function.updatePointerAltAz)
with mock.patch.object(function,
'show'):
with mock.patch.object(MWidget,
'closeEvent'):
function.closeEvent(QCloseEvent)
def test_showWindow(function):
with mock.patch.object(MWidget,
'show'):
suc = function.showWindow()
assert suc
def test_markerSatellite(function):
val = function.markerSatellite()
assert val is not None
def test_updatePointerAltAz_1(function):
function.pointerAltAz = None
suc = function.updatePointerAltAz(function.app.mount.obsSite)
assert not suc
def test_updatePointerAltAz_2(function):
axe, _ = function.generateFlat(widget=function.satEarthMat, horizon=False)
function.pointerAltAz, = axe.plot(0, 0)
function.app.mount.obsSite.Alt = Angle(degrees=80)
function.app.mount.obsSite.Az = None
suc = function.updatePointerAltAz(function.app.mount.obsSite)
assert not suc
def test_updatePointerAltAz_3(function):
axe, _ = function.generateFlat(widget=function.satEarthMat, horizon=False)
function.pointerAltAz, = axe.plot(0, 0)
function.app.mount.obsSite.Alt = None
function.app.mount.obsSite.Az = Angle(degrees=80)
suc = function.updatePointerAltAz(function.app.mount.obsSite)
assert not suc
def test_updatePointerAltAz_4(function):
axe, _ = function.generateFlat(widget=function.satEarthMat, horizon=False)
function.pointerAltAz, = axe.plot(0, 0)
function.app.mount.obsSite.Alt = Angle(degrees=80)
function.app.mount.obsSite.Az = Angle(degrees=80)
suc = function.updatePointerAltAz(function.app.mount.obsSite)
assert suc
def test_updatePositions_1(function):
suc = function.updatePositions()
assert not suc
def test_updatePositions_2(function):
suc = function.updatePositions(now='t')
assert not suc
def test_updatePositions_3(function):
suc = function.updatePositions(now='t', location='loc')
assert not suc
def test_updatePositions_4(function):
function.satellite = 1
suc = function.updatePositions(now='t', location='loc')
assert not suc
def test_updatePositions_5(function):
function.satellite = 1
function.plotSatPosEarth = 1
suc = function.updatePositions(now='t', location='loc')
assert not suc
def test_updatePositions_6(function):
function.satellite = 1
function.plotSatPosEarth = 1
function.plotSatPosHorizon = 1
suc = function.updatePositions(now='t', location='loc')
assert not suc
def test_updatePositions_7(function):
function.satellite = 1
function.plotSatPosEarth = 1
function.plotSatPosHorizon = 1
function.plotSatPosSphere1 = 1
suc = function.updatePositions(now='t', location='loc')
assert not suc
def test_updatePositions_8(function):
tle = ["TIANGONG 1",
"1 37820U 11053A 14314.79851609 .00064249 00000-0 44961-3 0 5637",
"2 37820 42.7687 147.7173 0010686 283.6368 148.1694 15.73279710179072"]
function.satellite = EarthSatellite(*tle[1:3], name=tle[0])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
function.plotSatPosEarth, = plt.plot([1, 0], [1, 0])
function.plotSatPosHorizon, = plt.plot([1, 0], [1, 0])
function.plotSatPosSphere1, = ax.plot([1], [1], [1])
function.plotSatPosSphere2, = ax.plot([1], [1], [1])
function.ui.tabWidget.setCurrentIndex(0)
now = function.app.mount.obsSite.ts.now()
location = function.app.mount.obsSite.location
with mock.patch.object(function.plotSatPosSphere1,
'set_data_3d'):
with mock.patch.object(function.plotSatPosSphere2,
'set_data_3d'):
with mock.patch.object(function.plotSatPosEarth,
'set_data'):
with mock.patch.object(function.plotSatPosHorizon,
'set_data'):
suc = function.updatePositions(now=now, location=location)
assert suc
def test_updatePositions_9(function):
tle = ["TIANGONG 1",
"1 37820U 11053A 14314.79851609 .00064249 00000-0 44961-3 0 5637",
"2 37820 42.7687 147.7173 0010686 283.6368 148.1694 15.73279710179072"]
function.satellite = EarthSatellite(*tle[1:3], name=tle[0])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
function.plotSatPosEarth, = plt.plot([1, 0], [1, 0])
function.plotSatPosHorizon, = plt.plot([1, 0], [1, 0])
function.plotSatPosSphere1, = ax.plot([1], [1], [1])
function.plotSatPosSphere2, = ax.plot([1], [1], [1])
function.ui.tabWidget.setCurrentIndex(1)
now = function.app.mount.obsSite.ts.now()
location = function.app.mount.obsSite.location
with mock.patch.object(function.plotSatPosSphere1,
'set_data_3d'):
with mock.patch.object(function.plotSatPosSphere2,
'set_data_3d'):
with mock.patch.object(function.plotSatPosEarth,
'set_data'):
with mock.patch.object(function.plotSatPosHorizon,
'set_data'):
suc = function.updatePositions(now=now, location=location)
assert suc
def test_makeCubeLimits_1(function):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
function.makeCubeLimits(ax)
def test_makeCubeLimits_2(function):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
function.makeCubeLimits(ax, hw=(1, 2, 3))
def test_makeCubeLimits_3(function):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
function.makeCubeLimits(ax, hw=3)
def test_drawSphere1_1(function):
suc = function.drawSphere1()
assert not suc
def test_drawSphere1_2(function, ts):
tle = ["TIANGONG 1",
"1 37820U 11053A 14314.79851609 .00064249 00000-0 44961-3 0 5637",
"2 37820 42.7687 147.7173 0010686 283.6368 148.1694 00.03279710179072"]
satellite = EarthSatellite(*tle[1:3], name=tle[0])
tt = ts.now().tt
observe = satellite.at(ts.tt_jd(tt + np.arange(0, 1, 0.1)))
suc = function.drawSphere1(observe=observe)
assert suc
def test_drawSphere2_1(function):
function.app.mount.obsSite.location.latitude = Angle(degrees=45)
function.app.mount.obsSite.location.longitude = Angle(degrees=45)
suc = function.drawSphere2()
assert not suc
def test_drawSphere2_2(function, ts):
tle = ["TIANGONG 1",
"1 37820U 11053A 14314.79851609 .00064249 00000-0 44961-3 0 5637",
"2 37820 42.7687 147.7173 0010686 283.6368 148.1694 00.03279710179072"]
satellite = EarthSatellite(*tle[1:3], name=tle[0])
tt = ts.now().tt
observe = satellite.at(ts.tt_jd(tt + np.arange(0, 1, 0.1)))
function.app.mount.obsSite.location.latitude = Angle(degrees=45)
function.app.mount.obsSite.location.longitude = Angle(degrees=45)
suc = function.drawSphere2(observe=observe)
assert suc
def test_unlinkWrap(function):
data = [1, 2, 3, 170, 180, -180, -100, 3, 4]
for slc in function.unlinkWrap(data):
a = slc
def test_drawEarth_1(function):
suc = function.drawEarth()
assert not suc
def test_drawEarth_2(function, ts):
tle = ["TIANGONG 1",
"1 37820U 11053A 14314.79851609 .00064249 00000-0 44961-3 0 5637",
"2 37820 42.7687 147.7173 0010686 283.6368 148.1694 00.03279710179072"]
function.satellite = EarthSatellite(*tle[1:3], name=tle[0])
tt = ts.now().tt
t0 = ts.tt_jd(tt + 0)
t1 = ts.tt_jd(tt + 0.1)
t2 = ts.tt_jd(tt + 0.2)
t3 = ts.tt_jd(tt + 0.3)
t4 = ts.tt_jd(tt + 0.4)
satOrbits = [{'rise': t0,
'flip': t0,
'culminate': t0,
'settle': t1},
{'rise': t2,
'flip': t2,
'culminate': t2,
'settle': t3},
{'rise': t3,
'culminate': t3,
'flip': t3,
'settle': t4},
]
obsSite = function.app.mount.obsSite
suc = function.drawEarth(obsSite=obsSite, satOrbits=satOrbits)
assert suc
def test_drawEarth_3(function, ts):
tle = ["TIANGONG 1",
"1 37820U 11053A 14314.79851609 .00064249 00000-0 44961-3 0 5637",
"2 37820 42.7687 147.7173 0010686 283.6368 148.1694 00.03279710179072"]
function.satellite = EarthSatellite(*tle[1:3], name=tle[0])
tt = ts.now().tt
t0 = ts.tt_jd(tt + 0)
t1 = ts.tt_jd(tt + 0.1)
t2 = ts.tt_jd(tt + 0.2)
t3 = ts.tt_jd(tt + 0.3)
t4 = ts.tt_jd(tt + 0.4)
satOrbits = [{'rise': t0,
'flip': t0,
'culminate': t0,
'settle': t1},
{'rise': t2,
'culminate': t2,
'settle': t3},
{'rise': t3,
'culminate': t3,
'flip': t3,
'settle': t4},
]
obsSite = function.app.mount.obsSite
suc = function.drawEarth(obsSite=obsSite, satOrbits=satOrbits)
assert suc
def test_staticHorizon_1(function):
function.app.data.horizonP = []
axe, _ = function.generateFlat(widget=function.satHorizonMat, horizon=False)
suc = function.staticHorizon(axe)
assert not suc
def test_staticHorizon_2(function):
axe, _ = function.generateFlat(widget=function.satHorizonMat, horizon=False)
function.app.data.horizonP = [(0, 0), (0, 360)]
suc = function.staticHorizon(axe)
assert suc
def test_markerAltAz(function):
function.markerAltAz()
def test_drawHorizonView_1(function):
suc = function.drawHorizonView()
assert not suc
def test_drawHorizonView_2(function, ts):
tle = ["TIANGONG 1",
"1 37820U 11053A 14314.79851609 .00064249 00000-0 44961-3 0 5637",
"2 37820 42.7687 147.7173 0010686 283.6368 148.1694 00.03279710179072"]
function.satellite = EarthSatellite(*tle[1:3], name=tle[0])
tt = ts.now().tt
t0 = ts.tt_jd(tt + 0)
t1 = ts.tt_jd(tt + 0.1)
t2 = ts.tt_jd(tt + 0.2)
t3 = ts.tt_jd(tt + 0.3)
t4 = ts.tt_jd(tt + 0.4)
satOrbits = [{'rise': t0,
'flip': t0,
'culminate': t0,
'settle': t1},
{'rise': t2,
'flip': t2,
'culminate': t2,
'settle': t3},
{'rise': t3,
'culminate': t3,
'flip': t3,
'settle': t4},
]
obsSite = function.app.mount.obsSite
suc = function.drawHorizonView(obsSite=obsSite, satOrbits=satOrbits)
assert suc
def test_drawHorizonView_3(function, ts):
tle = ["TIANGONG 1",
"1 37820U 11053A 14314.79851609 .00064249 00000-0 44961-3 0 5637",
"2 37820 42.7687 147.7173 0010686 283.6368 148.1694 00.03279710179072"]
function.satellite = EarthSatellite(*tle[1:3], name=tle[0])
tt = ts.now().tt
t0 = ts.tt_jd(tt + 0)
t1 = ts.tt_jd(tt + 0.1)
t2 = ts.tt_jd(tt + 0.2)
t3 = ts.tt_jd(tt + 0.3)
t4 = ts.tt_jd(tt + 0.4)
satOrbits = [{'rise': t0,
'flip': t0,
'culminate': t0,
'settle': t1},
{'rise': t2,
'culminate': t2,
'settle': t3},
{'rise': t3,
'culminate': t3,
'flip': t3,
'settle': t4},
]
obsSite = function.app.mount.obsSite
suc = function.drawHorizonView(obsSite=obsSite, satOrbits=satOrbits)
assert suc
def test_drawSatellite_1(function):
with mock.patch.object(function,
'drawSphere1'):
with mock.patch.object(function,
'drawSphere2'):
with mock.patch.object(function,
'drawEarth'):
with mock.patch.object(function,
'drawHorizonView'):
suc = function.drawSatellite()
assert not suc
def test_drawSatellite_2(function, ts):
tle = ["TIANGONG 1",
"1 37820U 11053A 14314.79851609 .00064249 00000-0 44961-3 0 5637",
"2 37820 42.7687 147.7173 0010686 283.6368 148.1694 00.03279710179072"]
satellite = EarthSatellite(*tle[1:3], name=tle[0])
tt = ts.now().tt
t0 = ts.tt_jd(tt + 0)
t1 = ts.tt_jd(tt + 0.1)
t2 = ts.tt_jd(tt + 0.2)
t3 = ts.tt_jd(tt + 0.3)
t4 = ts.tt_jd(tt + 0.4)
satOrbits = [{'rise': t0,
'flip': t0,
'culminate': t0,
'settle': t1},
{'rise': t2,
'flip': t2,
'culminate': t2,
'settle': t3},
{'rise': t3,
'culminate': t3,
'flip': t3,
'settle': t4},
]
with mock.patch.object(function,
'drawSphere1'):
with mock.patch.object(function,
'drawSphere2'):
with mock.patch.object(function,
'drawEarth'):
with mock.patch.object(function,
'drawHorizonView'):
suc = function.drawSatellite(satellite=satellite,
satOrbits=satOrbits)
assert suc
def test_drawSatellite_3(function, ts):
tle = ["ISS (ZARYA)",
"1 25544U 98067A 21103.51063550 .00000247 00000-0 12689-4 0 9995",
"2 25544 51.6440 302.6231 0002845 223.0251 174.3348 15.48881931278570"]
satellite = EarthSatellite(*tle[1:3], name=tle[0])
tt = ts.now().tt
t0 = ts.tt_jd(tt + 0)
t1 = ts.tt_jd(tt + 0.1)
t2 = ts.tt_jd(tt + 0.2)
t3 = ts.tt_jd(tt + 0.3)
t4 = ts.tt_jd(tt + 0.4)
satOrbits = [{'rise': t0,
'flip': t0,
'culminate': t0,
'settle': t1},
{'rise': t2,
'flip': t2,
'culminate': t2,
'settle': t3},
{'rise': t3,
'culminate': t3,
'flip': t3,
'settle': t4},
]
with mock.patch.object(function,
'drawSphere1'):
with mock.patch.object(function,
'drawSphere2'):
with mock.patch.object(function,
'drawEarth'):
with mock.patch.object(function,
'drawHorizonView'):
suc = function.drawSatellite(satellite=satellite,
satOrbits=satOrbits)
assert suc
def test_drawSatellite_4(function, ts):
tle = ["ISS (ZARYA)",
"1 25544U 98067A 21103.51063550 .00000247 00000-0 12689-4 0 9995",
"2 25544 51.6440 302.6231 0002845 223.0251 174.3348 15.48881931278570"]
satellite = EarthSatellite(*tle[1:3], name=tle[0])
tt = ts.now().tt
t0 = ts.tt_jd(tt + 0)
t1 = ts.tt_jd(tt + 0.1)
t2 = ts.tt_jd(tt + 0.2)
t3 = ts.tt_jd(tt + 0.3)
t4 = ts.tt_jd(tt + 0.4)
satOrbits = [{'rise': t0,
'flip': t0,
'culminate': t0,
'settle': t1},
{'rise': t2,
'flip': t2,
'culminate': t2,
'settle': t3},
{'rise': t3,
'culminate': t3,
'flip': t3,
'settle': t4},
]
function.closing = True
suc = function.drawSatellite(satellite=satellite,
satOrbits=satOrbits)
assert not suc
|
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import pytest
from botocore.config import Config
from sagemaker import image_uris, Session
from sagemaker.dataset_definition.inputs import (
DatasetDefinition,
RedshiftDatasetDefinition,
AthenaDatasetDefinition,
S3Input,
)
from sagemaker.network import NetworkConfig
from sagemaker.processing import (
ProcessingInput,
ProcessingOutput,
ScriptProcessor,
Processor,
ProcessingJob,
FeatureStoreOutput,
)
from sagemaker.sklearn.processing import SKLearnProcessor
from tests.integ import DATA_DIR
from tests.integ.kms_utils import get_or_create_kms_key
ROLE = "SageMakerRole"
@pytest.fixture(scope="module")
def sagemaker_session_with_custom_bucket(
boto_session, sagemaker_client_config, sagemaker_runtime_config, custom_bucket_name
):
sagemaker_client_config.setdefault("config", Config(retries=dict(max_attempts=10)))
sagemaker_client = (
boto_session.client("sagemaker", **sagemaker_client_config)
if sagemaker_client_config
else None
)
runtime_client = (
boto_session.client("sagemaker-runtime", **sagemaker_runtime_config)
if sagemaker_runtime_config
else None
)
return Session(
boto_session=boto_session,
sagemaker_client=sagemaker_client,
sagemaker_runtime_client=runtime_client,
default_bucket=custom_bucket_name,
)
@pytest.fixture(scope="module")
def image_uri(
sklearn_latest_version,
sklearn_latest_py_version,
cpu_instance_type,
sagemaker_session,
):
return image_uris.retrieve(
"sklearn",
sagemaker_session.boto_region_name,
version=sklearn_latest_version,
py_version=sklearn_latest_py_version,
instance_type=cpu_instance_type,
)
@pytest.fixture(scope="module")
def volume_kms_key(sagemaker_session):
role_arn = sagemaker_session.expand_role(ROLE)
return get_or_create_kms_key(
sagemaker_session=sagemaker_session,
role_arn=role_arn,
alias="integ-test-processing-volume-kms-key-{}".format(
sagemaker_session.boto_session.region_name
),
)
@pytest.fixture(scope="module")
def input_kms_key(sagemaker_session):
role_arn = sagemaker_session.expand_role(ROLE)
return get_or_create_kms_key(
sagemaker_session=sagemaker_session,
role_arn=role_arn,
alias="integ-test-processing-input-kms-key-{}".format(
sagemaker_session.boto_session.region_name
),
)
@pytest.fixture(scope="module")
def output_kms_key(sagemaker_session):
role_arn = sagemaker_session.expand_role(ROLE)
return get_or_create_kms_key(
sagemaker_session=sagemaker_session,
role_arn=role_arn,
alias="integ-test-processing-output-kms-key-{}".format(
sagemaker_session.boto_session.region_name
),
)
def test_sklearn(sagemaker_session, sklearn_latest_version, cpu_instance_type):
script_path = os.path.join(DATA_DIR, "dummy_script.py")
input_file_path = os.path.join(DATA_DIR, "dummy_input.txt")
sklearn_processor = SKLearnProcessor(
framework_version=sklearn_latest_version,
role=ROLE,
instance_type=cpu_instance_type,
instance_count=1,
command=["python3"],
sagemaker_session=sagemaker_session,
base_job_name="test-sklearn",
)
sklearn_processor.run(
code=script_path,
inputs=[ProcessingInput(source=input_file_path, destination="/opt/ml/processing/inputs/")],
wait=False,
logs=False,
)
job_description = sklearn_processor.latest_job.describe()
assert len(job_description["ProcessingInputs"]) == 2
assert job_description["ProcessingResources"]["ClusterConfig"]["InstanceCount"] == 1
assert (
job_description["ProcessingResources"]["ClusterConfig"]["InstanceType"] == cpu_instance_type
)
assert job_description["ProcessingResources"]["ClusterConfig"]["VolumeSizeInGB"] == 30
assert job_description["StoppingCondition"] == {"MaxRuntimeInSeconds": 86400}
assert job_description["AppSpecification"]["ContainerEntrypoint"] == [
"python3",
"/opt/ml/processing/input/code/dummy_script.py",
]
assert ROLE in job_description["RoleArn"]
@pytest.mark.release
def test_sklearn_with_customizations(
sagemaker_session, image_uri, sklearn_latest_version, cpu_instance_type, output_kms_key
):
input_file_path = os.path.join(DATA_DIR, "dummy_input.txt")
sklearn_processor = SKLearnProcessor(
framework_version=sklearn_latest_version,
role=ROLE,
command=["python3"],
instance_type=cpu_instance_type,
instance_count=1,
volume_size_in_gb=100,
volume_kms_key=None,
output_kms_key=output_kms_key,
max_runtime_in_seconds=3600,
base_job_name="test-sklearn-with-customizations",
env={"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"},
tags=[{"Key": "dummy-tag", "Value": "dummy-tag-value"}],
sagemaker_session=sagemaker_session,
)
sklearn_processor.run(
code=os.path.join(DATA_DIR, "dummy_script.py"),
inputs=[
ProcessingInput(
source=input_file_path,
destination="/opt/ml/processing/input/container/path/",
input_name="dummy_input",
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated",
s3_compression_type="None",
)
],
outputs=[
ProcessingOutput(
source="/opt/ml/processing/output/container/path/",
output_name="dummy_output",
s3_upload_mode="EndOfJob",
)
],
arguments=["-v"],
wait=True,
logs=True,
)
job_description = sklearn_processor.latest_job.describe()
assert job_description["ProcessingInputs"][0]["InputName"] == "dummy_input"
assert job_description["ProcessingInputs"][1]["InputName"] == "code"
assert job_description["ProcessingJobName"].startswith("test-sklearn-with-customizations")
assert job_description["ProcessingJobStatus"] == "Completed"
assert job_description["ProcessingOutputConfig"]["KmsKeyId"] == output_kms_key
assert job_description["ProcessingOutputConfig"]["Outputs"][0]["OutputName"] == "dummy_output"
assert job_description["ProcessingResources"]["ClusterConfig"]["InstanceCount"] == 1
assert (
job_description["ProcessingResources"]["ClusterConfig"]["InstanceType"] == cpu_instance_type
)
assert job_description["ProcessingResources"]["ClusterConfig"]["VolumeSizeInGB"] == 100
assert job_description["AppSpecification"]["ContainerArguments"] == ["-v"]
assert job_description["AppSpecification"]["ContainerEntrypoint"] == [
"python3",
"/opt/ml/processing/input/code/dummy_script.py",
]
assert job_description["AppSpecification"]["ImageUri"] == image_uri
assert job_description["Environment"] == {"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"}
assert ROLE in job_description["RoleArn"]
assert job_description["StoppingCondition"] == {"MaxRuntimeInSeconds": 3600}
def test_sklearn_with_custom_default_bucket(
sagemaker_session_with_custom_bucket,
custom_bucket_name,
image_uri,
sklearn_latest_version,
cpu_instance_type,
output_kms_key,
):
input_file_path = os.path.join(DATA_DIR, "dummy_input.txt")
sklearn_processor = SKLearnProcessor(
framework_version=sklearn_latest_version,
role=ROLE,
command=["python3"],
instance_type=cpu_instance_type,
instance_count=1,
volume_size_in_gb=100,
volume_kms_key=None,
output_kms_key=output_kms_key,
max_runtime_in_seconds=3600,
base_job_name="test-sklearn-with-customizations",
env={"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"},
tags=[{"Key": "dummy-tag", "Value": "dummy-tag-value"}],
sagemaker_session=sagemaker_session_with_custom_bucket,
)
sklearn_processor.run(
code=os.path.join(DATA_DIR, "dummy_script.py"),
inputs=[
ProcessingInput(
source=input_file_path,
destination="/opt/ml/processing/input/container/path/",
input_name="dummy_input",
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated",
s3_compression_type="None",
)
],
outputs=[
ProcessingOutput(
source="/opt/ml/processing/output/container/path/",
output_name="dummy_output",
s3_upload_mode="EndOfJob",
)
],
arguments=["-v"],
wait=True,
logs=True,
)
job_description = sklearn_processor.latest_job.describe()
assert job_description["ProcessingInputs"][0]["InputName"] == "dummy_input"
assert custom_bucket_name in job_description["ProcessingInputs"][0]["S3Input"]["S3Uri"]
assert job_description["ProcessingInputs"][1]["InputName"] == "code"
assert custom_bucket_name in job_description["ProcessingInputs"][1]["S3Input"]["S3Uri"]
assert job_description["ProcessingJobName"].startswith("test-sklearn-with-customizations")
assert job_description["ProcessingJobStatus"] == "Completed"
assert job_description["ProcessingOutputConfig"]["KmsKeyId"] == output_kms_key
assert job_description["ProcessingOutputConfig"]["Outputs"][0]["OutputName"] == "dummy_output"
assert job_description["ProcessingResources"]["ClusterConfig"]["InstanceCount"] == 1
assert (
job_description["ProcessingResources"]["ClusterConfig"]["InstanceType"] == cpu_instance_type
)
assert job_description["ProcessingResources"]["ClusterConfig"]["VolumeSizeInGB"] == 100
assert job_description["AppSpecification"]["ContainerArguments"] == ["-v"]
assert job_description["AppSpecification"]["ContainerEntrypoint"] == [
"python3",
"/opt/ml/processing/input/code/dummy_script.py",
]
assert job_description["AppSpecification"]["ImageUri"] == image_uri
assert job_description["Environment"] == {"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"}
assert ROLE in job_description["RoleArn"]
assert job_description["StoppingCondition"] == {"MaxRuntimeInSeconds": 3600}
def test_sklearn_with_no_inputs_or_outputs(
sagemaker_session, image_uri, sklearn_latest_version, cpu_instance_type
):
sklearn_processor = SKLearnProcessor(
framework_version=sklearn_latest_version,
role=ROLE,
command=["python3"],
instance_type=cpu_instance_type,
instance_count=1,
volume_size_in_gb=100,
volume_kms_key=None,
max_runtime_in_seconds=3600,
base_job_name="test-sklearn-with-no-inputs-or-outputs",
env={"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"},
tags=[{"Key": "dummy-tag", "Value": "dummy-tag-value"}],
sagemaker_session=sagemaker_session,
)
sklearn_processor.run(
code=os.path.join(DATA_DIR, "dummy_script.py"), arguments=["-v"], wait=True, logs=True
)
job_description = sklearn_processor.latest_job.describe()
assert job_description["ProcessingInputs"][0]["InputName"] == "code"
assert job_description["ProcessingJobName"].startswith("test-sklearn-with-no-inputs")
assert job_description["ProcessingJobStatus"] == "Completed"
assert job_description["ProcessingResources"]["ClusterConfig"]["InstanceCount"] == 1
assert (
job_description["ProcessingResources"]["ClusterConfig"]["InstanceType"] == cpu_instance_type
)
assert job_description["ProcessingResources"]["ClusterConfig"]["VolumeSizeInGB"] == 100
assert job_description["AppSpecification"]["ContainerArguments"] == ["-v"]
assert job_description["AppSpecification"]["ContainerEntrypoint"] == [
"python3",
"/opt/ml/processing/input/code/dummy_script.py",
]
assert job_description["AppSpecification"]["ImageUri"] == image_uri
assert job_description["Environment"] == {"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"}
assert ROLE in job_description["RoleArn"]
assert job_description["StoppingCondition"] == {"MaxRuntimeInSeconds": 3600}
@pytest.mark.release
def test_script_processor(sagemaker_session, image_uri, cpu_instance_type, output_kms_key):
input_file_path = os.path.join(DATA_DIR, "dummy_input.txt")
script_processor = ScriptProcessor(
role=ROLE,
image_uri=image_uri,
command=["python3"],
instance_count=1,
instance_type=cpu_instance_type,
volume_size_in_gb=100,
volume_kms_key=None,
output_kms_key=output_kms_key,
max_runtime_in_seconds=3600,
base_job_name="test-script-processor",
env={"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"},
tags=[{"Key": "dummy-tag", "Value": "dummy-tag-value"}],
sagemaker_session=sagemaker_session,
)
script_processor.run(
code=os.path.join(DATA_DIR, "dummy_script.py"),
inputs=[
ProcessingInput(
source=input_file_path,
destination="/opt/ml/processing/input/container/path/",
input_name="dummy_input",
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated",
s3_compression_type="None",
)
],
outputs=[
ProcessingOutput(
source="/opt/ml/processing/output/container/path/",
output_name="dummy_output",
s3_upload_mode="EndOfJob",
)
],
arguments=["-v"],
wait=True,
logs=True,
)
job_description = script_processor.latest_job.describe()
assert job_description["ProcessingInputs"][0]["InputName"] == "dummy_input"
assert job_description["ProcessingInputs"][1]["InputName"] == "code"
assert job_description["ProcessingJobName"].startswith("test-script-processor")
assert job_description["ProcessingJobStatus"] == "Completed"
assert job_description["ProcessingOutputConfig"]["KmsKeyId"] == output_kms_key
assert job_description["ProcessingOutputConfig"]["Outputs"][0]["OutputName"] == "dummy_output"
assert job_description["ProcessingResources"]["ClusterConfig"]["InstanceCount"] == 1
assert (
job_description["ProcessingResources"]["ClusterConfig"]["InstanceType"] == cpu_instance_type
)
assert job_description["ProcessingResources"]["ClusterConfig"]["VolumeSizeInGB"] == 100
assert job_description["AppSpecification"]["ContainerArguments"] == ["-v"]
assert job_description["AppSpecification"]["ContainerEntrypoint"] == [
"python3",
"/opt/ml/processing/input/code/dummy_script.py",
]
assert job_description["AppSpecification"]["ImageUri"] == image_uri
assert job_description["Environment"] == {"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"}
assert ROLE in job_description["RoleArn"]
assert job_description["StoppingCondition"] == {"MaxRuntimeInSeconds": 3600}
def test_script_processor_with_no_inputs_or_outputs(
sagemaker_session, image_uri, cpu_instance_type
):
script_processor = ScriptProcessor(
role=ROLE,
image_uri=image_uri,
command=["python3"],
instance_count=1,
instance_type=cpu_instance_type,
volume_size_in_gb=100,
volume_kms_key=None,
max_runtime_in_seconds=3600,
base_job_name="test-script-processor-with-no-inputs-or-outputs",
env={"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"},
tags=[{"Key": "dummy-tag", "Value": "dummy-tag-value"}],
sagemaker_session=sagemaker_session,
)
script_processor.run(
code=os.path.join(DATA_DIR, "dummy_script.py"), arguments=["-v"], wait=True, logs=True
)
job_description = script_processor.latest_job.describe()
assert job_description["ProcessingInputs"][0]["InputName"] == "code"
assert job_description["ProcessingJobName"].startswith("test-script-processor-with-no-inputs")
assert job_description["ProcessingJobStatus"] == "Completed"
assert job_description["ProcessingResources"]["ClusterConfig"]["InstanceCount"] == 1
assert (
job_description["ProcessingResources"]["ClusterConfig"]["InstanceType"] == cpu_instance_type
)
assert job_description["ProcessingResources"]["ClusterConfig"]["VolumeSizeInGB"] == 100
assert job_description["AppSpecification"]["ContainerArguments"] == ["-v"]
assert job_description["AppSpecification"]["ContainerEntrypoint"] == [
"python3",
"/opt/ml/processing/input/code/dummy_script.py",
]
assert job_description["AppSpecification"]["ImageUri"] == image_uri
assert job_description["Environment"] == {"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"}
assert ROLE in job_description["RoleArn"]
assert job_description["StoppingCondition"] == {"MaxRuntimeInSeconds": 3600}
job_from_name = ProcessingJob.from_processing_name(
sagemaker_session=sagemaker_session,
processing_job_name=job_description["ProcessingJobName"],
)
job_description = job_from_name.describe()
assert job_description["ProcessingInputs"][0]["InputName"] == "code"
assert job_description["ProcessingJobName"].startswith("test-script-processor-with-no-inputs")
assert job_description["ProcessingJobStatus"] == "Completed"
assert job_description["ProcessingResources"]["ClusterConfig"]["InstanceCount"] == 1
assert (
job_description["ProcessingResources"]["ClusterConfig"]["InstanceType"] == cpu_instance_type
)
assert job_description["ProcessingResources"]["ClusterConfig"]["VolumeSizeInGB"] == 100
assert job_description["AppSpecification"]["ContainerArguments"] == ["-v"]
assert job_description["AppSpecification"]["ContainerEntrypoint"] == [
"python3",
"/opt/ml/processing/input/code/dummy_script.py",
]
assert job_description["AppSpecification"]["ImageUri"] == image_uri
assert job_description["Environment"] == {"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"}
assert ROLE in job_description["RoleArn"]
assert job_description["StoppingCondition"] == {"MaxRuntimeInSeconds": 3600}
@pytest.mark.release
def test_processor(sagemaker_session, image_uri, cpu_instance_type, output_kms_key):
script_path = os.path.join(DATA_DIR, "dummy_script.py")
processor = Processor(
role=ROLE,
image_uri=image_uri,
instance_count=1,
instance_type=cpu_instance_type,
entrypoint=["python3", "/opt/ml/processing/input/code/dummy_script.py"],
volume_size_in_gb=100,
volume_kms_key=None,
output_kms_key=output_kms_key,
max_runtime_in_seconds=3600,
base_job_name="test-processor",
env={"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"},
tags=[{"Key": "dummy-tag", "Value": "dummy-tag-value"}],
sagemaker_session=sagemaker_session,
)
processor.run(
inputs=[
ProcessingInput(
source=script_path, destination="/opt/ml/processing/input/code/", input_name="code"
)
],
outputs=[
ProcessingOutput(
source="/opt/ml/processing/output/container/path/",
output_name="dummy_output",
s3_upload_mode="EndOfJob",
)
],
arguments=["-v"],
wait=True,
logs=True,
)
job_description = processor.latest_job.describe()
assert job_description["ProcessingInputs"][0]["InputName"] == "code"
assert job_description["ProcessingJobName"].startswith("test-processor")
assert job_description["ProcessingJobStatus"] == "Completed"
assert job_description["ProcessingOutputConfig"]["KmsKeyId"] == output_kms_key
assert job_description["ProcessingOutputConfig"]["Outputs"][0]["OutputName"] == "dummy_output"
assert job_description["ProcessingResources"]["ClusterConfig"]["InstanceCount"] == 1
assert (
job_description["ProcessingResources"]["ClusterConfig"]["InstanceType"] == cpu_instance_type
)
assert job_description["ProcessingResources"]["ClusterConfig"]["VolumeSizeInGB"] == 100
assert job_description["AppSpecification"]["ContainerArguments"] == ["-v"]
assert job_description["AppSpecification"]["ContainerEntrypoint"] == [
"python3",
"/opt/ml/processing/input/code/dummy_script.py",
]
assert job_description["AppSpecification"]["ImageUri"] == image_uri
assert job_description["Environment"] == {"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"}
assert ROLE in job_description["RoleArn"]
assert job_description["StoppingCondition"] == {"MaxRuntimeInSeconds": 3600}
def test_processor_with_custom_bucket(
sagemaker_session_with_custom_bucket,
custom_bucket_name,
image_uri,
cpu_instance_type,
output_kms_key,
input_kms_key,
):
script_path = os.path.join(DATA_DIR, "dummy_script.py")
processor = Processor(
role=ROLE,
image_uri=image_uri,
instance_count=1,
instance_type=cpu_instance_type,
entrypoint=["python3", "/opt/ml/processing/input/code/dummy_script.py"],
volume_size_in_gb=100,
volume_kms_key=None,
output_kms_key=output_kms_key,
max_runtime_in_seconds=3600,
base_job_name="test-processor",
env={"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"},
tags=[{"Key": "dummy-tag", "Value": "dummy-tag-value"}],
sagemaker_session=sagemaker_session_with_custom_bucket,
)
processor.run(
inputs=[
ProcessingInput(
source=script_path, destination="/opt/ml/processing/input/code/", input_name="code"
)
],
kms_key=input_kms_key,
outputs=[
ProcessingOutput(
source="/opt/ml/processing/output/container/path/",
output_name="dummy_output",
s3_upload_mode="EndOfJob",
)
],
arguments=["-v"],
wait=True,
logs=True,
)
job_description = processor.latest_job.describe()
assert job_description["ProcessingInputs"][0]["InputName"] == "code"
assert custom_bucket_name in job_description["ProcessingInputs"][0]["S3Input"]["S3Uri"]
assert job_description["ProcessingJobName"].startswith("test-processor")
assert job_description["ProcessingJobStatus"] == "Completed"
assert job_description["ProcessingOutputConfig"]["KmsKeyId"] == output_kms_key
assert job_description["ProcessingOutputConfig"]["Outputs"][0]["OutputName"] == "dummy_output"
assert job_description["ProcessingResources"]["ClusterConfig"]["InstanceCount"] == 1
assert (
job_description["ProcessingResources"]["ClusterConfig"]["InstanceType"] == cpu_instance_type
)
assert job_description["ProcessingResources"]["ClusterConfig"]["VolumeSizeInGB"] == 100
assert job_description["AppSpecification"]["ContainerArguments"] == ["-v"]
assert job_description["AppSpecification"]["ContainerEntrypoint"] == [
"python3",
"/opt/ml/processing/input/code/dummy_script.py",
]
assert job_description["AppSpecification"]["ImageUri"] == image_uri
assert job_description["Environment"] == {"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"}
assert ROLE in job_description["RoleArn"]
assert job_description["StoppingCondition"] == {"MaxRuntimeInSeconds": 3600}
def test_sklearn_with_network_config(sagemaker_session, sklearn_latest_version, cpu_instance_type):
script_path = os.path.join(DATA_DIR, "dummy_script.py")
input_file_path = os.path.join(DATA_DIR, "dummy_input.txt")
sklearn_processor = SKLearnProcessor(
framework_version=sklearn_latest_version,
role=ROLE,
instance_type=cpu_instance_type,
instance_count=1,
command=["python3"],
sagemaker_session=sagemaker_session,
base_job_name="test-sklearn-with-network-config",
network_config=NetworkConfig(
enable_network_isolation=True, encrypt_inter_container_traffic=True
),
)
sklearn_processor.run(
code=script_path,
inputs=[ProcessingInput(source=input_file_path, destination="/opt/ml/processing/inputs/")],
wait=False,
logs=False,
)
job_description = sklearn_processor.latest_job.describe()
network_config = job_description["NetworkConfig"]
assert network_config["EnableInterContainerTrafficEncryption"]
assert network_config["EnableNetworkIsolation"]
def test_processing_job_inputs_and_output_config(
sagemaker_session, image_uri, cpu_instance_type, output_kms_key
):
script_processor = ScriptProcessor(
role=ROLE,
image_uri=image_uri,
command=["python3"],
instance_count=1,
instance_type=cpu_instance_type,
volume_size_in_gb=100,
volume_kms_key=None,
output_kms_key=output_kms_key,
max_runtime_in_seconds=3600,
base_job_name="test-script-processor",
env={"DUMMY_ENVIRONMENT_VARIABLE": "dummy-value"},
tags=[{"Key": "dummy-tag", "Value": "dummy-tag-value"}],
sagemaker_session=sagemaker_session,
)
script_processor.run(
code=os.path.join(DATA_DIR, "dummy_script.py"),
inputs=_get_processing_inputs_with_all_parameters(sagemaker_session.default_bucket()),
outputs=_get_processing_outputs_with_all_parameters(),
arguments=["-v"],
wait=False,
)
job_description = script_processor.latest_job.describe()
expected_inputs_and_outputs = _get_processing_job_inputs_and_outputs(
sagemaker_session.default_bucket(), output_kms_key
)
assert (
job_description["ProcessingInputs"][:-1] == expected_inputs_and_outputs["ProcessingInputs"]
)
assert (
job_description["ProcessingOutputConfig"]
== expected_inputs_and_outputs["ProcessingOutputConfig"]
)
def _get_processing_inputs_with_all_parameters(bucket):
return [
ProcessingInput(
source=f"s3://{bucket}",
destination="/opt/ml/processing/input/data/",
input_name="my_dataset",
),
ProcessingInput(
input_name="s3_input",
s3_input=S3Input(
s3_uri=f"s3://{bucket}",
local_path="/opt/ml/processing/input/s3_input",
s3_data_type="S3Prefix",
s3_input_mode="File",
s3_data_distribution_type="FullyReplicated",
s3_compression_type="None",
),
),
ProcessingInput(
input_name="redshift_dataset_definition",
app_managed=True,
dataset_definition=DatasetDefinition(
local_path="/opt/ml/processing/input/rdd",
data_distribution_type="FullyReplicated",
input_mode="File",
redshift_dataset_definition=RedshiftDatasetDefinition(
cluster_id="integ-test-cluster-prod-us-west-2",
database="dev",
db_user="awsuser",
query_string="SELECT * FROM shoes",
cluster_role_arn="arn:aws:iam::037210630505:role/RedshiftClusterRole-prod-us-west-2",
output_s3_uri=f"s3://{bucket}/rdd",
output_format="CSV",
output_compression="None",
),
),
),
ProcessingInput(
input_name="athena_dataset_definition",
app_managed=True,
dataset_definition=DatasetDefinition(
local_path="/opt/ml/processing/input/add",
data_distribution_type="FullyReplicated",
input_mode="File",
athena_dataset_definition=AthenaDatasetDefinition(
catalog="AwsDataCatalog",
database="default",
work_group="workgroup",
query_string='SELECT * FROM "default"."s3_test_table_$STAGE_$REGIONUNDERSCORED";',
output_s3_uri=f"s3://{bucket}/add",
output_format="JSON",
output_compression="GZIP",
),
),
),
]
def _get_processing_outputs_with_all_parameters():
return [
ProcessingOutput(
feature_store_output=FeatureStoreOutput(feature_group_name="FeatureGroupName"),
app_managed=True,
)
]
def _get_processing_job_inputs_and_outputs(bucket, output_kms_key):
return {
"ProcessingInputs": [
{
"InputName": "my_dataset",
"AppManaged": False,
"S3Input": {
"S3Uri": f"s3://{bucket}",
"LocalPath": "/opt/ml/processing/input/data/",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
},
{
"InputName": "s3_input",
"AppManaged": False,
"S3Input": {
"S3Uri": f"s3://{bucket}",
"LocalPath": "/opt/ml/processing/input/s3_input",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
},
{
"InputName": "redshift_dataset_definition",
"AppManaged": True,
"DatasetDefinition": {
"RedshiftDatasetDefinition": {
"ClusterId": "integ-test-cluster-prod-us-west-2",
"Database": "dev",
"DbUser": "awsuser",
"QueryString": "SELECT * FROM shoes",
"ClusterRoleArn": "arn:aws:iam::037210630505:role/RedshiftClusterRole-prod-us-west-2",
"OutputS3Uri": f"s3://{bucket}/rdd",
"OutputFormat": "CSV",
"OutputCompression": "None",
},
"LocalPath": "/opt/ml/processing/input/rdd",
"DataDistributionType": "FullyReplicated",
"InputMode": "File",
},
},
{
"InputName": "athena_dataset_definition",
"AppManaged": True,
"DatasetDefinition": {
"AthenaDatasetDefinition": {
"Catalog": "AwsDataCatalog",
"Database": "default",
"QueryString": 'SELECT * FROM "default"."s3_test_table_$STAGE_$REGIONUNDERSCORED";',
"WorkGroup": "workgroup",
"OutputS3Uri": f"s3://{bucket}/add",
"OutputFormat": "JSON",
"OutputCompression": "GZIP",
},
"LocalPath": "/opt/ml/processing/input/add",
"DataDistributionType": "FullyReplicated",
"InputMode": "File",
},
},
],
"ProcessingOutputConfig": {
"Outputs": [
{
"OutputName": "output-1",
"FeatureStoreOutput": {"FeatureGroupName": "FeatureGroupName"},
"AppManaged": True,
}
],
"KmsKeyId": output_kms_key,
},
}
|
from .wrapper import TransitionRecorderWrapper
|
#
# Download wix from http://wix.sourceforge.net/
# Install it to a path that DOES NOT have spaces or use a subst drive
# Set windows environment variable DEVBIN
#
import os, sys, shutil, hashlib, socket, time, tempfile
import random, re
from xml.dom import minidom
from os.path import join
import getopt
packageFiles =""
# command line settings
cmdLineArgs ={
'name' : 'none',
'source': 'none',
'template': 'none',
}
cmdLineArgsHelp ={
'help': ( 'Display this usage message and exit', '' ),
'debug': ( 'Do not delete the temporary files for debugging builds.\n Store temp files in a temp folder under the current folder', '' ),
'name': ( 'The plug-in application name', 'path' ),
'source': ( 'The path to the plug-in directory', 'path' ),
'template': ( 'The path/filename to the xml file', 'path' ),
}
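# Example invocation (the script file name is hypothetical):
#   python makePackageContents.py --name MyPlugin --source C:/plugins/MyPlugin --template C:/plugins/PackageContents.template.xml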
filestoskip =[ # ", someotherfile.ext" etc.
'.DS_Store',
'thumbs.db',
'PackageContents.xml'
]
#------------------------------------------------------------------------------
def buildwixtree (xdir):
global cmdLineArgs, filestoskip, packageFiles
dirs =[]
xxdir =xdir.replace (cmdLineArgs ['source'], './')
for e in os.listdir (xdir):
if os.path.isdir (os.path.join (xdir, e)):
dirs.append (e)
else:
if e not in filestoskip:
# ComponentEntry
packageFiles +=("\t\t<ComponentEntry ModuleName=\"%s\" />\n" % join (xxdir, e).replace ('\\', '/'))
# Directories
for d in dirs:
buildwixtree (join (xdir, d))
#------------------------------------------------------------------------------
def RenderFile (filename, outfile):
global cmdLineArgs, packageFiles
wix =open (filename, 'r')
lines ='' . join (wix.readlines ())
wix.close ()
lines =lines.replace ('{{AppName}}', cmdLineArgs ['name'])
lines =lines.replace ('{{GUID}}', GenerateGUID ())
lines =lines.replace ('{{FILES}}', packageFiles)
out =open (outfile, 'w')
out.write (lines)
out.close ()
#------------------------------------------------------------------------------
def createPackageContentsFile ():
global cmdLineArgs
buildwixtree (cmdLineArgs ['source'])
if os.path.isfile (cmdLineArgs ['template']):
RenderFile (cmdLineArgs ['template'], os.path.join (cmdLineArgs ['source'], 'PackageContents.xml'))
#------------------------------------------------------------------------------
# Generate a GUID for the windows installation
# Source: http://www.krugle.org/kse/files/svn/svn.sourceforge.net/nebuladevice/nebula2/buildsys3/guid.py
def GenerateGUID():
	t =int(time.time () * 1000)
	r =int(random.random () * 100000000000000000)
	ip =''
	try:
		ip =socket.gethostbyname (socket.gethostname ())
	except socket.error:
		# if we can't get a network address, just imagine one
		ip = str (random.random () * 100000000000000000)
	data =str (t) + ' ' + str (r) + ' ' + ip
	guidStr =hashlib.md5 (data.encode ('utf-8')).hexdigest ()
	return '{%s-%s-%s-%s-%s}' % (guidStr [:8], guidStr [8:12], guidStr [12:16], guidStr [16:20], guidStr [20:])
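# A shorter alternative sketch using only the standard-library uuid module
# (not what this script uses; shown for comparison):
#	import uuid
#	guid ='{%s}' % uuid.uuid4 ()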
#------------------------------------------------------------------------------
def parseargs():
global cmdLineArgs, cmdLineArgsHelp
for i, arg in enumerate (sys.argv):
sys.argv [i] =arg.replace ('\\', '/')
args =[]
argsSt =''
for i in cmdLineArgsHelp.keys ():
if cmdLineArgsHelp [i] [1] == '':
args.append (i)
argsSt +=i [:1]
else:
args.append (i + '=')
argsSt +=i [:1] + ':'
opts, unused_args =getopt.getopt (sys.argv [1:], argsSt, args)
	# Debugging in Eclipse on the Mac adds a second argument to sys.argv
if len (opts) == 0:
opts, unused_args =getopt.getopt (sys.argv [2:], argsSt, args)
# End of Debugging in Eclipse trick
cmdLineArgs ['debug'] =False
for o, a in opts:
if o in ('-h', '--help'):
			# print help assembled from cmdLineArgsHelp (no separate usage() helper exists)
			for key, (desc, argname) in cmdLineArgsHelp.items ():
				print ('--%s %s\n\t\t%s' % (key, argname, desc))
sys.exit ()
if o in ('-d', '--debug'):
cmdLineArgs ['debug'] =True
else:
cmdLineArgs [o[2:]] =a
# Add a trailing slash to the source directory
cmdLineArgs ['source'] +='/'
#------------------------------------------------------------------------------
def main ():
global cmdLineArgs, packageFiles
if len (sys.argv) < 2:
return (-1)
try:
parseargs ()
	except getopt.GetoptError as err:
		# Print help information and exit:
		print (str (err)) # Will print something like "option -a not recognized"
return (2)
if os.path.isfile (os.path.join (cmdLineArgs ['source'], 'PackageContents.xml')):
for num in range(0, 1000):
if not os.path.isfile (os.path.join (cmdLineArgs ['source'], 'PackageContents.bak%d' % num)):
shutil.copy2 (os.path.join (cmdLineArgs ['source'], 'PackageContents.xml'), os.path.join (cmdLineArgs ['source'], 'PackageContents.bak%d' % num))
break
createPackageContentsFile ()
return (0)
#------------------------------------------------------------------------------
if __name__ == "__main__":
sys.exit(main())
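# Example invocation (the script filename and paths below are hypothetical):
#	python buildPackageContents.py --name MyPlugin --source C:/plugins/MyPlugin --template PackageContents.template.xml
# Use --debug to keep the temporary files and --help to list all options.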
|
from . import views
from flamingo.url.conf import path
routers = [
path(url="/aaaa", view_func_or_module=views.test, name="test_aaaa"),
path(url="/params/<int:id>/", view_func_or_module=views.params_test)
]
|
from urllib.parse import urlencode
import pytest
from django.contrib.auth import get_user_model
from django.http import HttpRequest
from django.urls import reverse
from tahoe_idp.tests.magiclink_fixtures import magic_link, user # NOQA: F401
User = get_user_model()
@pytest.mark.django_db
def test_login_verify(client, settings, magic_link): # NOQA: F811
url = reverse('tahoe_idp:login_verify')
request = HttpRequest()
ml = magic_link(request)
ml.save()
params = {'token': ml.token}
params['username'] = ml.username
query = urlencode(params)
url = '{url}?{query}'.format(url=url, query=query)
response = client.get(url)
assert response.status_code == 302
assert response.url == settings.LOGIN_REDIRECT_URL
needs_login_url = reverse('needs_login')
needs_login_response = client.get(needs_login_url)
assert needs_login_response.status_code == 200
@pytest.mark.django_db
def test_login_verify_with_redirect(client, magic_link): # NOQA: F811
request = HttpRequest()
ml = magic_link(request)
redirect_url = reverse('no_login')
ml.redirect_url = redirect_url
ml.save()
url = ml.generate_url(request)
response = client.get(url)
assert response.status_code == 302
assert response.url == redirect_url
@pytest.mark.django_db
def test_login_verify_failed_not_found(client, settings):
fail_redirect_url = '/failedredirect'
settings.MAGICLINK_LOGIN_FAILED_REDIRECT = fail_redirect_url
url = reverse('tahoe_idp:login_verify')
params = {'token': 'does not matter', 'username': 'does not matter'}
query = urlencode(params)
url = '{url}?{query}'.format(url=url, query=query)
response = client.get(url)
assert response.status_code == 302
assert response.url == fail_redirect_url
@pytest.mark.django_db
def test_login_verify_failed_redirect(client, settings):
fail_redirect_url = '/failedredirect'
settings.MAGICLINK_LOGIN_FAILED_REDIRECT = fail_redirect_url
url = reverse('tahoe_idp:login_verify')
params = {'token': 'does not matter', 'username': 'does not matter'}
query = urlencode(params)
url = '{url}?{query}'.format(url=url, query=query)
response = client.get(url)
assert response.url == fail_redirect_url
@pytest.mark.django_db
def test_login_verify_custom_verify(client, settings, magic_link): # NOQA: F811,E501
settings.MAGICLINK_LOGIN_VERIFY_URL = 'custom_login_verify'
request = HttpRequest()
ml = magic_link(request)
ml.redirect_url = reverse('needs_login') # Should be ignored
ml.save()
url = ml.generate_url(request)
response = client.get(url)
assert response.status_code == 302
assert response.url == reverse('no_login')
|
import csv, os, json, argparse, sys
"""
Print tree of proper chemclasses
"""
# Initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--query", help="perform SPARQL query",
action="store_true")
# Read arguments from the command line
args = parser.parse_args()
#print(args)
script = os.path.basename(sys.argv[0])[:-3]
if args.query:
    print('performing query...', file=sys.stderr)
    ret = os.popen('wd sparql {}.rq >{}.json'.format(script, script))
    if ret.close() is not None:
        raise RuntimeError('wd sparql query failed for {}.rq'.format(script))
file = open('{}.json'.format(script))
s = file.read()
jol = json.loads(s)
items = {}
labels = {}
for d in jol:
it = d.get('item')[31:]
# it = d.get('value')
lab = d.get('itemLabel')
sup = d.get('super')[31:]
i = items.get(it)
if i is not None:
i.append(sup)
else:
items[it] = [sup]
labels[it] = lab
items['Q2393187'] = ['Q43460564']  # keep values as lists of superclass IDs, like the other entries
labels['Q2393187'] = 'molecular entity'
labels['Q43460564'] = 'chemical entity'
print('{} classes'.format(len(items.keys())))
#print('Q415812' in set(items.keys()).union(set(['Q43460564'])))
#print([it for it,itsuplist in items.items() if 'Q415812' in itsuplist ])
edges = {}
for it,itsuplist in items.items():
for sup in itsuplist:
if sup in set(items.keys()).union(set(['Q43460564'])):
e = edges.get(sup)
if e is None:
edges[sup] = set([it])
else:
e.add(it)
#print(edges.get('Q415812'))
seen = set()
def walk(E, edges, prefix):
pfix = '├──'
if E in seen:
pfix = '╞══'
print('{}[[{}]] {}'.format(prefix + pfix, E, labels.get(E)))
if E in seen and edges.get(E) is not None:
print(prefix + '... see above')
return
seen.add(E)
children = edges.get(E)
prefix = ' │ ' + prefix
if len(prefix) > 70 or children is None:
return
for c in sorted(children, key=lambda c: labels.get(c)):
walk(c, edges, prefix)
walk('Q43460564', edges, ' ')
print()
walk('Q36496', edges, ' ')
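# Illustrative shape of the printed tree (labels come from the SPARQL results);
# '├──' marks a class printed for the first time, '╞══' one that was already
# printed above (its children are then replaced by a '... see above' line):
#    ├──[[Q43460564]] chemical entity
#    │  ├──[[Q2393187]] molecular entity
#    │  ╞══[[Q...]] some repeated class
#    │  ... see above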
|
# Copyright (c) 2016-2019, Broad Institute, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Broad Institute, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
import textwrap
import argparse
from abc import ABCMeta, abstractmethod
class Subcommand(metaclass=ABCMeta):
"""Represents a subcommand with its own argument parser arguments."""
def register_arguments(self, subparser: argparse.ArgumentParser):
"""This function should register all required arguments for this
subcommand."""
@abstractmethod
def __call__(self, *args, **kwargs):
"""When the subcommand is used on the command line, this function
will be called."""
class SubcommandRegistry:
def __init__(self, version=None, subcommands_title="", *args, **kwargs):
self.parser = argparse.ArgumentParser(*args, **kwargs)
self.parser.set_defaults(subcommand_func=None)
self.subparsers = self.parser.add_subparsers(
title=subcommands_title if subcommands_title else "Subcommands")
if version:
self.parser.add_argument(
'--version', action='version', version=version)
def register_subcommand(self, name: str, subcommand: Subcommand, **kwargs):
# Use subcommand class level documentation also for documentation on
# command line -h/--help
        subcommand_doc = subcommand.__class__.__doc__
        if subcommand_doc:  # hasattr() is always True for __doc__; guard against a missing (None) docstring instead
first_help_line = subcommand_doc.strip().split('\n\n')[0].strip()
kwargs['help'] = first_help_line
kwargs['description'] = textwrap.dedent(subcommand_doc)
kwargs['formatter_class'] = argparse.RawDescriptionHelpFormatter
subparser = self.subparsers.add_parser(name, **kwargs)
# Initialize subcommand arguments
subcommand.register_arguments(subparser)
subparser.set_defaults(subcommand_func=subcommand)
def run(self, parser_args: argparse.Namespace):
args_dict = vars(parser_args)
subcommand_func = args_dict.pop('subcommand_func')
if subcommand_func:
rc = subcommand_func(**args_dict)
else:
self.parser.print_help()
rc = 1
if rc is None:
rc = 0
sys.exit(rc)
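# A minimal usage sketch (the Hello subcommand below is hypothetical, not part
# of this module):
#
#   class Hello(Subcommand):
#       """Print a greeting."""
#       def register_arguments(self, subparser: argparse.ArgumentParser):
#           subparser.add_argument('--name', default='world')
#       def __call__(self, name, **kwargs):
#           print("Hello, {}!".format(name))
#           return 0
#
#   registry = SubcommandRegistry(version='1.0')
#   registry.register_subcommand('hello', Hello())
#   registry.run(registry.parser.parse_args())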
|
import pytest
import datetime as dt
from unittest import mock
from django.urls import reverse
from genie.models import NotebookRunLogs, NOTEBOOK_STATUS_RUNNING, NOTEBOOK_STATUS_SUCCESS, NOTEBOOK_STATUS_ERROR
from workflows.models import WorkflowRunLogs, STATUS_RUNNING, STATUS_SUCCESS, STATUS_ERROR
from mixer.backend.django import mixer
from conftest import populate_seed_data
from genie.routineTasks import orphanJobsChecker
@pytest.mark.django_db
def test_orphanJobsChecker(client, populate_seed_data, mocker):
notebookRunLogs = mixer.blend("genie.notebookRunLogs", status = NOTEBOOK_STATUS_RUNNING, updateTimestamp=dt.datetime.now() - dt.timedelta(seconds=100))
mocker.patch("utils.kubernetesAPI.KubernetesAPI.getPodStatus", return_value = "PENDING")
orphanJobsChecker()
assert NotebookRunLogs.objects.get(pk=notebookRunLogs.id).status == NOTEBOOK_STATUS_RUNNING
workflow = mixer.blend("workflows.workflow", periodictask=None)
workflowRunLogs = mixer.blend("workflows.workflowRunLogs", status=STATUS_RUNNING, workflow=workflow)
notebookRunLogs = mixer.blend("genie.notebookRunLogs", workflowRunLogs=workflowRunLogs, status=NOTEBOOK_STATUS_RUNNING)
orphanJobsChecker()
assert WorkflowRunLogs.objects.get(id=workflowRunLogs.id).status == STATUS_RUNNING
notebookRunLogs.status = NOTEBOOK_STATUS_SUCCESS
notebookRunLogs.save()
orphanJobsChecker()
assert WorkflowRunLogs.objects.get(id=workflowRunLogs.id).status == STATUS_SUCCESS
workflowRunLogs.status = STATUS_RUNNING
workflowRunLogs.save()
notebookRunLogs.status = NOTEBOOK_STATUS_ERROR
notebookRunLogs.save()
orphanJobsChecker()
assert WorkflowRunLogs.objects.get(id=workflowRunLogs.id).status == STATUS_ERROR
|
from cms.utils.plugins import get_plugins
from django import template
register = template.Library()
@register.simple_tag(name='media_plugins', takes_context=True)
def media_plugins(context, post):
"""
Extract :py:class:`djangocms_blog.media.base.MediaAttachmentPluginMixin`
plugins from the ``media`` placeholder of the provided post.
They can be rendered with ``render_plugin`` templatetag:
    .. code-block:: python
{% media_plugins post as media_plugins %}
{% for plugin in media_plugins %}{% render_plugin plugin %}{% endfor %}
:param context: template context
:type context: dict
:param post: post instance
:type post: :py:class:`djangocms_blog.models.Post`
:return: list of :py:class:`djangocms_blog.media.base.MediaAttachmentPluginMixin` plugins
:rtype: List[djangocms_blog.media.base.MediaAttachmentPluginMixin]
"""
request = context['request']
if post.media.get_plugins().exists():
return get_plugins(request, post.media, None)
return []
@register.simple_tag(name='media_images', takes_context=True)
def media_images(context, post, main=True):
"""
Extract images of the given size from all the
:py:class:`djangocms_blog.media.base.MediaAttachmentPluginMixin`
plugins in the ``media`` placeholder of the provided post.
Support ``djangocms-video`` ``poster`` field in case the plugin
does not implement ``MediaAttachmentPluginMixin`` API.
Usage:
    .. code-block:: python
        {% media_images post False as thumbs %}
        {% for thumb in thumbs %}<img src="{{ thumb }}"/>{% endfor %}
    .. code-block:: python
        {% media_images post as main_images %}
        {% for image in main_images %}<img src="{{ image }}"/>{% endfor %}
:param context: template context
:type context: dict
:param post: post instance
:type post: :py:class:`djangocms_blog.models.Post`
:param main: retrieve main image or thumbnail
:type main: bool
:return: list of images urls
:rtype: list
"""
plugins = media_plugins(context, post)
if main:
image_method = 'get_main_image'
else:
image_method = 'get_thumb_image'
images = []
for plugin in plugins:
try:
images.append(getattr(plugin, image_method)())
except Exception:
try:
image = getattr(plugin, 'poster')
if image:
images.append(image.url)
except AttributeError:
pass
return images
|
#!/usr/bin/env python
"""Use numpy array."""
import sys
import numpy as np
A = np.array([4, 6, 8])
print(type(A))
A[0] = 7
print(A)
sys.exit()
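# Expected output:
#   <class 'numpy.ndarray'>
#   [7 6 8]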
|
__all__ = ["search"]
# standard library
from logging import getLogger
from typing import List, Sequence
# dependencies
from arxiv import Search
from dateparser import parse
# submodules
from .article import Article
from .consts import CATEGORIES, KEYWORDS, START_DATE, END_DATE
# constants
ARXIV_DATE_FORMAT = "%Y%m%d%H%M%S"
# logger
logger = getLogger(__name__)
# runtime functions
def search(
categories: Sequence[str] = CATEGORIES,
keywords: Sequence[str] = KEYWORDS,
start_date: str = START_DATE,
end_date: str = END_DATE,
) -> List[Article]:
"""Search for articles in arXiv.
Args:
categories: arXiv categories.
keywords: Keywords of the search.
start_date: Start date of the search.
end_date: End date of the search.
Returns:
Articles found with given conditions.
"""
start_date = format_date(start_date)
end_date = format_date(end_date)
query = f"submittedDate:[{start_date} TO {end_date}]"
if categories:
sub = " OR ".join(f"cat:{cat}" for cat in categories)
query += f" AND ({sub})"
if keywords:
sub = " OR ".join(f'abs:"{kwd}"' for kwd in keywords)
query += f" AND ({sub})"
logger.debug(f"Searched articles by: {query!r}")
results = list(Search(query).results())
logger.debug(f"Number of articles found: {len(results)}")
return list(map(Article.from_arxiv_result, results))
def format_date(date_like: str) -> str:
"""Parse and format a date-like string."""
if (dt := parse(date_like)) is not None:
return dt.strftime(ARXIV_DATE_FORMAT)
raise ValueError(f"Could not parse {date_like!r}.")
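# A minimal usage sketch (requires network access to the arXiv API; the category,
# keyword, and date values below are illustrative, not this package's defaults):
#
#   articles = search(
#       categories=["astro-ph.IM"],
#       keywords=["deep learning"],
#       start_date="2023-01-01",
#       end_date="2023-01-07",
#   )
#   for article in articles:
#       print(article)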
|
from data import Database, ScrapeWSB
import streamlit as st
st.title('**Labeling Interface**')
st.markdown(""" #### Interface to assist in hand labeling posts and comments """)
st.text("IMPORTANT: You must set sentiment to 'select sentiment' when choosing 'POSTS' or 'COMMENTS'")
st.text("")
st.text("")
st.text("")
stock_id = st.sidebar.text_input("Stock Symbol")
num_posts = st.sidebar.slider("Number of Posts", min_value=1, max_value=100)
num_comments = st.sidebar.slider("Number of Comments", min_value=1, max_value=100)
time_filter = st.sidebar.selectbox("Time Filter", ['day', 'week', 'month'], index=0)
scrape = st.sidebar.button("Scrape")
if scrape and stock_id:
with st.spinner('Scraping...'):
scrapewsb = ScrapeWSB(stock_id, num_posts, num_comments, time_filter=time_filter)
df = scrapewsb.scrape()
scrapewsb.convert(df)
type_text = st.selectbox("Type of Text", ['POSTS', 'COMMENTS'])
db = Database()
db.use_database('DB1')
st.text("")
st.markdown("""#### Text to label:""")
st.text("")
display_text = st.empty()
st.text("")
st.text("")
sentiment = st.selectbox("Sentiment", ['select sentiment', 'positive', 'negative'], index=0)
display_text_key = '0'
if type_text == 'POSTS':
ids = db.query("SELECT POST_ID FROM POSTS WHERE TARGET=-1 ;")
if len(ids)==0:
display_text.text("NO UNLABELED {}".format(type_text))
display_text_key = str(int(display_text_key)+1)
else:
if sentiment=="select sentiment":
display_text.text(db.query("SELECT TITLE FROM POSTS WHERE POST_ID='{}'".format(ids[0][0]))[0][0])
display_text_key = str(int(display_text_key)+1)
else:
if len(ids)>1:
display_text.text(db.query("SELECT TITLE FROM POSTS WHERE POST_ID='{}'".format(ids[1][0]))[0][0])
display_text_key = str(int(display_text_key)+1)
if (sentiment=='positive'):
db.label(type_text, ids[0][0], 1)
if (sentiment=='negative'):
db.label(type_text, ids[0][0], 0)
elif type_text == 'COMMENTS':
ids = db.query("SELECT COMMENT_ID FROM COMMENTS WHERE TARGET=-1 ;")
if len(ids)==0:
display_text.text("NO UNLABELED {}".format(type_text))
display_text_key = str(int(display_text_key)+1)
else:
if sentiment=='select sentiment':
display_text.text(db.query("SELECT COMMENT FROM COMMENTS WHERE COMMENT_ID='{}'".format(ids[0][0]))[0][0])
display_text_key = str(int(display_text_key)+1)
else:
if len(ids)>1:
display_text.text(db.query("SELECT COMMENT FROM COMMENTS WHERE COMMENT_ID='{}'".format(ids[1][0]))[0][0])
display_text_key = str(int(display_text_key)+1)
if (sentiment=='positive'):
db.label(type_text, ids[0][0], 1)
if (sentiment=='negative'):
db.label(type_text, ids[0][0], 0)
else:
display_text.text("NO UNLABELED {}".format(type_text))
display_text_key = str(int(display_text_key)+1)
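# To launch this interface locally (assuming the file is saved as, e.g., label_app.py):
#   streamlit run label_app.py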
|
import sys
import re
import logging
import nbformat as nbf
from nbconvert.preprocessors import Preprocessor
import traitlets
from box import Box
DEBUG = 1
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
def load_lessons(lesson_dicts):
res = [Lesson(d, i) for i, d in enumerate(lesson_dicts)]
for i, lesson in enumerate(res):
if i > 0:
lesson._prev = res[i-1]
lesson.first = False
else:
lesson.first = True
if i < len(res)-1:
lesson._next = res[i+1]
lesson.last = False
else:
lesson.last = True
return res
class Lesson:
def __init__(self, d, idx):
self.num = idx+1
for k, v in d.items():
if isinstance(v, dict):
v = Box(v)
setattr(self, k, v)
@property
def exercise_forking_url(self):
return 'https://www.kaggle.com/kernels/fork/{}'.format(self.exercise.scriptid)
@property
def tutorial_url(self):
return 'https://www.kaggle.com/{}'.format(self.tutorial.slug)
@property
def next(self):
if self.last:
raise ValueError("Lesson number {} is the last. No next lesson.".format(self.num))
return self._next
@property
def prev(self):
if self.first:
raise ValueError("Lesson number {} is the first. No prev lesson.".format(self.num))
return self._prev
class LearnLessonPreprocessor(Preprocessor):
lessons_metadata = traitlets.List().tag(config=True)
def preprocess(self, nb, resources):
lt_meta = nb['metadata']['learntools_metadata']
lesson_ix = lt_meta['lesson_index']
lessons = load_lessons(self.lessons_metadata)
self.lessons = lessons
self.lesson = lessons[lesson_ix]
for i, cell in enumerate(nb.cells):
nb.cells[i] = self.process_cell(cell)
return nb, resources
def process_cell(self, cell):
pattern = r'#\$([^$]+)\$'
src = cell['source']
macros = re.finditer(pattern, src)
newsrc = ''
i = 0
for match in macros:
logging.debug(match)
a, b = match.span()
macro = match.group(1)
try:
newsrc += src[i:a] + self.expand_macro(macro)
except Exception as e:
print("Error parsing macro match {}".format(match))
raise e
i = b
newsrc += src[i:]
cell['source'] = newsrc
return cell
def expand_macro(self, macro):
args = []
if macro.endswith(')'):
macro, argstr = macro[:-1].split('(')
args = [argstr.strip()] if argstr.strip() else []
macro_method = getattr(self, macro)
return macro_method(*args)
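    # Example of the macro syntax handled above: a cell whose source contains the
    # token #$TUTORIAL_URL(3)$ gets it replaced by the return value of
    # self.TUTORIAL_URL('3'), while an argument-less token such as #$YOURTURN$
    # simply calls self.YOURTURN().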
def EXERCISE_FORKING_URL(self):
return self.lesson.exercise_forking_url
def YOURTURN(self):
return """# Your turn!
Head over to [the Exercises notebook]({}) to get some hands-on practice working with {}.""".format(
self.lesson.exercise_forking_url, self.lesson.topic,
)
def EXERCISE_SETUP(self):
# Standard setup code. Not currently used. Maybe should be.
pass
def TUTORIAL_URL(self, lesson_num=None):
if lesson_num is None:
lesson = self.lesson
else:
lesson_idx = int(lesson_num) - 1
lesson = self.lessons[lesson_idx]
return lesson.tutorial_url
def EXERCISE_URL(self, lesson_num):
# TODO: unify this + EXERCISE_FORKING_URL (have that be this with default arg)
lesson_idx = int(lesson_num) - 1
lesson = self.lessons[lesson_idx]
return lesson.exercise_forking_url
def EXERCISE_PREAMBLE(self):
return """These exercises accompany the tutorial on [{}]({}).""".format(
self.lesson.topic, self.lesson.tutorial_url,
)
def END_OF_EXERCISE(self, forum_cta=1):
# Don't use this macro for the very last exercise
next = self.lesson.next
res = ''
if int(forum_cta):
res += "If you have any questions, be sure to post them on the [forums](https://www.kaggle.com/learn-forum).\n\n"
res += """Remember that your notebook is private by default, and in order to share it with other people or ask for help with it, you'll need to make it public. First, you'll need to save a version of your notebook that shows your current work by hitting the "Commit & Run" button. (Your work is saved automatically, but versioning your work lets you go back and look at what it was like at the point you saved it. It also let's you share a nice compiled notebook instead of just the raw code.) Then, once your notebook is finished running, you can go to the Settings tab in the panel to the left (you may have to expand it by hitting the [<] button next to the "Commit & Run" button) and setting the "Visibility" dropdown to "Public".
# Keep Going
When you're ready to continue, [click here]({}) to continue on to the next tutorial on {}.""".format(
next.tutorial_url, next.topic,
)
return res
# Alternative formulation (used on days 5 and 6):
# Want feedback on your code? To share it with others or ask for help, you'll need to make it public. Save a version of your notebook that shows your current work by hitting the "Commit & Run" button. Once your notebook is finished running, go to the Settings tab in the panel to the left (you may have to expand it by hitting the [<] button next to the "Commit & Run" button) and set the "Visibility" dropdown to "Public".
|
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
from markdown.test_tools import TestCase
from markdown.extensions.tables import TableExtension
class TestTableBlocks(TestCase):
def test_empty_cells(self):
"""Empty cells (nbsp)."""
text = """
| Second Header
------------- | -------------
| Content Cell
Content Cell |
"""
self.assertMarkdownRenders(
text,
self.dedent(
"""
<table>
<thead>
<tr>
<th> </th>
<th>Second Header</th>
</tr>
</thead>
<tbody>
<tr>
<td> </td>
<td>Content Cell</td>
</tr>
<tr>
<td>Content Cell</td>
<td> </td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_no_sides(self):
self.assertMarkdownRenders(
self.dedent(
"""
First Header | Second Header
------------- | -------------
Content Cell | Content Cell
Content Cell | Content Cell
"""
),
self.dedent(
"""
<table>
<thead>
<tr>
<th>First Header</th>
<th>Second Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>Content Cell</td>
<td>Content Cell</td>
</tr>
<tr>
<td>Content Cell</td>
<td>Content Cell</td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_both_sides(self):
self.assertMarkdownRenders(
self.dedent(
"""
| First Header | Second Header |
| ------------- | ------------- |
| Content Cell | Content Cell |
| Content Cell | Content Cell |
"""
),
self.dedent(
"""
<table>
<thead>
<tr>
<th>First Header</th>
<th>Second Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>Content Cell</td>
<td>Content Cell</td>
</tr>
<tr>
<td>Content Cell</td>
<td>Content Cell</td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_align_columns(self):
self.assertMarkdownRenders(
self.dedent(
"""
| Item | Value |
| :-------- | -----:|
| Computer | $1600 |
| Phone | $12 |
| Pipe | $1 |
"""
),
self.dedent(
"""
<table>
<thead>
<tr>
<th style="text-align: left;">Item</th>
<th style="text-align: right;">Value</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: left;">Computer</td>
<td style="text-align: right;">$1600</td>
</tr>
<tr>
<td style="text-align: left;">Phone</td>
<td style="text-align: right;">$12</td>
</tr>
<tr>
<td style="text-align: left;">Pipe</td>
<td style="text-align: right;">$1</td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_styles_in_tables(self):
self.assertMarkdownRenders(
self.dedent(
"""
| Function name | Description |
| ------------- | ------------------------------ |
| `help()` | Display the help window. |
| `destroy()` | **Destroy your computer!** |
"""
),
self.dedent(
"""
<table>
<thead>
<tr>
<th>Function name</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>help()</code></td>
<td>Display the help window.</td>
</tr>
<tr>
<td><code>destroy()</code></td>
<td><strong>Destroy your computer!</strong></td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_align_three(self):
self.assertMarkdownRenders(
self.dedent(
"""
|foo|bar|baz|
|:--|:-:|--:|
| | Q | |
|W | | W|
"""
),
self.dedent(
"""
<table>
<thead>
<tr>
<th style="text-align: left;">foo</th>
<th style="text-align: center;">bar</th>
<th style="text-align: right;">baz</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: left;"></td>
<td style="text-align: center;">Q</td>
<td style="text-align: right;"></td>
</tr>
<tr>
<td style="text-align: left;">W</td>
<td style="text-align: center;"></td>
<td style="text-align: right;">W</td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_three_columns(self):
self.assertMarkdownRenders(
self.dedent(
"""
foo|bar|baz
---|---|---
| Q |
W | | W
"""
),
self.dedent(
"""
<table>
<thead>
<tr>
<th>foo</th>
<th>bar</th>
<th>baz</th>
</tr>
</thead>
<tbody>
<tr>
<td></td>
<td>Q</td>
<td></td>
</tr>
<tr>
<td>W</td>
<td></td>
<td>W</td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_three_spaces_prefix(self):
self.assertMarkdownRenders(
self.dedent(
"""
Three spaces in front of a table:
First Header | Second Header
------------ | -------------
Content Cell | Content Cell
Content Cell | Content Cell
| First Header | Second Header |
| ------------ | ------------- |
| Content Cell | Content Cell |
| Content Cell | Content Cell |
"""),
self.dedent(
"""
<p>Three spaces in front of a table:</p>
<table>
<thead>
<tr>
<th>First Header</th>
<th>Second Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>Content Cell</td>
<td>Content Cell</td>
</tr>
<tr>
<td>Content Cell</td>
<td>Content Cell</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>First Header</th>
<th>Second Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>Content Cell</td>
<td>Content Cell</td>
</tr>
<tr>
<td>Content Cell</td>
<td>Content Cell</td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_code_block_table(self):
self.assertMarkdownRenders(
self.dedent(
"""
Four spaces is a code block:
First Header | Second Header
------------ | -------------
Content Cell | Content Cell
Content Cell | Content Cell
| First Header | Second Header |
| ------------ | ------------- |
"""),
self.dedent(
"""
<p>Four spaces is a code block:</p>
<pre><code>First Header | Second Header
------------ | -------------
Content Cell | Content Cell
Content Cell | Content Cell
</code></pre>
<table>
<thead>
<tr>
<th>First Header</th>
<th>Second Header</th>
</tr>
</thead>
<tbody>
<tr>
<td></td>
<td></td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_inline_code_blocks(self):
self.assertMarkdownRenders(
self.dedent(
"""
More inline code block tests
Column 1 | Column 2 | Column 3
---------|----------|---------
word 1 | word 2 | word 3
word 1 | `word 2` | word 3
word 1 | \\`word 2 | word 3
word 1 | `word 2 | word 3
word 1 | `word |2` | word 3
words |`` some | code `` | more words
words |``` some | code ``` | more words
words |```` some | code ```` | more words
words |`` some ` | ` code `` | more words
words |``` some ` | ` code ``` | more words
words |```` some ` | ` code ```` | more words
"""),
self.dedent(
"""
<p>More inline code block tests</p>
<table>
<thead>
<tr>
<th>Column 1</th>
<th>Column 2</th>
<th>Column 3</th>
</tr>
</thead>
<tbody>
<tr>
<td>word 1</td>
<td>word 2</td>
<td>word 3</td>
</tr>
<tr>
<td>word 1</td>
<td><code>word 2</code></td>
<td>word 3</td>
</tr>
<tr>
<td>word 1</td>
<td>`word 2</td>
<td>word 3</td>
</tr>
<tr>
<td>word 1</td>
<td>`word 2</td>
<td>word 3</td>
</tr>
<tr>
<td>word 1</td>
<td><code>word |2</code></td>
<td>word 3</td>
</tr>
<tr>
<td>words</td>
<td><code>some | code</code></td>
<td>more words</td>
</tr>
<tr>
<td>words</td>
<td><code>some | code</code></td>
<td>more words</td>
</tr>
<tr>
<td>words</td>
<td><code>some | code</code></td>
<td>more words</td>
</tr>
<tr>
<td>words</td>
<td><code>some ` | ` code</code></td>
<td>more words</td>
</tr>
<tr>
<td>words</td>
<td><code>some ` | ` code</code></td>
<td>more words</td>
</tr>
<tr>
<td>words</td>
<td><code>some ` | ` code</code></td>
<td>more words</td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_issue_440(self):
self.assertMarkdownRenders(
self.dedent(
"""
A test for issue #440:
foo | bar
--- | ---
foo | (`bar`) and `baz`.
"""),
self.dedent(
"""
<p>A test for issue #440:</p>
<table>
<thead>
<tr>
<th>foo</th>
<th>bar</th>
</tr>
</thead>
<tbody>
<tr>
<td>foo</td>
<td>(<code>bar</code>) and <code>baz</code>.</td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_lists_not_tables(self):
self.assertMarkdownRenders(
self.dedent(
"""
Lists are not tables
- this | should | not
- be | a | table
"""),
self.dedent(
"""
<p>Lists are not tables</p>
<ul>
<li>this | should | not</li>
<li>be | a | table</li>
</ul>
"""
),
extensions=['tables']
)
def test_issue_449(self):
self.assertMarkdownRenders(
self.dedent(
r"""
Add tests for issue #449
Odd backticks | Even backticks
------------ | -------------
``[!\"\#$%&'()*+,\-./:;<=>?@\[\\\]^_`{|}~]`` | ``[!\"\#$%&'()*+,\-./:;<=>?@\[\\\]^`_`{|}~]``
Escapes | More Escapes
------- | ------
`` `\`` | `\`
Only the first backtick can be escaped
Escaped | Backticks
------- | ------
\`` \` | \`\`
Test escaped pipes
Column 1 | Column 2
-------- | --------
`|` \| | Pipes are okay in code and escaped. \|
| Column 1 | Column 2 |
| -------- | -------- |
| row1 | row1 \|
| row2 | row2 |
Test header escapes
| `` `\`` \| | `\` \|
| ---------- | ---- |
| row1 | row1 |
| row2 | row2 |
Escaped pipes in format row should not be a table
| Column1 | Column2 |
| ------- \|| ------- |
| row1 | row1 |
| row2 | row2 |
Test escaped code in Table
Should not be code | Should be code
------------------ | --------------
\`Not code\` | \\`code`
\\\`Not code\\\` | \\\\`code`
"""),
self.dedent(
"""
<p>Add tests for issue #449</p>
<table>
<thead>
<tr>
<th>Odd backticks</th>
<th>Even backticks</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>[!\\"\\#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~]</code></td>
<td><code>[!\\"\\#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^`_`{|}~]</code></td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Escapes</th>
<th>More Escapes</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>`\\</code></td>
<td><code>\\</code></td>
</tr>
</tbody>
</table>
<p>Only the first backtick can be escaped</p>
<table>
<thead>
<tr>
<th>Escaped</th>
<th>Backticks</th>
</tr>
</thead>
<tbody>
<tr>
<td>`<code>\\</code></td>
<td>``</td>
</tr>
</tbody>
</table>
<p>Test escaped pipes</p>
<table>
<thead>
<tr>
<th>Column 1</th>
<th>Column 2</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>|</code> |</td>
<td>Pipes are okay in code and escaped. |</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Column 1</th>
<th>Column 2</th>
</tr>
</thead>
<tbody>
<tr>
<td>row1</td>
<td>row1 |</td>
</tr>
<tr>
<td>row2</td>
<td>row2</td>
</tr>
</tbody>
</table>
<p>Test header escapes</p>
<table>
<thead>
<tr>
<th><code>`\\</code> |</th>
<th><code>\\</code> |</th>
</tr>
</thead>
<tbody>
<tr>
<td>row1</td>
<td>row1</td>
</tr>
<tr>
<td>row2</td>
<td>row2</td>
</tr>
</tbody>
</table>
<p>Escaped pipes in format row should not be a table</p>
<p>| Column1 | Column2 |
| ------- || ------- |
| row1 | row1 |
| row2 | row2 |</p>
<p>Test escaped code in Table</p>
<table>
<thead>
<tr>
<th>Should not be code</th>
<th>Should be code</th>
</tr>
</thead>
<tbody>
<tr>
<td>`Not code`</td>
<td>\\<code>code</code></td>
</tr>
<tr>
<td>\\`Not code\\`</td>
<td>\\\\<code>code</code></td>
</tr>
</tbody>
</table>
"""
),
extensions=['tables']
)
def test_single_column_tables(self):
self.assertMarkdownRenders(
self.dedent(
"""
Single column tables
| Is a Table |
| ---------- |
| Is a Table
| ----------
Is a Table |
---------- |
| Is a Table |
| ---------- |
| row |
| Is a Table
| ----------
| row
Is a Table |
---------- |
row |
| Is not a Table
--------------
| row
Is not a Table |
--------------
row |
| Is not a Table
| --------------
row
Is not a Table |
-------------- |
row
"""),
self.dedent(
"""
<p>Single column tables</p>
<table>
<thead>
<tr>
<th>Is a Table</th>
</tr>
</thead>
<tbody>
<tr>
<td></td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Is a Table</th>
</tr>
</thead>
<tbody>
<tr>
<td></td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Is a Table</th>
</tr>
</thead>
<tbody>
<tr>
<td></td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Is a Table</th>
</tr>
</thead>
<tbody>
<tr>
<td>row</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Is a Table</th>
</tr>
</thead>
<tbody>
<tr>
<td>row</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Is a Table</th>
</tr>
</thead>
<tbody>
<tr>
<td>row</td>
</tr>
</tbody>
</table>
<h2>| Is not a Table</h2>
<p>| row</p>
<h2>Is not a Table |</h2>
<p>row |</p>
<p>| Is not a Table
| --------------
row</p>
<p>Is not a Table |
-------------- |
row</p>
"""
),
extensions=['tables']
)
def test_align_columns_legacy(self):
self.assertMarkdownRenders(
self.dedent(
"""
| Item | Value |
| :-------- | -----:|
| Computer | $1600 |
| Phone | $12 |
| Pipe | $1 |
"""
),
self.dedent(
"""
<table>
<thead>
<tr>
<th align="left">Item</th>
<th align="right">Value</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">Computer</td>
<td align="right">$1600</td>
</tr>
<tr>
<td align="left">Phone</td>
<td align="right">$12</td>
</tr>
<tr>
<td align="left">Pipe</td>
<td align="right">$1</td>
</tr>
</tbody>
</table>
"""
),
extensions=[TableExtension(use_align_attribute=True)]
)
def test_align_three_legacy(self):
self.assertMarkdownRenders(
self.dedent(
"""
|foo|bar|baz|
|:--|:-:|--:|
| | Q | |
|W | | W|
"""
),
self.dedent(
"""
<table>
<thead>
<tr>
<th align="left">foo</th>
<th align="center">bar</th>
<th align="right">baz</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left"></td>
<td align="center">Q</td>
<td align="right"></td>
</tr>
<tr>
<td align="left">W</td>
<td align="center"></td>
<td align="right">W</td>
</tr>
</tbody>
</table>
"""
),
extensions=[TableExtension(use_align_attribute=True)]
)
|
grau = float(input())
min = float(input())
seg = float(input())
conversor = seg / 60
min = min + conversor
conversor = min / 60
grau = grau + conversor
print('graus = {:.4f}'.format(grau))
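# Converts a degrees/minutes/seconds reading to decimal degrees.
# Worked example: inputs 30, 30, 0 ->
#   0 / 60 = 0.0,   minutes = 30 + 0.0 = 30.0
#   30.0 / 60 = 0.5, degrees = 30 + 0.5 = 30.5
#   printed: graus = 30.5000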
|
try:
import unittest2 as unittest
except ImportError:
import unittest
import rope.base.history
from rope.base import exceptions
import rope.base.change
from ropetest import testutils
class HistoryTest(unittest.TestCase):
def setUp(self):
super(HistoryTest, self).setUp()
self.project = testutils.sample_project()
self.history = self.project.history
def tearDown(self):
testutils.remove_project(self.project)
super(HistoryTest, self).tearDown()
def test_undoing_writes(self):
my_file = self.project.root.create_file('my_file.txt')
my_file.write('text1')
self.history.undo()
self.assertEquals('', my_file.read())
def test_moving_files(self):
my_file = self.project.root.create_file('my_file.txt')
my_file.move('new_file.txt')
self.history.undo()
self.assertEquals('', my_file.read())
def test_moving_files_to_folders(self):
my_file = self.project.root.create_file('my_file.txt')
my_folder = self.project.root.create_folder('my_folder')
my_file.move(my_folder.path)
self.history.undo()
self.assertEquals('', my_file.read())
def test_writing_files_that_does_not_change_contents(self):
my_file = self.project.root.create_file('my_file.txt')
my_file.write('')
self.project.history.undo()
self.assertFalse(my_file.exists())
class IsolatedHistoryTest(unittest.TestCase):
def setUp(self):
super(IsolatedHistoryTest, self).setUp()
self.project = testutils.sample_project()
self.history = rope.base.history.History(self.project)
self.file1 = self.project.root.create_file('file1.txt')
self.file2 = self.project.root.create_file('file2.txt')
def tearDown(self):
testutils.remove_project(self.project)
super(IsolatedHistoryTest, self).tearDown()
def test_simple_undo(self):
change = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change)
self.assertEquals('1', self.file1.read())
self.history.undo()
self.assertEquals('', self.file1.read())
def test_tobe_undone(self):
change1 = rope.base.change.ChangeContents(self.file1, '1')
self.assertEquals(None, self.history.tobe_undone)
self.history.do(change1)
self.assertEquals(change1, self.history.tobe_undone)
change2 = rope.base.change.ChangeContents(self.file1, '2')
self.history.do(change2)
self.assertEquals(change2, self.history.tobe_undone)
self.history.undo()
self.assertEquals(change1, self.history.tobe_undone)
def test_tobe_redone(self):
change = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change)
self.assertEquals(None, self.history.tobe_redone)
self.history.undo()
self.assertEquals(change, self.history.tobe_redone)
def test_undo_limit(self):
history = rope.base.history.History(self.project, maxundos=1)
history.do(rope.base.change.ChangeContents(self.file1, '1'))
history.do(rope.base.change.ChangeContents(self.file1, '2'))
try:
history.undo()
with self.assertRaises(exceptions.HistoryError):
history.undo()
finally:
self.assertEquals('1', self.file1.read())
def test_simple_redo(self):
change = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change)
self.history.undo()
self.history.redo()
self.assertEquals('1', self.file1.read())
def test_simple_re_undo(self):
change = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change)
self.history.undo()
self.history.redo()
self.history.undo()
self.assertEquals('', self.file1.read())
def test_multiple_undos(self):
change = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change)
change = rope.base.change.ChangeContents(self.file1, '2')
self.history.do(change)
self.history.undo()
self.assertEquals('1', self.file1.read())
change = rope.base.change.ChangeContents(self.file1, '3')
self.history.do(change)
self.history.undo()
self.assertEquals('1', self.file1.read())
self.history.redo()
self.assertEquals('3', self.file1.read())
def test_undo_list_underflow(self):
with self.assertRaises(exceptions.HistoryError):
self.history.undo()
def test_redo_list_underflow(self):
with self.assertRaises(exceptions.HistoryError):
self.history.redo()
def test_dropping_undone_changes(self):
self.file1.write('1')
with self.assertRaises(exceptions.HistoryError):
self.history.undo(drop=True)
self.history.redo()
def test_undoing_choosen_changes(self):
change = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change)
self.history.undo(change)
self.assertEquals('', self.file1.read())
self.assertFalse(self.history.undo_list)
def test_undoing_choosen_changes2(self):
change1 = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change1)
self.history.do(rope.base.change.ChangeContents(self.file1, '2'))
self.history.undo(change1)
self.assertEquals('', self.file1.read())
self.assertFalse(self.history.undo_list)
def test_undoing_choosen_changes_not_undoing_others(self):
change1 = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change1)
self.history.do(rope.base.change.ChangeContents(self.file2, '2'))
self.history.undo(change1)
self.assertEquals('', self.file1.read())
self.assertEquals('2', self.file2.read())
def test_undoing_writing_after_moving(self):
change1 = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change1)
self.history.do(rope.base.change.MoveResource(self.file1, 'file3.txt'))
file3 = self.project.get_resource('file3.txt')
self.history.undo(change1)
self.assertEquals('', self.file1.read())
self.assertFalse(file3.exists())
def test_undoing_folder_movements_for_undoing_writes_inside_it(self):
folder = self.project.root.create_folder('folder')
file3 = folder.create_file('file3.txt')
change1 = rope.base.change.ChangeContents(file3, '1')
self.history.do(change1)
self.history.do(rope.base.change.MoveResource(folder, 'new_folder'))
new_folder = self.project.get_resource('new_folder')
self.history.undo(change1)
self.assertEquals('', file3.read())
self.assertFalse(new_folder.exists())
def test_undoing_changes_that_depend_on_a_dependant_change(self):
change1 = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change1)
changes = rope.base.change.ChangeSet('2nd change')
changes.add_change(rope.base.change.ChangeContents(self.file1, '2'))
changes.add_change(rope.base.change.ChangeContents(self.file2, '2'))
self.history.do(changes)
self.history.do(rope.base.change.MoveResource(self.file2, 'file3.txt'))
file3 = self.project.get_resource('file3.txt')
self.history.undo(change1)
self.assertEquals('', self.file1.read())
self.assertEquals('', self.file2.read())
self.assertFalse(file3.exists())
def test_undoing_writes_for_undoing_folder_movements_containing_it(self):
folder = self.project.root.create_folder('folder')
old_file = folder.create_file('file3.txt')
change1 = rope.base.change.MoveResource(folder, 'new_folder')
self.history.do(change1)
new_file = self.project.get_resource('new_folder/file3.txt')
self.history.do(rope.base.change.ChangeContents(new_file, '1'))
self.history.undo(change1)
self.assertEquals('', old_file.read())
self.assertFalse(new_file.exists())
def test_undoing_not_available_change(self):
change = rope.base.change.ChangeContents(self.file1, '1')
with self.assertRaises(exceptions.HistoryError):
self.history.undo(change)
def test_ignoring_ignored_resources(self):
self.project.set('ignored_resources', ['ignored*'])
ignored = self.project.get_file('ignored.txt')
change = rope.base.change.CreateResource(ignored)
self.history.do(change)
self.assertTrue(ignored.exists())
self.assertEquals(0, len(self.history.undo_list))
def test_get_file_undo_list_simple(self):
change = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change)
self.assertEquals(set([change]),
set(self.history.get_file_undo_list(self.file1)))
def test_get_file_undo_list_for_moves(self):
change = rope.base.change.MoveResource(self.file1, 'file2.txt')
self.history.do(change)
self.assertEquals(set([change]),
set(self.history.get_file_undo_list(self.file1)))
# XXX: What happens for moves before the file is created?
def xxx_test_get_file_undo_list_and_moving_its_contining_folder(self):
folder = self.project.root.create_folder('folder')
old_file = folder.create_file('file3.txt')
change1 = rope.base.change.MoveResource(folder, 'new_folder')
self.history.do(change1)
self.assertEquals(set([change1]),
set(self.history.get_file_undo_list(old_file)))
def test_clearing_redo_list_after_do(self):
change = rope.base.change.ChangeContents(self.file1, '1')
self.history.do(change)
self.history.undo()
self.history.do(change)
self.assertEquals(0, len(self.history.redo_list))
def test_undoing_a_not_yet_performed_change(self):
change = rope.base.change.ChangeContents(self.file1, '1')
str(change)
with self.assertRaises(exceptions.HistoryError):
change.undo()
def test_clearing_up_the_history(self):
change1 = rope.base.change.ChangeContents(self.file1, '1')
change2 = rope.base.change.ChangeContents(self.file1, '2')
self.history.do(change1)
self.history.do(change2)
self.history.undo()
self.history.clear()
self.assertEquals(0, len(self.history.undo_list))
self.assertEquals(0, len(self.history.redo_list))
def test_redoing_choosen_changes_not_undoing_others(self):
change1 = rope.base.change.ChangeContents(self.file1, '1')
change2 = rope.base.change.ChangeContents(self.file2, '2')
self.history.do(change1)
self.history.do(change2)
self.history.undo()
self.history.undo()
redone = self.history.redo(change2)
self.assertEquals([change2], redone)
self.assertEquals('', self.file1.read())
self.assertEquals('2', self.file2.read())
class SavingHistoryTest(unittest.TestCase):
def setUp(self):
super(SavingHistoryTest, self).setUp()
self.project = testutils.sample_project()
self.history = rope.base.history.History(self.project)
self.to_data = rope.base.change.ChangeToData()
self.to_change = rope.base.change.DataToChange(self.project)
def tearDown(self):
testutils.remove_project(self.project)
super(SavingHistoryTest, self).tearDown()
def test_simple_set_saving(self):
data = self.to_data(rope.base.change.ChangeSet('testing'))
change = self.to_change(data)
self.assertEquals('testing', str(change))
def test_simple_change_content_saving(self):
myfile = self.project.get_file('myfile.txt')
myfile.create()
myfile.write('1')
data = self.to_data(rope.base.change.ChangeContents(myfile, '2'))
change = self.to_change(data)
self.history.do(change)
self.assertEquals('2', myfile.read())
self.history.undo()
self.assertEquals('1', change.old_contents)
def test_move_resource_saving(self):
myfile = self.project.root.create_file('myfile.txt')
myfolder = self.project.root.create_folder('myfolder')
data = self.to_data(rope.base.change.MoveResource(myfile, 'myfolder'))
change = self.to_change(data)
self.history.do(change)
self.assertFalse(myfile.exists())
self.assertTrue(myfolder.has_child('myfile.txt'))
self.history.undo()
self.assertTrue(myfile.exists())
self.assertFalse(myfolder.has_child('myfile.txt'))
def test_move_resource_saving_for_folders(self):
myfolder = self.project.root.create_folder('myfolder')
newfolder = self.project.get_folder('newfolder')
change = rope.base.change.MoveResource(myfolder, 'newfolder')
self.history.do(change)
data = self.to_data(change)
change = self.to_change(data)
change.undo()
self.assertTrue(myfolder.exists())
self.assertFalse(newfolder.exists())
def test_create_file_saving(self):
myfile = self.project.get_file('myfile.txt')
data = self.to_data(rope.base.change.CreateFile(self.project.root,
'myfile.txt'))
change = self.to_change(data)
self.history.do(change)
self.assertTrue(myfile.exists())
self.history.undo()
self.assertFalse(myfile.exists())
def test_create_folder_saving(self):
myfolder = self.project.get_folder('myfolder')
data = self.to_data(rope.base.change.CreateFolder(self.project.root,
'myfolder'))
change = self.to_change(data)
self.history.do(change)
self.assertTrue(myfolder.exists())
self.history.undo()
self.assertFalse(myfolder.exists())
def test_create_resource_saving(self):
myfile = self.project.get_file('myfile.txt')
data = self.to_data(rope.base.change.CreateResource(myfile))
change = self.to_change(data)
self.history.do(change)
self.assertTrue(myfile.exists())
self.history.undo()
self.assertFalse(myfile.exists())
def test_remove_resource_saving(self):
myfile = self.project.root.create_file('myfile.txt')
data = self.to_data(rope.base.change.RemoveResource(myfile))
change = self.to_change(data)
self.history.do(change)
self.assertFalse(myfile.exists())
def test_change_set_saving(self):
change = rope.base.change.ChangeSet('testing')
myfile = self.project.get_file('myfile.txt')
change.add_change(rope.base.change.CreateResource(myfile))
change.add_change(rope.base.change.ChangeContents(myfile, '1'))
data = self.to_data(change)
change = self.to_change(data)
self.history.do(change)
self.assertEquals('1', myfile.read())
self.history.undo()
self.assertFalse(myfile.exists())
def test_writing_and_reading_history(self):
history_file = self.project.get_file('history.pickle') # noqa
self.project.set('save_history', True)
history = rope.base.history.History(self.project)
myfile = self.project.get_file('myfile.txt')
history.do(rope.base.change.CreateResource(myfile))
history.write()
history = rope.base.history.History(self.project)
history.undo()
self.assertFalse(myfile.exists())
def test_writing_and_reading_history2(self):
history_file = self.project.get_file('history.pickle') # noqa
self.project.set('save_history', True)
history = rope.base.history.History(self.project)
myfile = self.project.get_file('myfile.txt')
history.do(rope.base.change.CreateResource(myfile))
history.undo()
history.write()
history = rope.base.history.History(self.project)
history.redo()
self.assertTrue(myfile.exists())
def suite():
result = unittest.TestSuite()
result.addTests(unittest.makeSuite(HistoryTest))
result.addTests(unittest.makeSuite(IsolatedHistoryTest))
result.addTests(unittest.makeSuite(SavingHistoryTest))
return result
if __name__ == '__main__':
unittest.main()
|
# Copyright 2021 GraphRepo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This module uses pydriller to search a repository
and indexes it in neo4j
"""
from diskcache import Cache
from datetime import datetime
from py2neo import Graph
from pydriller import RepositoryMining
import graphrepo.utils as utl
import graphrepo.drillers.batch_utils as b_utl
from graphrepo.config import Config
from graphrepo.drillers.drill_cache import DrillCacheSequential
from graphrepo.drillers.default import DefaultDriller
from graphrepo.logger import Logger
LG = Logger()
class Driller(DefaultDriller):
"""Drill class - parses a git repo and uses the models
to index everything in Neo4j. This class is a singleton
because it holds the connection to Neo4j in self.graph
"""
def index_batch(self, **kwargs):
"""Indexes data extracted by drill_batch of from
disk in Neo4j
:param kwargs: data keys and values (see the drill_batch return)
"""
try:
self.config.check_config()
self._check_connection()
b_utl.index_all(
self.graph, config=self.config.ct, **kwargs)
except Exception as exc:
LG.log_and_raise(exc)
else:
return
def index_from_file(self, file_path):
"""Reads a file and indexes the data in Neo4j
:param file_path: the path of the JSON file with data
"""
try:
data_ = utl.load_json(file_path)
self.index_batch(**data_)
except Exception as exc:
LG.log_and_raise(exc)
else:
            return
|
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.http import HttpResponse, JsonResponse
from django.core.exceptions import ValidationError
try:
from django.utils import simplejson as json
except ImportError:
import json
import logging
from braces.views import AnonymousRequiredMixin
from django.contrib.auth import login, authenticate
from django.shortcuts import redirect, get_object_or_404
from django.shortcuts import render
from django.views.generic import TemplateView
from .forms import LoginForm, ProfileForm, \
AddParticipantsForm
from .models import Profile, Participants
from django.contrib import messages
from django.utils.http import urlsafe_base64_decode
from django.core.paginator import Paginator
from django.shortcuts import render_to_response
User = get_user_model()
LOG = logging.getLogger('myStock.%s' % __name__)
# Create your views here.
class IndexView(TemplateView):
"""
"""
template_name = 'registration/index.html'
def get(self, request, *args, **kwargs):
"""
:param request:
:param args:
:param kwargs:
:return:
"""
return render(request, self.template_name)
class LoginView(AnonymousRequiredMixin, TemplateView):
"""
Login view class. Users are logged in
    using either email or nickname.
"""
login_form = LoginForm
initial = {'key': 'value'}
template_name = 'registration/login.html'
def get(self, request, *args, **kwargs):
"""
function which return the template with login and signup form
:param request:
:param args:
:param kwargs:
:return:
"""
login_form = self.login_form(initial=self.initial)
context = {
'login_form': login_form,
}
return render(request, self.template_name, context)
def post(self, request):
"""
function which handles post request from login and signup form
to login and create user
:param request:
:param args:
:param kwargs:
:return:
"""
login_form = self.login_form(request.POST)
context = {
'login_form': login_form,
}
if login_form.is_valid():
email = login_form.cleaned_data.get('email')
raw_password = login_form.cleaned_data.get('password')
            user = authenticate(email=email, password=raw_password)
            if user is not None:
                login(request, user)
                return redirect('/home')
            messages.warning(request, 'Email And Password Does Not Match.')
            return redirect('/login')
return render(request, self.template_name, context)
class ParticipantsView(TemplateView):
participants_form = AddParticipantsForm
# sign_up_form = SignUpForm
initial = {'key': 'value'}
template_name = 'registration/adduser.html'
def get(self, request, *args, **kwargs):
"""
function which return the template with login and signup form
:param request:
:param args:
:param kwargs:
:return:
"""
participants_form = self.participants_form(initial=self.initial)
context = {
'participants_form': participants_form,
}
return render(request, self.template_name, context)
def post(self, request):
"""
:param request:
:return:
"""
participants_form = self.participants_form(request.POST)
if participants_form.is_valid():
sub_region = participants_form.cleaned_data.get('subregion')
try:
participants_form.save()
return redirect('/home')
except ValidationError as e:
print(e)
pass
context = {
'participants_form': participants_form,
}
return render(request, self.template_name, context)
class ParticipantList(TemplateView):
"""
"""
template_name = 'registration/dashboard.html'
def get(self, request, *args, **kwargs):
"""
:param request:
:param args:
:param kwargs:
:return:
"""
participants = Participants.objects.all()
context = {
'participants': participants
}
return render(request, self.template_name, context)
def DeleteParticipant(request, pk):
"""
"""
print(pk)
participant_object = Participants.objects.get(id=pk)
if participant_object:
participant_object.delete()
return redirect("/home")
def editParticipant(request, pk):
instance = get_object_or_404(Participants, id=pk)
participant_form = AddParticipantsForm(request.POST or None, instance=instance)
if participant_form.is_valid():
participant_form.save()
return redirect("/home")
return render(request, 'registration/adduser.html', {'participants_form': participant_form})
class AboutUs(TemplateView):
"""
"""
template_name = 'registration/about.html'
def get(self, request, *args, **kwargs):
"""
:param request:
:param args:
:param kwargs:
:return:
"""
return render(request, self.template_name)
class Mission(TemplateView):
"""
"""
template_name = 'registration/mission.html'
def get(self, request, *args, **kwargs):
"""
:param request:
:param args:
:param kwargs:
:return:
"""
return render(request, self.template_name)
class HomeView(TemplateView):
"""
"""
template_name = 'registration/participants.html'
def get(self, request, *args, **kwargs):
"""
:param request:
:param args:
:param kwargs:
:return:
"""
participants = Participants.objects.all()
context = {
'participants': participants
}
return render(request, self.template_name, context) |
"""Hive Alarm Module."""
class HiveHomeShield:
"""Hive homeshield alarm.
Returns:
object: Hive homeshield
"""
alarmType = "Alarm"
async def getMode(self):
"""Get current mode of the alarm.
Returns:
            str: Mode of the alarm [armed_home, armed_away, armed_night]
"""
state = None
try:
data = self.session.data.alarm
state = data["mode"]
except KeyError as e:
await self.session.log.error(e)
return state
async def getState(self, device: dict):
"""Get the alarm triggered state.
Returns:
boolean: True/False if alarm is triggered.
"""
state = None
try:
data = self.session.data.devices[device["hiveID"]]
state = data["state"]["alarmActive"]
except KeyError as e:
await self.session.log.error(e)
return state
async def setMode(self, device: dict, mode: str):
"""Set the alarm mode.
        Args:
            device (dict): Alarm device.
            mode (str): Mode to set, e.g. armed_home, armed_away or armed_night.
Returns:
boolean: True/False if successful.
"""
final = False
if (
device["hiveID"] in self.session.data.devices
and device["deviceData"]["online"]
):
await self.session.hiveRefreshTokens()
resp = await self.session.api.setAlarm(mode=mode)
if resp["original"] == 200:
final = True
await self.session.getAlarm()
return final
class Alarm(HiveHomeShield):
"""Home assistant alarm.
Args:
HiveHomeShield (object): Class object.
"""
def __init__(self, session: object = None):
"""Initialise alarm.
Args:
session (object, optional): Used to interact with the hive account. Defaults to None.
"""
self.session = session
async def getAlarm(self, device: dict):
"""Get alarm data.
Args:
device (dict): Device to update.
Returns:
dict: Updated device.
"""
device["deviceData"].update(
{"online": await self.session.attr.onlineOffline(device["device_id"])}
)
dev_data = {}
if device["deviceData"]["online"]:
self.session.helper.deviceRecovered(device["device_id"])
data = self.session.data.devices[device["device_id"]]
dev_data = {
"hiveID": device["hiveID"],
"hiveName": device["hiveName"],
"hiveType": device["hiveType"],
"haName": device["haName"],
"haType": device["haType"],
"device_id": device["device_id"],
"device_name": device["device_name"],
"status": {
"state": await self.getState(device),
"mode": await self.getMode(),
},
"deviceData": data.get("props", None),
"parentDevice": data.get("parent", None),
"custom": device.get("custom", None),
"attributes": await self.session.attr.stateAttributes(
device["device_id"], device["hiveType"]
),
}
self.session.devices.update({device["hiveID"]: dev_data})
return self.session.devices[device["hiveID"]]
else:
await self.session.log.errorCheck(
device["device_id"], "ERROR", device["deviceData"]["online"]
)
return device
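# --- Usage sketch (illustrative only, not part of the original module) ---
# Assumes an already initialised Hive `session` object and an alarm `device`
# dict as produced elsewhere in this library:
#
#     alarm = Alarm(session)
#     device = await alarm.getAlarm(device)              # refresh device data
#     armed = await alarm.setMode(device, "armed_away")  # True on success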
|
from models.quartznet import ASRModel
from utils import config
import torch
import os
import soundfile as sf
from datasets.librispeech import image_val_transform
from transforms import *
from torchvision.transforms import *
from utils.model_utils import get_most_probable
from collections import OrderedDict
from utils.logger import logger
import numpy as np
import torch.nn.functional as F
def load_checkpoint(model):
checkpoint_path = os.path.abspath(config.server_checkpoint)
if os.path.exists(checkpoint_path):
loader = torch.load(checkpoint_path, map_location='cpu')
old_state_dict = loader['model_state_dict']
new_state_dict = OrderedDict()
        for k, v in old_state_dict.items():
            # strip the leading 7-character "module." prefix (added e.g. by nn.DataParallel)
            name = k[7:]
            new_state_dict[name] = v
model.load_state_dict(new_state_dict)
def load_model():
model = ASRModel(input_features=config.num_mel_banks, num_classes=config.vocab_size)
    logger.info('Initialized ASRModel.')
load_checkpoint(model)
logger.info(f'ASRModel weights set from {config.server_checkpoint}.')
model.eval()
logger.info('Set ASRModel to eval.')
return model
def get_mel_spec(signal):
data = {'samples': signal,
'sample_rate': config.sampling_rate}
to_stft = ToSTFT(n_fft=config.n_fft, hop_length=config.hop_length,
win_length=config.window_length)
stft_to_mel = ToMelSpectrogramFromSTFT(n_mels=config.num_mel_banks)
transforms = [to_stft, stft_to_mel, DeleteSTFT(), ToAudioTensor(['mel_spectrogram']), torch.from_numpy]
return Compose(transforms)(data)
@torch.no_grad()
def infer(audio):
spec = get_mel_spec(audio)
spec = F.pad(input=spec, pad=(0, int(spec.shape[1] * 0.2)), mode='constant', value=0)
spec = spec.unsqueeze(0)
y_pred = model(spec)
pred_sentence = get_most_probable(y_pred)[0]
return pred_sentence
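# --- Usage sketch (illustrative only; assumes a mono waveform sampled at
# config.sampling_rate, e.g. loaded with the already imported soundfile) ---
#
#     signal, _ = sf.read("example.wav", dtype="float32")
#     text = infer(signal)  # uses the module-level `model` created below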
model = load_model() |
"""Tests for dicomtrolley"""
|
"""Generate Gradcam maps for man/woman given the predicted captions"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import glob
import sys
import json
import os.path as osp
import tensorflow as tf
import PIL.Image
import numpy as np
sys.path.append('./im2txt/')
sys.path.append('.')
from im2txt import configuration
from im2txt import gradcam_wrapper
from im2txt.inference_utils import vocabulary
coco_dir = 'data/mscoco/'
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("checkpoint_path", "", "Model checkpoint file.")
tf.flags.DEFINE_string("vocab_file", "", "Text file containing the vocabulary.")
tf.flags.DEFINE_string("json_path", "", "JSON file with model predictions.")
tf.flags.DEFINE_string("img_path", "", "Text file containing 500 image IDs (balanced set).")
tf.flags.DEFINE_string("save_path", "", "Path to the location where outputs should be saved.")
tf.flags.DEFINE_string("male_word", "", "Male keyword.")
tf.flags.DEFINE_string("female_word", "", "Female keyword.")
#tf.flags.DEFINE_string("neutral_word", "", "Neutral keyword.")
tf.logging.set_verbosity(tf.logging.INFO)
def main(_):
# Build the inference graph.
g = tf.Graph()
with g.as_default():
model = gradcam_wrapper.GradCamWrapper()
restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
FLAGS.checkpoint_path)
#g.finalize()
save_path = osp.join(FLAGS.save_path, osp.basename(FLAGS.json_path)[0:-5])
if FLAGS.save_path != "" and not osp.isdir(save_path):
os.makedirs(save_path)
# Create the vocabulary.
vocab = vocabulary.Vocabulary(FLAGS.vocab_file)
man_id = vocab.word_to_id(FLAGS.male_word)
woman_id = vocab.word_to_id(FLAGS.female_word)
# person_id = vocab.word_to_id(FLAGS.neutral_word) # if we want to additionally process "person" words
json_data = json.load(open(FLAGS.json_path, 'r'))
json_dict = {}
for entry in json_data:
file_id = entry['filename']
if file_id not in json_dict:
caption = entry['caption']
caption = caption.lower()
json_dict[str(file_id)] = caption
filenames = glob.glob(FLAGS.img_path)
print(json_dict)
with tf.Session(graph=g) as sess:
# Load the model from checkpoint.
restore_fn(sess)
global_index = 0
for i, filename in enumerate(filenames):
            with tf.gfile.GFile(filename, "rb") as f:
image = f.read()
caption = json_dict[filename]
print(caption)
if caption[-1] == '.':
caption = caption[0:-1]
tokens = caption.split(' ')
tokens.insert(0, '<S>')
encoded_tokens = [vocab.word_to_id(w) for w in tokens]
man_ids = [i for i, c in enumerate(encoded_tokens) if c == man_id]
woman_ids = [i for i, c in enumerate(encoded_tokens) if c == woman_id]
# person_ids = [i for i, c in enumerate(encoded_tokens) if c == person_id]
if not (man_ids or woman_ids):
# nothing to do
continue
else:
for wid in man_ids:
if FLAGS.save_path != "":
save_path_pre = save_path + '/' + "%06d" % (global_index) + '_'
else:
save_path_pre = ""
model.process_image(sess, image, encoded_tokens, filename, vocab, word_index=wid-1, word_id=man_id, save_path=save_path_pre)
global_index += 1
for wid in woman_ids:
if FLAGS.save_path != "":
save_path_pre = save_path + '/' + "%06d" % (global_index) + '_'
else:
save_path_pre = ""
model.process_image(sess, image, encoded_tokens, filename, vocab, word_index=wid-1, word_id=woman_id, save_path=save_path_pre)
global_index += 1
# for wid in person_ids:
# if FLAGS.save_path != "":
# save_path_pre = save_path + '/' + "%06d" % (global_index) + '_'
# else:
# save_path_pre = ""
# model.process_image(sess, image, encoded_tokens, filename, vocab, word_index=wid-1, word_id=person_id, save_path=save_path_pre)
# global_index += 1
import gc
gc.collect()
if __name__ == "__main__":
tf.app.run()
|
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
import buggy.urls
import buggy_accounts.urls
urlpatterns = [
url(r'^', include(buggy.urls)),
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include(buggy_accounts.urls)),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
#!/usr/bin/env python
r'''
https://www.hackerrank.com/challenges/designer-pdf-viewer/problem
'''
import math
import os
import random
import re
import sys
# Complete the designerPdfViewer function below.
def designerPdfViewer(h, word):
maxHeight = 0
for i in range(len(word)):
ch = h[ord(word[i]) - 97]
if ch > maxHeight:
maxHeight = ch
return maxHeight * len(word)
import unittest
class FAT(unittest.TestCase):
def setUp(self):
pass
def test_01(self):
self.assertEqual(9, designerPdfViewer([1, 3, 1, 3, 1, 4, 1, 3, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5], 'abc'))
self.assertEqual(28, designerPdfViewer([1, 3, 1, 3, 1, 4, 1, 3, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 7], 'zaba'))
|
from copy import deepcopy
def is_negative_literal(n):
if n[0] == "neg":
return True
return False
def is_positive_literal(n):
if n[0] != "neg":
return True
return False
def get_premises(formula):
# TODO
#return []
return [get_args(n)[0] for n in get_args(formula) if is_negative_literal(n)]
def get_conclusion(formula):
# TODO
#pass
return list(filter(is_positive_literal, get_args(formula)))[0]
def is_fact(formula):
# TODO
#pass
return is_positive_literal(formula)
def is_rule(formula):
# TODO
#pass
if get_head(formula) != get_head(make_or(make_atom("P"), make_atom("P"))):
return False
if not is_positive_literal(get_conclusion(formula)):
return False
return all(list(map(is_positive_literal, get_premises(formula))))
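# Illustrative note (added for clarity): in this representation a fact is a
# positive atom, e.g. make_atom("Parent", make_const("Ann"), make_const("Bob")),
# while a rule "P(x) => Q(x)" is stored as the disjunction
# make_or(make_neg(make_atom("P", make_var("x"))), make_atom("Q", make_var("x"))),
# which is exactly what get_premises / get_conclusion / is_rule above take apart.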
def has_args(f):
return is_function_call(f) or is_sentence(f)
# for constants (to be checked), return the constant's value; otherwise None
def get_value(f):
if f[0] == "cnt":
return f[1]
return None
# for variables (to be checked), return the variable's name; otherwise None
def get_name(f):
if f[0] == "var":
return f[1]
return None
# for function calls, return the function name; for atoms, return the predicate name;
# for compound sentences, return a string that represents the logical connector (~, A or V)
# otherwise, None
def get_head(f):
if f[0] == "fct" or f[0] == "pred":
return f[1]
elif f[0] == "neg" or f[0] == "and" or f[0] == "or":
return f[0]
return None
# for sentences or function calls, return the list of arguments; otherwise None
def get_args(f):
if f[0] == "fct" or f[0] == "pred":
return f[2]
elif f[0] == "neg" or f[0] == "and" or f[0] == "or":
return f[1]
return None
# returns True if f is a constant term
def is_constant(f):
return f[0] == "cnt"
# returns True if f is a variable term
def is_variable(f):
return f[0] == "var"
# returns True if f is an atom (an application of a predicate)
def is_atom(f):
return f[0] == "pred"
# returns True if f is a valid sentence
def is_sentence(f):
if f[0] == "pred" or f[0] == "neg" or f[0] == "and" or f[0] == "or":
return True
return False
# returns True if f is a function call
def is_function_call(f):
return f[0] == "fct"
# returns True if f is a term
def is_term(f):
return is_constant(f) or is_variable(f) or is_function_call(f)
# returns a constant term with the given value
def make_const(value):
return ("cnt", value)
# returns a variable term with the given name
def make_var(name):
return ("var", name)
# returns a formula consisting of an atom that applies the given
# predicate to the rest of the given arguments
def make_atom(predicate, *args):
return ("pred", predicate, list(args))
# returns a formula that is the negation of the given sentence
def make_neg(sentence):
return ("neg", [sentence])
# returns a term that is a call of the function with the given name on the rest of the given arguments.
# E.g. to build the term add[1, 2, 3] we call make_function_call(add, 1, 2, 3)
def make_function_call(name, *args):
return ("fct", str(name), list(args))
# returns a formula that is the conjunction of the given sentences
def make_and(sentence1, sentence2, *others):
if others is not None:
return ("and", [sentence1, sentence2] + list(others))
else:
return ("and", [sentence1, sentence2])
# returns a formula that is the disjunction of the given sentences
def make_or(sentence1, sentence2, *others):
if others is not None:
return ("or", [sentence1, sentence2] + list(others))
else:
return ("or", [sentence1, sentence2])
def check_term(T):
return (is_constant(T) and get_value(T) is not None) or \
(is_variable(T) and get_name(T) is not None) or \
(is_function_call(T) and callable(get_head(T)) and not [t for t in get_args(T) if not check_term(t)])
def check_atom(A):
return is_atom(A) and get_head(A) is not None and not [t for t in get_args(A) if not check_term(t)]
dummy = make_atom("P")
and_name = get_head(make_and(dummy, dummy))
or_name = get_head(make_or(dummy, dummy))
neg_name = get_head(make_neg(dummy))
def check_sentence(S):
return is_sentence(S) and (
check_atom(S) or
((get_head(S) == and_name or get_head(S) == or_name) and len(get_args(S)) >= 2 and
not [s for s in get_args(S) if not check_sentence(s)])
or
(get_head(S) == neg_name and len(get_args(S)) == 1 and check_sentence(get_args(S)[0]))
)
def make_statement(conclusion, hypotheses):
L = list(hypotheses)
if not L:
return conclusion
L = [make_neg(s) for s in L]
    print(L)
L.append(conclusion)
return make_or(*L)
def add_statement(kb, conclusion, *hypotheses):
s = make_statement(conclusion, hypotheses)
    if check_sentence(s):
        kb.append(s)
        #print("Added statement " + print_formula(s, True))
        return True
    print("Sentence does not check out:", print_formula(s, True))
    return False
from functools import reduce
# returns the given formula or function call with its arguments replaced by the list new_args
# e.g. for the formula p(x, y), replacing the arguments with the list [1, 2] yields the formula p(1, 2)
# The new argument list must have the same length as the formula's original number of arguments
def replace_args(formula, new_args):
if formula[0] == "fct" or formula[0] == "pred":
if len(formula[2]) == len(new_args):
return (formula[0], formula[1], new_args)
elif formula[0] == "neg" or formula[0] == "and" or formula[0] == "or":
if len(formula[1]) == len(new_args):
return (formula[0], new_args)
return None
# Applies every binding of the given substitution to formula f and returns the resulting formula
def substitute(f, substitution):
if substitution is None:
return None
if is_variable(f) and (get_name(f) in substitution):
return substitute(substitution[get_name(f)], substitution)
if has_args(f):
return replace_args(f, [substitute(arg, substitution) for arg in get_args(f)])
return f
# Checks whether variable v can be replaced by term t, taking the substitution subst into account.
# Returns True if v can be replaced by t, and False otherwise.
# The check fails if, also considering the substitution, variable v occurs in
# term t (so replacing v would be an infinite process).
def occur_check(v, t, subst, topLevel = True):
if is_constant(t):
return True
elif is_variable(t):
if get_name(t) in subst:
return occur_check(v, substitute(t, subst), subst, False)
else:
if topLevel:
return True
else:
if get_name(v) != get_name(t):
return True
else:
return False
elif is_function_call(t):
for arg in get_args(t):
if occur_check(v, arg, subst, False) == False:
return False
return True
return True
# Unifies formulas f1 and f2. The result of unification is a substitution (a dict variable-name -> term)
# such that applying the substitution to both formulas yields identical results.
def unify(f1, f2, subst = None):
# TODO
#return {}
stack = []
if subst is None:
subst = {}
stack.append((f1, f2))
while stack:
(s, t) = stack.pop()
while is_variable(s) and get_name(s) in subst:
s = substitute(s, subst)
while is_variable(t) and get_name(t) in subst:
t = substitute(t, subst)
if s != t:
if is_variable(s):
if not occur_check(s, t, subst):
return False
subst[get_name(s)] = t
elif is_variable(t):
if not occur_check(t, s, subst):
return False
subst[get_name(t)] = s
elif has_args(s) and has_args(t):
if get_head(s) != get_head(t) or len(get_args(s)) != len(get_args(t)):
return False
for i in range(len(get_args(s))):
stack.append((get_args(s)[i], get_args(t)[i]))
else:
return False
return subst
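# --- Illustrative example (added, not part of the original skeleton) ---
# Unifying P(?x, f[?y]) with P(1, f[2]) binds ?x -> 1 and ?y -> 2:
#
#     subst = unify(make_atom("P", make_var("x"), make_function_call("f", make_var("y"))),
#                   make_atom("P", make_const(1), make_function_call("f", make_const(2))))
#     # subst == {'y': ('cnt', 2), 'x': ('cnt', 1)}
#
# while unify(make_atom("P", make_var("x")), make_atom("Q", make_var("x"))) returns False.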
# Prints formula f.
# If the return_result argument is True,
# the result is not printed to the console but returned instead.
def print_formula(f, return_result = False):
ret = ""
if is_term(f):
if is_constant(f):
ret += str(get_value(f))
elif is_variable(f):
ret += "?" + get_name(f)
elif is_function_call(f):
ret += get_head(f) + "[" + "".join([print_formula(arg, True) + "," for arg in get_args(f)])[:-1] + "]"
else:
ret += "???"
elif is_atom(f):
ret += get_head(f) + "(" + "".join([print_formula(arg, True) + ", " for arg in get_args(f)])[:-2] + ")"
elif is_sentence(f):
# negation, conjunction or disjunction
args = get_args(f)
if len(args) == 1:
ret += get_head(f) + print_formula(args[0], True)
else:
ret += "(" + get_head(f) + "".join([" " + print_formula(arg, True) for arg in get_args(f)]) + ")"
else:
ret += "???"
if return_result:
return ret
    print(ret)
def print_KB(KB):
print("KB now:")
for s in KB:
print("\t\t\t" + print_formula(s, True))
# def get_recursive_pred(formula):
# p = get_premises(formula)
# for pi in p[0][1]:
# if not isinstance(pi[1], list):
# return pi[1]
# else:
# get_recursive_pred(pi[1][1])
def equal_terms(args1, args2):
if len(args1) != len(args2):
        # The predicates have different arities
return False
for i, arg in enumerate(args2):
if is_constant(arg):
if not is_constant(args1[i]) or get_value(args1[i]) != get_value(arg):
return False
if is_variable(arg):
if not is_variable(args1[i]) or get_name(args1[i]) != get_name(arg):
return False
if is_function_call(arg):
            if not is_function_call(args1[i]) or get_head(args1[i]) != get_head(arg):
return False
if not equal_terms(get_args(args1[i]), get_args(arg)):
return False
return True
def is_equal_to(a1, a2):
    # Does not check functions
if not is_atom(a1):
        # a1 is not an atom
return False
if get_head(a1) != get_head(a2):
        # The predicates have different names
return False
return equal_terms(get_args(a1), get_args(a2))
def forward_chaining(kb, theorem, verbose=True):
    # Keep the original knowledge base and work on a copy
local_kb = deepcopy(kb)
    # Two variables that describe the state of the search
    got_new_facts = True  # new facts were found in the last pass
    is_proved = False  # the theorem has been proved
    # Check whether the theorem has already been proved
for fact in filter(is_fact, local_kb):
if is_equal_to(fact, theorem):
return True
        if unify(fact, theorem) != False:
            if verbose:
                print("This already in KB: ")
                print_formula(fact)
is_proved = True
break
while (not is_proved) and got_new_facts:
got_new_facts = False
for rule in filter(is_rule, local_kb):
            # For each rule
new_facts = apply_rule(rule, list(filter(is_fact, local_kb)))
new_facts = list(filter(lambda fact: not any(list(filter(lambda orig: is_equal_to(fact, orig), local_kb))), new_facts))
if new_facts:
if verbose:
print "Applied rule: "
print_formula(rule)
got_new_facts = True
for fact in new_facts:
#if verbose:
# print("New fact: ", end = "")
# print_formula(fact)
if unify(fact, theorem) != False:
is_proved = True
add_statement(local_kb, fact)
if verbose:
print "Now in KB: "
print_formula(fact)
break
add_statement(local_kb, fact)
if is_proved:
break
if verbose:
if is_proved:
print "The theorem is TRUE!"
else:
print "The theorem is FALSE!"
return is_proved
def apply_rule_2(rule, facts):
if is_fact(rule):
return [rule]
solutions = []
premises = get_premises(rule)
premise = premises.pop()
rest = make_statement(get_conclusion(rule), premises)
for fact in facts:
s = unify(premise, fact)
if s != False:
solutions.extend(apply_rule_2(substitute(rest, s), facts))
return solutions
def get_all_solutions(premises, facts, subst):
if premises:
all_solutions = []
for f in facts:
new_subst = unify(premises[0], f, deepcopy(subst))
if new_subst != False:
all_solutions.extend(get_all_solutions(premises[1:], facts, new_subst))
else:
all_solutions = [subst]
return all_solutions
def apply_rule(rule, facts):
# TODO
#return []
    # variant 1
#return list(map(lambda s: substitute(deepcopy(get_conclusion(rule)), s), get_all_solutions(get_premises(rule), facts, {})))
    # variant 2
return apply_rule_2(rule, facts)
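# --- Small self-contained demo (added example; runs only when this file is executed directly) ---
if __name__ == "__main__":
    demo_kb = []
    add_statement(demo_kb, make_atom("Parent", make_const("Ann"), make_const("Bob")))
    add_statement(demo_kb, make_atom("Parent", make_const("Bob"), make_const("Cal")))
    # Parent(x, y) => Ancestor(x, y)
    add_statement(demo_kb, make_atom("Ancestor", make_var("x"), make_var("y")),
                  make_atom("Parent", make_var("x"), make_var("y")))
    # Ancestor(x, y) & Parent(y, z) => Ancestor(x, z)
    add_statement(demo_kb, make_atom("Ancestor", make_var("x"), make_var("z")),
                  make_atom("Ancestor", make_var("x"), make_var("y")),
                  make_atom("Parent", make_var("y"), make_var("z")))
    # Expected to be provable: Ann is an ancestor of Cal.
    forward_chaining(demo_kb, make_atom("Ancestor", make_const("Ann"), make_const("Cal")))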
|
import os
import numpy as np
import pandas as pd
import xarray as xr
import ESMF
USER = os.environ['USER']
os.environ['CESMDATAROOT'] = f'/glade/scratch/{USER}/inputdata'
import pop_tools
path_to_here = os.path.dirname(os.path.realpath(__file__))
regrid_dir = f'/glade/work/{USER}/adhoc-regridding'
os.makedirs(regrid_dir, exist_ok=True)
def _ensure_grid_file(grid_name, clobber):
"""ensure that grid file exists"""
grid_file = f'{regrid_dir}/{grid_name}.nc'
if os.path.exists(grid_file) and not clobber:
return grid_file
# generate file if needed
if grid_name in ['POP_gx1v6', 'POP_gx1v7', 'POP_gx3v7',]:
dso = pop_tools.get_grid(grid_name, scrip=True)
else:
raise ValueError('unknown grid')
dso.to_netcdf(grid_file)
return grid_file
def _esmf_pop_grid(grid_name, clobber=False):
"""instantiate an ESMF grid object"""
return ESMF.Grid(
filename=_ensure_grid_file(grid_name, clobber),
filetype=ESMF.api.constants.FileFormat.SCRIP,
add_corner_stagger=True,
pole_kind=[ESMF.api.constants.PoleKind.NONE, ESMF.api.constants.PoleKind.NONE], # is this the right choice?
)
def _esmf_locstream(lon, lat):
"""instantiate an ESMF locstream object"""
locstream = ESMF.LocStream(
len(lon), coord_sys=ESMF.CoordSys.SPH_DEG,
)
locstream["ESMF:Lon"] = lon.astype(np.dtype('f8'))
locstream["ESMF:Lat"] = lat.astype(np.dtype('f8'))
return locstream
def open_datastream(obs_name):
"""open raw dataset"""
filename_dict = dict(
dFe=f'{path_to_here}/dFe-database-2021-05-20.csv',
DOM=f'{path_to_here}/DOMobs.csv',
test=f'{path_to_here}/dfe-test.csv',
)
    try:
        filename = filename_dict[obs_name]
    except KeyError:
        raise ValueError(f'unknown obs name {obs_name}')
return pd.read_csv(filename, na_values=-999.).dropna(axis=0, how='all')
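# --- Usage sketch (illustrative only; relies on the obs_stream accessor defined
# below and on an xarray DataArray `da` on one of the supported POP grids) ---
#
#     df = open_datastream('dFe')
#     df.obs_stream.add_model_field(da, field_name='Fe_model', method='bilinear')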
@pd.api.extensions.register_dataframe_accessor('obs_stream')
class obs_datastream:
def __init__(self, pandas_obj):
self._validate(pandas_obj)
self._obj = pandas_obj
@staticmethod
def _validate(obj):
"""verify the requried columns are present"""
for field in ['lat', 'lon', 'depth']:
if field not in obj.columns:
raise AttributeError(f"Must have '{field}' column.")
def add_model_field(self, da_in, model_grid=None, field_name=None, method='bilinear'):
"""return a DataFrame with obs and model"""
# determine dimensions
if da_in.dims == ('z_t', 'nlat', 'nlon'):
nk, nj, ni = da_in.shape
elif da_in.dims == ('nlat', 'nlon'):
nk = 0
nj, ni = da_in.shape
else:
raise ValueError(f'dimensions not supported: {da_in.dims}')
# get model grid
if model_grid is None:
if (nj, ni) == (116, 100):
model_grid = 'POP_gx3v7'
elif (nj, ni) == (384, 320):
model_grid = 'POP_gx1v7'
else:
raise ValueError(f'cannot infer model grid: {da_in.dims}')
grid = _esmf_pop_grid(model_grid)
# define locstream
df = self._obj
n_obs = len(df)
locstream = _esmf_locstream(df.lon.values, df.lat.values)
# set up remapping TODO: precompute and save regrid?
srcfield = ESMF.Field(grid, name='srcfield')
dstfield = ESMF.Field(locstream, name='dstfield')
method_dict = dict(
bilinear=ESMF.RegridMethod.BILINEAR,
nearest=ESMF.RegridMethod.NEAREST_STOD,
)
        try:
            ESMF_RegridMethod = method_dict[method]
        except KeyError:
            raise ValueError(f'unknown method {method}')
regrid = ESMF.Regrid(
srcfield, dstfield,
regrid_method=ESMF_RegridMethod,
unmapped_action=ESMF.UnmappedAction.ERROR,
)
if field_name is None:
field_name = da_in.name
i = 1
while field_name in df:
field_name = f'{da_in.name}_{i}'
i += 1
# 2D field
if nk == 0:
dstfield.data[...] = np.nan
srcfield.data[...] = da_in.data[:, :].T
dstfield = regrid(srcfield, dstfield, zero_region=ESMF.Region.SELECT)
df[field_name] = dstfield.data
else:
# TODO: this is a little clunky, would be better to simply do 3D interpolation
da_out_columns = np.ones((nk, n_obs)) * np.nan
for k in range(nk):
dstfield.data[...] = np.nan
srcfield.data[...] = da_in.data[k, :, :].T
dstfield = regrid(srcfield, dstfield, zero_region=ESMF.Region.SELECT)
da_out_columns[k, :] = dstfield.data
dstfield_z = np.ones((n_obs)) * np.nan
for n in range(n_obs):
dstfield_z[n] = np.interp(df.depth.values[n]*1e2, da_in.z_t, da_out_columns[:, n])
df[field_name] = dstfield_z |
import wpilib
from wpilib.interfaces import SpeedController
from wpilib import VictorSP
from magicbot import will_reset_to
class Intake:
intake_motor: VictorSP
speed = will_reset_to(0)
def spin(self, speed: float):
self.speed = speed
def execute(self):
self.intake_motor.set(self.speed)
|
import pandas
s = pandas.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 5, 3])
print(s)
mean = s.mean()
print("Mean: " + str(mean))
sd = s.std()
print("Standard Deviation: " + str(sd))
|
import random
class Character:
name = ""
attack = 0
defense = 0
maxhealth = 0
currenthealth = 0
def getName(self):
return self.name
def getCurrentHealth(self):
return self.currenthealth
def getMaxHealth(self):
return self.maxhealth
def getAttack(self):
return self.attack
def getDefense(self):
return self.defense
def updateHealth(self, value):
if value > 0:
mod = random.randint(80, 120)
value *= mod / (mod + self.defense)
value = int(value)
if self.currenthealth - value <= 0:
self.currenthealth = 0
print(self.name, "took", value, "damage!")
print(self.name, "died...")
return False
else:
self.currenthealth -= value
print(self.name, "took", value, "damage!")
return True
elif value < 0:
if self.currenthealth - value > self.maxhealth:
value = (self.maxhealth - self.currenthealth)
self.currenthealth = self.maxhealth
print(self.name, "healed", value, "hit points.", self.name, "is at full health!")
return True
else:
value = -value
self.currenthealth += value
print(self.name, "healed", value, "hit points.")
return True
class Hero(Character):
level = 0
exp = 0
expToLevel = 0
def __init__(self, name):
self.name = name
self.level = 1
self.expToLevel = 100
self.attack = 15
self.defense = 15
self.maxhealth = 100
self.currenthealth = 100
def getLevel(self):
return self.level
def getExp(self):
return self.exp
def getExpToLevel(self):
return self.expToLevel
def updateExp(self, value):
if value != 0:
self.exp += value
print(self.name, "gained", value, "experience points!")
if(self.exp >= self.expToLevel):
self.levelUp()
def levelUp(self):
self.level += 1
print(self.name, "leveled up!")
print (self.name, "is now level", str(self.level) + "!")
self.exp -= self.expToLevel
self.expToLevel *= 1.5
choice = None
while choice != "attack" and choice != "defense" and choice != "health":
print("Which stat do you want to focus on?")
choice = input("attack / defense / health: ")
if choice != "attack" and choice != "defense" and choice != "health":
print("Please choose \"attack\" or \"defense\" or \"health\"...")
if choice == "attack":
self.attack *= 1.5
self.attack = int(self.attack)
print(self.name + "'s attack has raised to", str(self.attack) + "!")
self.defense *= 1.3
self.defense = int(self.defense)
print(self.name + "'s defense has raised to", str(self.defense) + "!")
self.maxhealth *= 1.3
self.maxhealth = int(self.maxhealth)
self.currenthealth = self.maxhealth
print(self.name + "'s max health has raised to", self.maxhealth, "and their health has been fully restored!")
if choice == "defense":
self.attack *= 1.3
self.attack = int(self.attack)
print(self.name + "'s attack has raised to", str(self.attack) + "!")
self.defense *= 1.5
self.defense = int(self.defense)
print(self.name + "'s defense has raised to", str(self.defense) + "!")
self.maxhealth *= 1.3
self.maxhealth = int(self.maxhealth)
self.currenthealth = self.maxhealth
print(self.name + "'s max health has raised to", self.maxhealth, "and their health has been fully restored!")
if choice == "health":
self.attack *= 1.3
self.attack = int(self.attack)
print(self.name + "'s attack has raised to", str(self.attack) + "!")
self.defense *= 1.3
self.defense = int(self.defense)
print(self.name + "'s defense has raised to", str(self.defense) + "!")
self.maxhealth *= 1.5
self.maxhealth = int(self.maxhealth)
self.currenthealth = self.maxhealth
print(self.name + "'s max health has raised to", self.maxhealth, "and their health has been fully restored!")
if(self.exp >= self.expToLevel):
self.levelUp()
class Monster(Character):
def __init__(self, name, attack, defense, maxHealth):
self.name = name
self.attack = attack
self.defense = defense
self.maxhealth = maxHealth
self.currenthealth = maxHealth
def decideNextMove(self):
factor = random.randint(0, self.currenthealth + self.attack + self.defense)
if factor == 0:
return "retreat"
elif factor <= self.currenthealth + self.attack:
return "attack"
else:
return "defend"
class NPC(Character):
attack = None
defense = None
    maxhealth = None
currenthealth = None
dialogue = []
currentLine = 0
def __init__(self, name, dialogue):
self.name = name
self.dialogue = dialogue
def getNextLine(self):
if len(self.dialogue) <= self.currentLine:
return None
else:
self.currentLine += 1
return self.dialogue[self.currentLine - 1]
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import json
import pytest
@pytest.mark.parametrize(
'http_method,http_path',
(
('GET', '/api/v1/auth/clients'),
('POST', '/api/v1/auth/clients'),
),
)
def test_unauthorized_access(http_method, http_path, flask_app_client):
response = flask_app_client.open(method=http_method, path=http_path)
print(response)
assert response.status_code == 401
def test_created_user_login(flask_app_client, admin_user, request):
from app.modules.users.models import User
with flask_app_client.login(admin_user, auth_scopes=('users:write',)):
response = flask_app_client.post(
'/api/v1/users/',
content_type='application/json',
data=json.dumps({'email': 'test.user@example.org', 'password': 'password'}),
)
assert response.status_code == 200
assert response.json['email'] == 'test.user@example.org'
assert response.json['is_active'] is True
user_guid = response.json['guid']
request.addfinalizer(lambda: User.query.get(user_guid).delete())
response = flask_app_client.post(
'/api/v1/auth/sessions',
content_type='application/json',
data=json.dumps({'email': 'test.user@example.org', 'password': 'password'}),
)
request.addfinalizer(lambda: flask_app_client.cookie_jar.clear())
assert response.status_code == 200, response.json
flask_app_client.cookie_jar.clear()
# Check users can't login if is_active is False
with flask_app_client.login(admin_user, auth_scopes=('users:write',)):
response = flask_app_client.patch(
f'/api/v1/users/{user_guid}',
content_type='application/json',
data=json.dumps(
[
{
'op': 'test',
'path': '/current_password',
'value': admin_user.password_secret,
},
{
'op': 'replace',
'path': '/is_active',
'value': False,
},
],
),
)
assert response.status_code == 200
assert response.json['is_active'] is False
response = flask_app_client.post(
'/api/v1/auth/sessions',
content_type='application/json',
data=json.dumps({'email': 'test.user@example.org', 'password': 'password'}),
)
assert response.status_code == 401, response.json
assert response.json['message'] == 'Account Disabled'
|
from django.shortcuts import render
# Create your views here.
def home_view(request, *args, **kwargs):
return render(request, "home/home_view.html", {})
def chat_room_view(request, *args, **kwargs):
return render(request, "home/chat_room_view.html", {})
|
# -*- coding: utf-8 -*-
from contextlib import contextmanager
import os
import tempfile
import json
def _tempfile(filename):
"""
Create a NamedTemporaryFile instance to be passed to atomic_writer
"""
return tempfile.NamedTemporaryFile(mode='w',
dir=os.path.dirname(filename),
prefix=os.path.basename(filename),
suffix=os.fsencode('.tmp'),
delete=False)
@contextmanager
def atomic_write(filename):
    """
    Open a NamedTemporaryFile handle in a context manager.
    The original file is only replaced if the block completes without errors.
    """
    f = _tempfile(os.fsencode(filename))
    try:
        yield f
    except Exception:
        f.close()
        os.unlink(f.name)
        raise
    else:
        f.close()
        # replace the original file with the new temp file (atomic on success)
        os.replace(f.name, filename)
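# Example (illustrative): atomically rewrite a JSON file.
#
#     with atomic_write("products.json") as f:
#         json.dump([], f)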
def get_item(filename, uuid):
"""
Read entry from JSON file
"""
with open(os.fsencode(str(filename)), "r") as f:
data = json.load(f)
results = [i for i in data if i["uuid"] == str(uuid)]
if results:
return results
return None
def set_item(filename, item):
    """
    Save entry to JSON file
    """
    with open(os.fsencode(str(filename))) as products_file:
        # load the JSON data into memory
        products_data = json.load(products_file)
    # check if UUID already exists
    uuid_list = [i for i in filter(
        lambda z: z["uuid"] == str(item["uuid"]), products_data)]
    if len(uuid_list) > 0:
        return None  # record already exists
    # add the new item and atomically rewrite the JSON file
    products_data.append(item)
    with atomic_write(os.fsencode(str(filename))) as temp_file:
        # save the new JSON to the temp file
        json.dump(products_data, temp_file)
    return True
def update_item(filename, item, uuid):
"""
Update entry by UUID in the JSON file
"""
with atomic_write(os.fsencode(str(filename))) as temp_file:
with open(os.fsencode(str(filename))) as products_file:
# load the JSON data into memory
products_data = json.load(products_file)
# apply modifications to the JSON data wrt UUID
# TODO: handle this in a neat way
if 'products' in products_data[-1]:
# handle orders object
[products_data[i]["products"][0].update(item) for (
i, j) in enumerate(products_data) if j["uuid"] == str(uuid)]
else:
# handle products object
[products_data[i].update(item) for (i, j) in enumerate(
products_data) if j["uuid"] == str(uuid)]
# save the modified JSON data into the temp file
json.dump(products_data, temp_file)
return True
|
"""
Simple text plugin forms
"""
from html import unescape
from unicodedata import normalize
from django import forms
from django.utils.html import strip_spaces_between_tags
from djangocms_text_ckeditor.widgets import TextEditorWidget
from .models import SimpleText
class CKEditorPluginForm(forms.ModelForm):
"""
Plugin form used to fill its content from frontend admin.
"""
class Meta:
"""
Form meta attributes
"""
model = SimpleText
widgets = {"body": TextEditorWidget}
fields = ["body"]
def clean_body(self):
"""Normalize and unescape the text submitted by CKEditor then remove useless spaces."""
body = self.cleaned_data.get("body", "")
body = normalize("NFKC", body)
body = unescape(body)
return strip_spaces_between_tags(body)
|
#!/usr/bin/python
import networkx as nx
import pprint
import sys
pp = pprint.PrettyPrinter(indent=2)
def write_dfg(dfgfile, dfg):
nx.drawing.nx_agraph.write_dot(dfg, 'ldst.'+dfgfile)
def add_edges(a,b, sorted_memops,dfg):
for val in sorted_memops[a+1:b+1]:
# print "adding edges : val_a= " + str(sorted_memops[a]) + " val_b= " + str(val)
dfg.add_edge(sorted_memops[a],val, color='orange')
def main(dfgfile):
global pp
dfg = nx.DiGraph(nx.drawing.nx_agraph.read_dot(dfgfile))
memops = []
# Create a list of memops in sorted order
# x3 = dfg.nodes()
for x1 in dfg.nodes():
if dfg.node[x1]['opcode'] == 'Load' or dfg.node[x1]['opcode'] == 'Store':
if '!LO' not in dfg.node[x1]['ir']:
memops.append(int(x1))
sorted_memops = sorted(memops)
print sorted_memops
a=0
for idx,val in enumerate(sorted_memops):
if dfg.node[str(val)]['opcode']=='Store':
b=idx
print "a= " + str(a) + " b= " + str(b)
# print "a= " + str(a) +" val_a: "+ str(sorted_memops[a]) + " b= " + str(b) + " val_b: " + str(sorted_memops[b])
add_edges(a,b,sorted_memops,dfg)
a=b
write_dfg(dfgfile,dfg)
if __name__ == "__main__":
main(sys.argv[1])
|
from flask import Flask, render_template, send_from_directory, redirect, url_for, request, flash,Blueprint,jsonify, current_app
from portfolio.forms import LoginForm, Testimoni_form, SkillForm, ProjectForm, JobForm,changepictureForm
import os
from PIL import Image
from datetime import datetime
from flask_login import login_user, current_user, logout_user, login_required
from portfolio import db
from portfolio.models import Testimonial, User, Job, Project, Skills
import uuid
main = Blueprint('main', __name__,static_folder='static', static_url_path='/static/')
@main.route('/')
def home():
testimoni = Testimonial.query.all()
sk_data = Skills.query.all()
project = Project.query.all()
job = Job.query.all()
job.reverse()
exp_list = []
for j in job:
if j.end:
start = j.start
end = j.end
diff = end-start
exp_year = divmod(diff.days, 365)
exp_month = divmod(exp_year[1], 30)
exp_string = f'{exp_year[0]} year {exp_month[0]} month {exp_month[1]} days'
exp_list.append(exp_string)
elif not j.end:
start = j.start
end = datetime.now()
diff = end-start
exp_year = divmod(diff.days, 365)
exp_month = divmod(exp_year[1], 30)
exp_string = f'{exp_year[0]} year {exp_month[0]} month {exp_month[1]} days'
exp_list.append(exp_string)
return render_template('home.html', testimoni_data=testimoni, skills=sk_data,projects = project,jobs=job,exp=exp_list)
@main.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('main.data'))
login = LoginForm()
#print(login.hidden_tag())
if login.validate_on_submit():
user_data = User.query.filter_by(email=login.email.data).first()
if user_data and user_data.password == login.password.data:
login_user(user_data)
return redirect(url_for('main.data'))
else:
flash('Login Unsuccessful. Please check email and password', 'danger')
# print(login.errors)
return render_template('login.html', form=login)
@main.route("/logout")
def logout():
logout_user()
return redirect(url_for('main.login'))
@main.route('/formdata', methods=['GET', 'POST'])
@login_required
def data():
testimonial_data = Testimonial.query.all()
skill_data = Skills.query.all()
project_data = Project.query.all()
jobdata = Job.query.all()
return render_template('edit.html', testimoni=testimonial_data, skills=skill_data, projects=project_data,jobs=jobdata)
def save_picture(form_picture):
# random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
# picture_fn = random_hex + f_ext
picture_path = os.path.join(
main.root_path, 'static/img', form_picture.filename)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return form_picture.filename
@main.route('/formdata/<path:form_type>/<int:data_id>', methods=['GET', 'POST'])
@login_required
def edit_post(data_id, form_type):
if form_type == 'testimoni':
testimoni_data = Testimonial.query.filter_by(id=data_id).first()
test_form = Testimoni_form(id=data_id)
pic_form = changepictureForm(id=data_id)
if test_form.submit.data and test_form.validate_on_submit():
testimoni_data.name = test_form.name.data
testimoni_data.desc = test_form.desc.data
testimoni_data.testimony = test_form.main.data
db.session.commit()
return redirect(url_for('main.data'))
elif request.method == 'GET':
test_form.name.data = testimoni_data.name
test_form.desc.data = testimoni_data.desc
test_form.main.data = testimoni_data.testimony
return render_template('edit_data.html', form=test_form, type=form_type, pic=True, image=testimoni_data, form_pic=pic_form)
return render_template('edit_data.html', form=test_form, type=form_type,pic=True,image=testimoni_data,form_pic=pic_form)
elif form_type == 'skill':
skilldata = Skills.query.filter_by(id=data_id).first()
skill_add_form = SkillForm(id=data_id)
if skill_add_form.validate_on_submit():
skilldata.sk_name = skill_add_form.skillsname.data
skilldata.sk_value = skill_add_form.skills.data
db.session.commit()
return redirect(url_for('main.data'))
elif request.method == 'GET':
skill_add_form.skillsname.data = skilldata.sk_name
skill_add_form.skills.data = skilldata.sk_value
return render_template('edit_data.html', form=skill_add_form, type='testimoni',image=None)
return render_template('edit_data.html', form=skill_add_form, type='testimoni',image=None)
elif form_type == 'project':
projectdata = Project.query.filter_by(id=data_id).first()
projectform = ProjectForm(id=data_id)
if projectform.validate_on_submit():
for filled_data in projectform:
if filled_data.data == '':
filled_data.data = None
projectdata.p_name=projectform.title.data
projectdata.p_description=projectform.Description.data
projectdata.Organization=projectform.Organization.data
projectdata.p_url=projectform.URL.data
projectdata.cred_id=projectform.Credential.data
projectdata.certi_url = projectform.Certificate.data
db.session.commit()
return redirect(url_for('main.data'))
elif request.method == 'GET':
projectform.title.data = projectdata.p_name
projectform.Description.data = projectdata.p_description
projectform.Organization.data = projectdata.Organization
projectform.URL.data = projectdata.p_url
projectform.Credential.data = projectdata.cred_id
projectform.Certificate.data = projectdata.certi_url
return render_template('edit_data.html', form=projectform, type='testimoni')
return render_template('edit_data.html', form=projectform, type='testimoni')
elif form_type == 'job':
jobdata = Job.query.filter_by(id=data_id).first()
jobform = JobForm(id=data_id)
if jobform.validate_on_submit():
jobdata.role=jobform.role.data
jobdata.company=jobform.company.data
jobdata.start=datetime.strptime(jobform.start.data.strftime("%d %B %Y"),"%d %B %Y")
            if jobform.end.data is not None:
                jobform.end.data = datetime.strptime(jobform.end.data.strftime("%d %B %Y"), "%d %B %Y")
            jobdata.end = jobform.end.data
jobdata.place=jobform.place.data
jobdata.jd=jobform.jd.data
db.session.commit()
return redirect(url_for('main.data'))
elif request.method == 'GET':
jobform.role.data = jobdata.role
jobform.company.data = jobdata.company
jobform.start.data = jobdata.start
if jobdata.end == None:
jobform.current.data = True
else:
jobform.end.data = jobdata.end
jobform.place.data = jobdata.place
jobform.jd.data = jobdata.jd
return render_template('edit_data.html', form=jobform, type=form_type)
return render_template('edit_data.html', form=jobform, type=form_type)
@main.route('/update/<int:data_id>', methods=['POST'])
def update_pic(data_id):
testimoni_data = Testimonial.query.filter_by(id=data_id).first()
pic_form = changepictureForm(id=data_id)
if pic_form.picture.data:
pic_file = save_picture(pic_form.picture.data)
testimoni_data.image_file = pic_file
else:
testimoni_data.image_file = 'recom.jpg'
db.session.commit()
print(testimoni_data.image_file)
return redirect(url_for('main.edit_post',data_id=data_id,form_type='testimoni'))
@main.route('/formdata/<path:form_type>/<int:data_id>/del_post', methods=['GET', 'POST'])
@login_required
def delete_post(data_id, form_type):
if form_type == 'testimoni':
Testimonial.query.filter_by(id=data_id).delete()
db.session.commit()
elif form_type == 'skill':
Skills.query.filter_by(id=data_id).delete()
db.session.commit()
elif form_type == 'project':
Project.query.filter_by(id=data_id).delete()
db.session.commit()
elif form_type == 'job':
Job.query.filter_by(id=data_id).delete()
db.session.commit()
return redirect(url_for('main.data'))
@main.route('/formdata/<path:form_type>/add_entry', methods=['GET', 'POST'])
@login_required
def add_entry(form_type):
if form_type == 'testimoni':
test_form = Testimoni_form()
if test_form.validate_on_submit():
if test_form.picture.data:
pic_file = save_picture(test_form.picture.data)
new_data = Testimonial(
name=test_form.name.data, desc=test_form.desc.data, testimony=test_form.main.data, image_file=pic_file,author=current_user)
else:
new_data = Testimonial(
name=test_form.name.data, desc=test_form.desc.data, testimony=test_form.main.data,author=current_user)
db.session.add(new_data)
db.session.commit()
return redirect(url_for('main.data'))
return render_template('new_data_test.html', form=test_form, type=form_type, pic=True,title='New Testimoni Data')
if form_type == 'skill':
skill_add_form = SkillForm()
if skill_add_form.validate_on_submit():
skilldata = Skills(sk_name=skill_add_form.skillsname.data,
sk_value=skill_add_form.skills.data,author=current_user)
db.session.add(skilldata)
db.session.commit()
return redirect(url_for('main.data'))
return render_template('new_data_test.html', form=skill_add_form,type='testimoni',title='New Skill Data')
if form_type == 'project':
projectform = ProjectForm()
if projectform.validate_on_submit():
for filled_data in projectform:
if filled_data.data == '':
filled_data.data = None
project_data = Project(p_name=projectform.title.data, p_description=projectform.Description.data,
Organization=projectform.Organization.data, p_url=projectform.URL.data,
cred_id=projectform.Credential.data, certi_url=projectform.Certificate.data,author=current_user)
db.session.add(project_data)
db.session.commit()
return redirect(url_for('main.data'))
return render_template('new_data_test.html', form=projectform,type='testimoni',title='New Project Data')
if form_type == 'job':
jobform = JobForm()
if jobform.validate_on_submit():
            if jobform.end.data is not None:
                jobform.end.data = datetime.strptime(jobform.end.data.strftime("%B %Y"), "%B %Y")
newjob = Job(role=jobform.role.data, company=jobform.company.data, start=datetime.strptime(jobform.start.data.strftime("%B %Y"),"%B %Y"),
end=jobform.end.data, place=jobform.place.data, jd=jobform.jd.data,author=current_user)
db.session.add(newjob)
db.session.commit()
return redirect(url_for('main.data'))
return render_template('new_data_test.html', form=jobform,type=form_type,title='New Job Data')
@main.route('/download_resume', methods=['GET'])
def getpdf():
# print(main.static_folder)
return send_from_directory(f'{main.static_folder}', "Dripta_Resume.pdf")
@main.route('/download/<path:filename>', methods=['GET'])
def download(filename):
path = os.path.join("static",'download')
return send_from_directory(path, filename)
@main.route('/upload_resume', methods=['POST'])
def upload():
file = request.files['file']
# name = file.filename
# current_app.config["resume_name_hex"] = f"{name.split('.')[0]}{uuid.uuid1().hex}.pdf"
save_path = os.path.join(main.static_folder, "Dripta_Resume.pdf")
file.save(save_path)
return jsonify({"massage": "all ok"})
|
from flask import Flask,request,jsonify
import requests
import json
def main():
file=open("sensor_registration.json","r")
data=json.load(file)
d = {'username':'test1234','config_file':data}
r=requests.post(url="http://13.68.206.239:5051/sensorregistration",json=d)
# r=requests.post(url="http://127.0.0.1:9000/temperature1",json = d)
# d = {'username':'ias11'}
# r=requests.post(url="http://127.0.0.1:5050/getsensordata",json=d)
# r=requests.post(url="http://127.0.0.1:5000/getsensordata",json=d)
d = r.json()
print(d)
# return 200
if __name__ == '__main__':
main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 entro <entropy1208@yahoo.co.in>
#
# Distributed under terms of the MIT license.
import numpy as np
from sklearn import neighbors, svm
from sklearn.model_selection import cross_validate
from sklearn.naive_bayes import GaussianNB
import pandas as pd
from stat_tests import friedman_test, nemenyi_multitest
df = pd.read_csv('spambase.data', header=None)
X = df[df.columns[:-1]]
y = np.array(df[df.columns[-1]])
KNeighbors_clf = neighbors.KNeighborsClassifier()
SVM_clf = svm.SVC()
Naive_Bayes_clf = GaussianNB()
classifiers = [KNeighbors_clf, SVM_clf, Naive_Bayes_clf]
measurements = []
for classifier in classifiers:
scores = cross_validate(classifier, X, y,
scoring=['accuracy', 'f1'], cv=10,
return_train_score=False)
measurements.append(scores['test_accuracy'])
print "Results : ", scores['test_accuracy']
print "Train time : ", np.average(scores['fit_time'])
print "F1 measure : ", np.average(scores['test_f1'])
print "Accuracy : ", np.average(scores['test_accuracy'])
print "Standard Deviation : ", np.std(scores['test_accuracy'])
friedman_statistic, _, rankings, pivots = friedman_test(*measurements)
ranks = {j: rankings[i] for i, j in enumerate(['KNeighbors',
'SVC', 'Naive_Bayes'])}
critical_value = 7.8
if friedman_statistic > critical_value:
print '''Null Hypothesis rejected!
The average ranks as a whole display significant differences!
Doing Nemenyi test now!'''
comparisons, z_values, _, _ = nemenyi_multitest(ranks)
print comparisons, z_values
|
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
spec = load_test_spec("mpeg2", "encode")
def check_psnr(params):
call(
"ffmpeg -hwaccel qsv -hwaccel_device /dev/dri/renderD128 -v verbose"
" -c:v mpeg2_qsv -i {encoded} -vf 'hwdownload,format=nv12'"
" -pix_fmt {mformat} -f rawvideo -vsync passthrough -vframes {frames}"
" -y {decoded}".format(**params))
get_media().baseline.check_psnr(
psnr = calculate_psnr(
params["source"], params["decoded"],
params["width"], params["height"],
params["frames"], params["format"]),
context = params.get("refctx", []),
)
#-------------------------------------------------#
#---------------------- CQP ----------------------#
#-------------------------------------------------#
@slash.requires(have_ffmpeg)
@slash.requires(have_ffmpeg_qsv_accel)
@slash.requires(have_ffmpeg_mpeg2_qsv_encode)
@slash.requires(have_ffmpeg_mpeg2_qsv_decode)
@slash.requires(using_compatible_driver)
@slash.parametrize(*gen_mpeg2_cqp_parameters(spec, ['main', 'simple']))
@platform_tags(MPEG2_ENCODE_PLATFORMS)
def test_cqp(case, gop, bframes, qp, quality, profile):
params = spec[case].copy()
mprofile = mapprofile("mpeg2", profile)
if mprofile is None:
slash.skip_test("{} profile is not supported".format(profile))
params.update(
profile = mprofile, gop = gop, bframes = bframes, qp = qp,
quality = quality, mformat = mapformat(params["format"]))
params["encoded"] = get_media()._test_artifact(
"{}-{gop}-{bframes}-{qp}-{quality}-{profile}"
".m2v".format(case, **params))
params["decoded"] = get_media()._test_artifact(
"{}-{gop}-{bframes}-{qp}-{quality}-{profile}-{width}x{height}-{format}"
".yuv".format(case, **params))
if params["mformat"] is None:
slash.skip_test("{format} format not supported".format(**params))
call(
"ffmpeg -init_hw_device qsv=qsv:hw -hwaccel qsv -filter_hw_device qsv"
" -v verbose -f rawvideo -pix_fmt {mformat} -s:v {width}x{height}"
" -i {source} -vf 'format=nv12,hwupload=extra_hw_frames=64' -an"
" -c:v mpeg2_qsv -profile:v {profile} -g {gop} -bf {bframes}"
" -q {qp} -preset {quality} -vframes {frames}"
" -y {encoded}".format(**params))
check_psnr(params)
|
# -*- coding: utf-8 -*-
from flask import Flask, request, render_template, jsonify
import summarizer_machovec_modified
app = Flask(__name__)
app.config['DEBUG'] = True
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
text = request.form.get('text') or ''
summary = ''
if text:
print('Summarizing...')
summary = summarizer_machovec_modified.summarize(text)
print(f'\n======summary======\n{summary}')
return jsonify(summary)
if __name__ == '__main__':
app.run()
|
"""Programa 7_4.py
Descrição: Escrever um programa que leia uma string e imprima quantas vezes cada caracter aparece.
Autor:Cláudio Schefer
Data:
Versão: 001
"""
# Declaração de variáveis
s = ""
d = {}
# Entrada de dados
s = input("Digite a string:")
d = {}
# Processamento
for letra in s:
if letra in d:
d[letra] = d[letra] + 1
else:
d[letra] = 1
for chave in d:
# Saída de dados
print(" %s : %dx" % (chave, d [chave]))
|
import tensorflow as tf
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition.
"""
from tensorflow.python.framework.graph_util import convert_variables_to_constants
graph = session.graph
with graph.as_default():
freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = convert_variables_to_constants(session, input_graph_def,
output_names, freeze_var_names)
return frozen_graph
mnist = tf.keras.datasets.mnist
tf.keras.backend.set_learning_phase(0)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train.reshape((-1, 28*28))
x_test = x_test.reshape((-1, 28*28))
model = tf.keras.models.Sequential()
#model.add(tf.keras.layers.Reshape((784,), input_shape=(28, 28, )))
model.add(tf.keras.layers.Dense(512, activation=tf.nn.relu))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax, name="output_node"))
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print(x_test.shape)
print(x_train.shape)
model.fit(x_train, y_train, epochs=1)
model.evaluate(x_test, y_test)
from tensorflow.contrib.keras import backend as K
# Create, compile and train model...
frozen_graph = freeze_session(K.get_session(),
output_names=[out.op.name for out in model.outputs])
tf.train.write_graph(frozen_graph, "some_directory", "my_model_text.pb", as_text=True)
tf.train.write_graph(K.get_session().graph_def, "some_directory", "my_model3.pb", as_text=True)
saver = tf.train.Saver()
tf.train.get_or_create_global_step()
sess = tf.keras.backend.get_session()
tf.train.write_graph(sess.graph_def, 'tmp', 'model.pbtxt')
save_path = saver.save(sess, 'tmp/model.cpkt')
# import cv2
# import numpy as np
#
# image = x_test[1, :, :]#np.zeros((28, 28), dtype=np.float32)
# print(image.shape)
# blob = cv2.dnn.blobFromImage(image)
#
# print(blob.shape)
# #print(blob)
#
# print("[INFO] loading model...")
# net = cv2.dnn.readNetFromTensorflow("some_directory/my_model.pb")
#
# print('set input')
# net.setInput(blob)
# print('forward')
# print(net.empty())
# #print(net.getLayersCount())
# preds = net.forward()
# print(preds)
# print('finished') |
"""Unit tests for stem.py"""
from sbws.util.stem import parse_user_torrc_config
def test_parse_user_torrc_config_new_keyvalue_options_success():
config_torrc_extra_lines = """
Log debug file /tmp/tor-debug.log
NumCPUs 1
"""
torrc_dict = parse_user_torrc_config({}, config_torrc_extra_lines)
assert torrc_dict == \
{'Log': 'debug file /tmp/tor-debug.log', 'NumCPUs': '1'}
def test_parse_user_torrc_config_existing_keyvalue_options_fail(caplog):
torrc_dict = {'SocksPort': 'auto'}
config_torrc_extra_lines = """
SocksPort 9050
"""
torrc_dict_new = parse_user_torrc_config(
torrc_dict, config_torrc_extra_lines)
# the new dictionary contains the existing key option and a list with both
# the existing value and the new value
assert torrc_dict_new != torrc_dict
assert torrc_dict_new == {'SocksPort': ['auto', '9050']}
def test_parse_user_torrc_config_new_key_option_success():
config_torrc_extra_lines = """
LongLivedPorts
"""
torrc_dict = parse_user_torrc_config({}, config_torrc_extra_lines)
assert torrc_dict == {'LongLivedPorts': None}
|
import tensorflow as tf
import sagemaker
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras import utils
import os, json, logging
import numpy as np
import boto3
from botocore.exceptions import ClientError
from tqdm import tqdm
def uploadFile(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
# Upload the file
s3_client = boto3.client('s3')
try:
response = s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.error(e)
return False
return True
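# Minimal usage sketch (the bucket name here is purely illustrative):
# uploadFile('data/X_train.npy', 'my-example-bucket', 'training/X_train.npy')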
def main():
print('+------------------------------------------------')
print('| Downloading the training data ')
print('+------------------------------------------------')
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
X_train = (train_images/255).astype('float32')
X_test = (test_images/255).astype('float32')
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
    # One-hot encode the labels into 10 classes
numClasses = 10
y_train = utils.to_categorical(train_labels, numClasses)
y_test = utils.to_categorical(test_labels, numClasses)
print('\nGenerated array shapes:')
print(f'X_train : {X_train.shape}')
print(f'X_test : {X_test.shape}')
print(f'y_train : {y_train.shape}')
print(f'y_test : {y_test.shape}')
if not os.path.exists('data'):
print('The ./data folder does not exist. Generating the ./data folder')
os.makedirs('data')
print('+------------------------------------------------')
print('| Saving the data to a local folder ')
print('+------------------------------------------------')
np.save( 'data/X_train.npy', X_train )
np.save( 'data/y_train.npy', y_train )
np.save( 'data/X_test.npy', X_test )
np.save( 'data/y_test.npy', y_test )
# np.savez('data/training', image=X_train, label=y_train)
# np.savez('data/validation', image=X_test, label=y_test)
print('\nGenerated data within the ./data folder:')
print(os.listdir('data'))
print('+------------------------------------------------')
print('| Saving the data to the S3 bucket ')
print('+------------------------------------------------')
    with open('config/awsConfig/awsConfig.json') as fConfig:
        awsConfig = json.load(fConfig)
    role = awsConfig['arn']
    bucket = awsConfig['s3bucket']
uploadFile( 'data/X_train.npy', bucket, 'training/X_train.npy' )
uploadFile( 'data/y_train.npy', bucket, 'training/y_train.npy' )
uploadFile( 'data/X_test.npy', bucket, 'validation/X_test.npy' )
uploadFile( 'data/y_test.npy', bucket, 'validation/y_test.npy' )
print('+------------------------------------------------')
print('| Create data for TensorFlow Serving ')
print('+------------------------------------------------')
os.makedirs('data/serving/X', exist_ok=True)
os.makedirs('data/serving/y', exist_ok=True)
for i, (xTemp, yTemp) in enumerate(tqdm(zip(X_test, y_test), total=X_test.shape[0])):
fileName = f'data/serving/X/{i:07d}.npy'
np.save(fileName, xTemp)
# uploadFile(fileName, bucket, f'serving/X/{i:07d}.npy' )
with open(fileName.replace('.npy', '.json'), 'w') as fOut:
json.dump( xTemp.tolist(), fOut )
if i<10:
uploadFile(fileName, bucket, f'miniServing/X/{i:07d}.npy' )
uploadFile(fileName.replace('.npy', '.json'), bucket, f'miniServingJson/X/{i:07d}.json' )
fileName = f'data/serving/y/{i:07d}.npy'
np.save(fileName, yTemp)
# uploadFile(fileName, bucket, f'serving/y/{i:07d}.npy' )
with open(fileName.replace('.npy', '.json'), 'w') as fOut:
json.dump( yTemp.tolist(), fOut )
if i<10:
uploadFile(fileName, bucket, f'miniServing/y/{i:07d}.npy' )
uploadFile(fileName.replace('.npy', '.json'), bucket, f'miniServingJson/y/{i:07d}.json' )
return
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0703,R1711
'''
TTS service modules
GoogleTTS - TTS from Google
XunfeiTTS - TTS from Xunfei (TBD)
AmazonPollyTTS - TTS from Amazon (TBD)
'''
import os
import logging
import html
from google.cloud import texttospeech
TTS_CONFIG = {
"GOOGLE_TTS_ENABLE": True,
"GOOGLE_APPLICATION_CREDENTIALS": "~/.configs.secure/google_tts_5ab978b84843.json",
"GOOGLE_TTS_LANAGUAGE_CODE": "cmn-CN",
"GOOGLE_TTS_VOICE_NAME": "cmn-CN-Wavenet-A",
"GOOGLE_TTS_SPEAKING_RATE": 0.9,
"GOOGLE_TTS_PARAGRAPH_BREAK_TIME": "200ms"
}
class GoogleTTS:
""" Wrapper class to provide google TTS service """
def __init__(self, config):
self.config = config
self.language_code = "cmn-CN"
self.voice_name = "cmn-CN-Wavenet-A"
self.speaking_rate = 1.0
self.paragraph_break_time = "1s"
if "GOOGLE_TTS_LANAGUAGE_CODE" in config:
self.language_code = config["GOOGLE_TTS_LANAGUAGE_CODE"]
if "GOOGLE_TTS_VOICE_NAME" in config:
self.voice_name = config["GOOGLE_TTS_VOICE_NAME"]
if "GOOGLE_TTS_SPEAKING_RATE" in config:
self.speaking_rate = float(config["GOOGLE_TTS_SPEAKING_RATE"])
if "GOOGLE_TTS_PARAGRAPH_BREAK_TIME" in config:
self.paragraph_break_time = config["GOOGLE_TTS_PARAGRAPH_BREAK_TIME"]
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = os.path.abspath(
os.path.expanduser(config["GOOGLE_APPLICATION_CREDENTIALS"]))
return
def synthesize_english_text_py2(self, content, output):
""" synthesize english from pure text input """
language_code = "en-US"
voice_name = "en-US-Wavenet-D"
client = texttospeech.TextToSpeechClient()
synthesis_input = texttospeech.types.SynthesisInput(text=content)
voice = texttospeech.types.VoiceSelectionParams(
language_code=language_code,
name=voice_name,
ssml_gender=texttospeech.enums.SsmlVoiceGender.MALE)
audio_config = texttospeech.types.AudioConfig(
audio_encoding=texttospeech.enums.AudioEncoding.MP3,
speaking_rate=self.speaking_rate)
response = client.synthesize_speech(
input_=synthesis_input,
voice=voice,
audio_config=audio_config)
# The response's audio_content is binary.
with open(output, "wb") as out:
out.write(response.audio_content)
logging.info('Audio content written to file %s', output)
return
@staticmethod
def list_voices():
"""Lists the available voices."""
client = texttospeech.TextToSpeechClient()
voices = client.list_voices()
for voice in voices.voices:
print("Name: {voice.name}")
for language_code in voice.language_codes:
print("Supported language: {language_code}")
ssml_gender = texttospeech.SsmlVoiceGender(voice.ssml_gender)
print("SSML Voice Gender: {ssml_gender.name}")
print("Natural Sample Rate Hertz: {voice.natural_sample_rate_hertz}\n")
def text_to_ssml(self, inputfile):
"""
auxiliary function to convert pure text to ssml .
break time will be inserted for each paragraph
"""
raw_lines = inputfile
escaped_lines = html.escape(raw_lines)
# Convert plaintext to SSML
        # Insert the configured break time after each paragraph (newline)
ssml = "<speak>{}</speak>".format(
escaped_lines.replace("\n", '\n<break time="%s"/>'%self.paragraph_break_time)
)
return ssml
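    # Worked example of the conversion above, assuming the default "1s" break:
    # text_to_ssml("Hello\nWorld") returns
    # '<speak>Hello\n<break time="1s"/>World</speak>'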
def synthesize_chinese_ssml(self, ssml_text, output):
""" synthesize Chinese from SSML input """
client = texttospeech.TextToSpeechClient()
synthesis_input = texttospeech.SynthesisInput(ssml=ssml_text)
voice = texttospeech.VoiceSelectionParams(
language_code=self.language_code,
name=self.voice_name,
ssml_gender=texttospeech.SsmlVoiceGender.FEMALE)
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.MP3,
speaking_rate=self.speaking_rate)
response = client.synthesize_speech(
input=synthesis_input,
voice=voice,
audio_config=audio_config)
with open(output, "wb") as out:
out.write(response.audio_content)
logging.info('Audio content written to file %s', output)
return
def synthesize_chinese_text(self, content, output):
""" synthesize Chinese from pure text input """
client = texttospeech.TextToSpeechClient()
synthesis_input = texttospeech.SynthesisInput(text=content)
voice = texttospeech.VoiceSelectionParams(
language_code=self.language_code,
name=self.voice_name,
ssml_gender=texttospeech.SsmlVoiceGender.FEMALE)
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.MP3,
speaking_rate=self.speaking_rate)
response = client.synthesize_speech(
input=synthesis_input,
voice=voice,
audio_config=audio_config)
# The response's audio_content is binary.
with open(output, "wb") as out:
out.write(response.audio_content)
logging.info('Audio content written to file %s', output)
return
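# A minimal usage sketch under the TTS_CONFIG defaults above (the credentials
# file must be valid; the input text and "demo.mp3" output name are
# illustrative only):
#
# tts = GoogleTTS(TTS_CONFIG)
# ssml = tts.text_to_ssml("First paragraph.\nSecond paragraph.")
# tts.synthesize_chinese_ssml(ssml, "demo.mp3")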
|
# Start with an empty list only if the including settings module has not
# already defined INSTALLED_APPS.
if 'INSTALLED_APPS' not in globals():
INSTALLED_APPS = []
INSTALLED_APPS += (
'bootstrap3',
'django_admin_bootstrapped',
'jquery',
'pinax.eventlog', # Not mandatory
'bootstrapform',
'django.contrib.staticfiles',
'django_tables2',
'background_task',
'django_tables2_reports',
'django_excel_to_model',
)
# TEMPLATE_CONTEXT_PROCESSORS += (
# "django.core.context_processors.request",
# 'django.core.context_processors.static',
# )
|