repo_name (string) | path (string) | copies (class) | size (string) | content (string) | license (class) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
alpha-beta-soup/errorgeopy | tests/test_geocoders.py | 1 | 6649 | import os
import encodings.idna
import pytest
import yaml
import geopy
import shapely
import errorgeopy.geocoders
@pytest.fixture
def addresses():
return (
'66 Great North Road, Grey Lynn, Auckland, 1021, New Zealand',
'Grey Lynn, Auckland, 1021, New Zealand',
'High Street, Lower Hutt, New Zealand',
'10 Aurora Street, Petone, Lower Hutt, Wellington', # Doesn't produce enough for a cluster
'10 Aurora Street, Petone, Lower Hutt, 5012', # Doesn't produce enough for a cluster
'Oriental Street, Lower Hutt, New Zealand',
'Oriental Bay, Wellington, New Zealand',
'Oriental Vay, Wellington, NZ', # Deliberate typo "Vay",
'ZJ6AZ2Ixgp1or4O' # Deliberate nonsense, random string
)
@pytest.fixture
def addresses_reverse():
return (
(-37.8004971, 174.868439), # Near me!
(-41.2296258, 174.8828724), # 10 Aurora Street, Petone, Lower Hutt
(-41.1945832, 174.9403476), # High Street, Lower Hutt
(-41.2910862, 174.7882479), # Oriental Bay, Wellington
# (-90, 0) # South Pole
# (-91, 181) # Out of range
)
@pytest.fixture
def configfile():
return os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'config.yml')
@pytest.fixture
def load_configfile():
return yaml.load(open(configfile(), 'r'))
@pytest.fixture
def geocoderpool_fromfile():
return errorgeopy.geocoders.GeocoderPool.fromfile(configfile(), yaml.load)
@pytest.fixture
def geocoderpool(load=True):
if load:
return errorgeopy.geocoders.GeocoderPool(load_configfile())
else:
# Default settings (free no-sign-up APIs)
return errorgeopy.geocoders.GeocoderPool()
def test_load_gpool_from_file_with_caller():
gpool = geocoderpool_fromfile()
assert isinstance(
gpool, errorgeopy.geocoders.GeocoderPool
), 'failed to produce a GeocoderPool object on instantiation'
assert gpool.config == yaml.load(open(configfile(
), 'r')), 'configuration was mutated on instantiation'
assert getattr(gpool._geocoders, '__iter__',
False), 'GeocoderPool._geocoders is not iterable'
assert all([
issubclass(x.geocoder.__class__, geopy.geocoders.base.Geocoder)
for x in gpool.geocoders
]), 'not all of the GeocoderPool geocoders are geopy.Geocoder objects'
def test_load_gpool_from_file_without_caller():
gpool = geocoderpool()
assert isinstance(
gpool, errorgeopy.geocoders.GeocoderPool
), 'failed to produce a GeocoderPool object on instantiation'
assert gpool.config == load_configfile(
), 'configuration was mutated on instantiation'
assert getattr(gpool._geocoders, '__iter__',
False), 'GeocoderPool._geocoders is not iterable'
assert all([
issubclass(x.geocoder.__class__, geopy.geocoders.base.Geocoder)
for x in gpool.geocoders
]), 'not all of the GeocoderPool geocoders are geopy.Geocoder objects'
def test_geocoder_pool():
gpool = geocoderpool()
assert isinstance(gpool.geocoders, list)
def _generic_test_geocoderpool(gpool):
assert callable(gpool.geocode)
for test_case in addresses():
res = gpool.geocode(test_case)
assert isinstance(res, errorgeopy.location.Location)
assert isinstance(res._polygonisable(), bool)
assert all(
[isinstance(x, geopy.location.Location) for x in res.locations])
assert all([isinstance(x, str) for x in res.addresses])
assert all([isinstance(x, geopy.Point) for x in res.points])
assert isinstance(res.multipoint, shapely.geometry.MultiPoint) or (
res.multipoint is None and len(res) == 0)
assert isinstance(res.mbc, shapely.geometry.Polygon) or (
res.mbc is None and len(res) < 2)
assert isinstance(res.concave_hull, shapely.geometry.Polygon) or (
res.concave_hull is None and len(res) < 4)
assert isinstance(res.convex_hull, shapely.geometry.Polygon) or (
res.convex_hull is None and len(res) < 3)
assert isinstance(
res.centroid,
shapely.geometry.Point) or (res.centroid is None and len(res) == 0)
assert isinstance(res.clusters,
errorgeopy.location.LocationClusters) or (
res.clusters is None and len(res) == 0)
assert (res.clusters is None and len(res) == 0) or isinstance(
res.clusters.geometry_collection,
shapely.geometry.GeometryCollection)
assert (res.clusters is None and len(res) == 0) or isinstance(
res.clusters.cluster_centres, shapely.geometry.MultiPoint)
assert isinstance(
res.most_central_location, shapely.geometry.Point) or (
res.most_central_location is None and len(res) == 0)
assert res.most_central_location in res._shapely_points() or (
res.most_central_location is None and len(res) == 0)
def test_geocode():
gpool = geocoderpool()
_generic_test_geocoderpool(gpool)
def test_simple_geocode():
gpool = geocoderpool(load=False)
_generic_test_geocoderpool(gpool)
def test_reverse_geocode():
gpool = geocoderpool()
assert callable(gpool.reverse)
for test_case in addresses_reverse():
res = gpool.reverse(test_case)
assert isinstance(res, errorgeopy.address.Address)
assert len(res.addresses) <= len(res.dedupe())
assert isinstance(res.longest_common_substring(), str)
extract1 = res.extract(' '.join(str(res.addresses[0]).split()[::3]))
assert isinstance(extract1, list)
if len(extract1) > 0:
assert type(extract1[0][0]) is geopy.location.Location
assert type(extract1[0][1]) is int
assert sorted(
[e[1] for e in extract1],
reverse=True) == [e[1] for e in extract1]
extract2 = res.extract(res.extract(str(res.addresses[0])[::6]))
assert isinstance(extract2, list)
if len(extract2) > 0:
assert type(extract2[0][0]) is geopy.location.Location
assert type(extract2[0][1]) is int
assert sorted(
[e[1] for e in extract2],
reverse=True) == [e[1] for e in extract2]
with pytest.raises(NotImplementedError):
res.longest_common_sequence()
with pytest.raises(NotImplementedError):
res.regex()
with pytest.raises(NotImplementedError):
res.parse()
with pytest.raises(NotImplementedError):
res.tag()
| mit | 523,301,348,730,464,060 | 34.747312 | 99 | 0.638893 | false |
onshape-public/onshape-clients | python/onshape_client/oas/models/card.py | 1 | 9994 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import three_d_secure
except ImportError:
three_d_secure = sys.modules["onshape_client.oas.models.three_d_secure"]
class Card(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
          and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"account": (str,), # noqa: E501
"address_city": (str,), # noqa: E501
"address_country": (str,), # noqa: E501
"address_line1": (str,), # noqa: E501
"address_line1_check": (str,), # noqa: E501
"address_line2": (str,), # noqa: E501
"address_state": (str,), # noqa: E501
"address_zip": (str,), # noqa: E501
"address_zip_check": (str,), # noqa: E501
"available_payout_methods": ([str],), # noqa: E501
"brand": (str,), # noqa: E501
"country": (str,), # noqa: E501
"currency": (str,), # noqa: E501
"customer": (str,), # noqa: E501
"cvc_check": (str,), # noqa: E501
"default_for_currency": (bool,), # noqa: E501
"description": (str,), # noqa: E501
"dynamic_last4": (str,), # noqa: E501
"exp_month": (int,), # noqa: E501
"exp_year": (int,), # noqa: E501
"fingerprint": (str,), # noqa: E501
"funding": (str,), # noqa: E501
"id": (str,), # noqa: E501
"iin": (str,), # noqa: E501
"instance_url": (str,), # noqa: E501
"issuer": (str,), # noqa: E501
"last4": (str,), # noqa: E501
"metadata": ({str: (str,)},), # noqa: E501
"name": (str,), # noqa: E501
"object": (str,), # noqa: E501
"recipient": (str,), # noqa: E501
"status": (str,), # noqa: E501
"three_d_secure": (three_d_secure.ThreeDSecure,), # noqa: E501
"tokenization_method": (str,), # noqa: E501
"type": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"account": "account", # noqa: E501
"address_city": "addressCity", # noqa: E501
"address_country": "addressCountry", # noqa: E501
"address_line1": "addressLine1", # noqa: E501
"address_line1_check": "addressLine1Check", # noqa: E501
"address_line2": "addressLine2", # noqa: E501
"address_state": "addressState", # noqa: E501
"address_zip": "addressZip", # noqa: E501
"address_zip_check": "addressZipCheck", # noqa: E501
"available_payout_methods": "availablePayoutMethods", # noqa: E501
"brand": "brand", # noqa: E501
"country": "country", # noqa: E501
"currency": "currency", # noqa: E501
"customer": "customer", # noqa: E501
"cvc_check": "cvcCheck", # noqa: E501
"default_for_currency": "defaultForCurrency", # noqa: E501
"description": "description", # noqa: E501
"dynamic_last4": "dynamicLast4", # noqa: E501
"exp_month": "expMonth", # noqa: E501
"exp_year": "expYear", # noqa: E501
"fingerprint": "fingerprint", # noqa: E501
"funding": "funding", # noqa: E501
"id": "id", # noqa: E501
"iin": "iin", # noqa: E501
"instance_url": "instanceURL", # noqa: E501
"issuer": "issuer", # noqa: E501
"last4": "last4", # noqa: E501
"metadata": "metadata", # noqa: E501
"name": "name", # noqa: E501
"object": "object", # noqa: E501
"recipient": "recipient", # noqa: E501
"status": "status", # noqa: E501
"three_d_secure": "threeDSecure", # noqa: E501
"tokenization_method": "tokenizationMethod", # noqa: E501
"type": "type", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""card.Card - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
account (str): [optional] # noqa: E501
address_city (str): [optional] # noqa: E501
address_country (str): [optional] # noqa: E501
address_line1 (str): [optional] # noqa: E501
address_line1_check (str): [optional] # noqa: E501
address_line2 (str): [optional] # noqa: E501
address_state (str): [optional] # noqa: E501
address_zip (str): [optional] # noqa: E501
address_zip_check (str): [optional] # noqa: E501
available_payout_methods ([str]): [optional] # noqa: E501
brand (str): [optional] # noqa: E501
country (str): [optional] # noqa: E501
currency (str): [optional] # noqa: E501
customer (str): [optional] # noqa: E501
cvc_check (str): [optional] # noqa: E501
default_for_currency (bool): [optional] # noqa: E501
description (str): [optional] # noqa: E501
dynamic_last4 (str): [optional] # noqa: E501
exp_month (int): [optional] # noqa: E501
exp_year (int): [optional] # noqa: E501
fingerprint (str): [optional] # noqa: E501
funding (str): [optional] # noqa: E501
id (str): [optional] # noqa: E501
iin (str): [optional] # noqa: E501
instance_url (str): [optional] # noqa: E501
issuer (str): [optional] # noqa: E501
last4 (str): [optional] # noqa: E501
metadata ({str: (str,)}): [optional] # noqa: E501
name (str): [optional] # noqa: E501
object (str): [optional] # noqa: E501
recipient (str): [optional] # noqa: E501
status (str): [optional] # noqa: E501
three_d_secure (three_d_secure.ThreeDSecure): [optional] # noqa: E501
tokenization_method (str): [optional] # noqa: E501
type (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
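# Illustrative usage (hedged sketch): a Card model can be built from keyword
# arguments whose names match the attribute names declared in openapi_types /
# attribute_map above; the card details below are made up.
#   >>> card = Card(brand="Visa", last4="4242", exp_month=12, exp_year=2030)
#   >>> card.brand
#   'Visa'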
| mit | 5,214,020,824,491,918,000 | 38.976 | 82 | 0.537823 | false |
Wajihulhassan/SelfContainedPrevirt | tools/occam/occam/targets/ld.py | 1 | 7705 | # ------------------------------------------------------------------------------
# OCCAM
#
# Copyright © 2011-2012, SRI International
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of SRI International nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from occam import toolchain
from occam import target
from occam import driver
from occam.targets import argparser
from occam.targets import par
import logging
import tempfile
import os
def ld_default_o(input_file):
return 'a.out'
def useAsm(flags):
try:
return 'assembler' in flags[flags.index('-x')+1]
except ValueError,e:
return False
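# Illustrative check (hedged sketch): useAsm() looks at the argument that
# follows '-x' in the flag list; the flag lists below are made up.
#   >>> useAsm(['-x', 'assembler-with-cpp', 'foo.S'])
#   True
#   >>> useAsm(['-c', 'foo.c'])
#   False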
class LdTool (par.ParallelTool, argparser.ArgParser):
def flags(self): return [
'demangle', 'unique',
'trace-symbol', 'aarchive', 'ashared', 'adefault', 'd', 'dc', 'dp',
'E', 'export-dynamic', 'EB', 'EL', 'f', 'i', 'memulation', 'M',
'print-map', 'n', 'nmagic', 'N', 'omagic', 'no-omagic', 'q',
'emit-relocs', 'force-dynamic', 'r', 'relocatable', 's', 'strip-all',
'S', 'strip-debug', 't', 'trace', 'Ur', 'v', 'version', 'V', 'x',
'discard-all', 'X', 'discard-locals', 'accept-unknown-input-arch',
'no-accept-unknown-input-arch', 'as-needed', 'no-as-needed',
'add-needed', 'no-add-needed', 'Bdynamic', 'dy', 'call_shared',
'Bgroup', 'Bstatic', 'dn', 'non_shared', 'static', 'Bsymbolic',
'dynamic-list-cpp-typeinfo', 'check-sections', 'no-check-sections',
'cref', 'no-define-common', 'no-demangle', 'fatal-warnings',
'force-exe-suffix', 'gc-sections', 'no-gc-sections',
'print-gc-sections', 'no-print-gc-sections', 'help', 'target-help',
'no-keep-memory', 'no-undefined', 'allow-multiple-definition',
'allow-shlib-undefined', 'no-allow-shlib-undefined',
'no-undefined-version', 'default-symver', 'default-imported-symver',
'no-warn-mismatch', 'no-whole-archive', 'noinhibit-exec', 'nostdlib',
'pie', 'pic-executable', 'qmagic', 'Qy', 'relax', 'shared',
'Bshareable', 'sort-common', 'stats', 'traditional-format',
'dll-verbose', 'verbose', 'warn-common', 'warn-constructors',
'warn-multiple-gp', 'warn-once', 'warn-section-align',
'warn-shared-textrel', 'warn-unresolved-symbols',
'error-unresolved-symbols', 'whole-archive', 'eh-frame-hdr',
'enable-new-dtags', 'disable-new-dtags', 'reduce-memory-overheads',
'add-stdcall-alias', 'dll', 'enable-stdcall-fixup',
'disable-stdcall-fixup', 'export-all-symbols', 'file-alignment',
'kill-at', 'large-address-aware', 'enable-auto-image-base',
'disable-auto-image-base', 'enable-auto-import',
'disable-auto-import', 'enable-runtime-pseudo-reloc',
'disable-runtime-pseudo-reloc', 'enable-extra-pe-debug',
'section-alignment', 'no-trampoline'
]
def shortWithOpt(self): return [
'b', 'c', 'e', 'F', 'O', 'R', 'Ttext', 'Tbss', 'Tdata',
'u', 'y', 'Y', 'm', 'z', 'o', 'A', 'h', 'G', 'T', 'dynamic-linker'
]
def longWithOpt(self): return [
'architecture', 'format', 'mri-script', 'entry', 'gpsize', 'soname',
'just-symbols', 'script', 'undefined', 'unique', 'trace-symbol',
'dynamic-list', 'demangle', 'sysroot', 'unresolved-symbols',
'version-script', 'hash-size', 'hash-style', 'auxiliary', 'filter',
'fini', 'init', 'assert', 'defsym', 'dynamic-linker', 'Map',
'oformat', 'retain-symbols-file', 'rpath', 'rpath-link',
'sort-section', 'split-by-file', 'split-by-reloc', 'section-start',
'Tbss', 'Tdata', 'Text', 'wrap', 'base-file', 'image-base',
'major-image-version', 'major-os-version', 'major-subsystem-version',
'minor-image-version', 'minor-os-version', 'minor-subsystem-version',
'output-def', 'out-implib', 'dll-search-prefix', 'subsystem',
'bank-window', 'output'
]
def opts(self, args):
return ([], args)
def occam(self, cfg, args):
tool = self.name
#(input_files, output_file, flags) = parse_ld_args(args)
(input_files, output_file, flags) = self.parse_args(args)
print "ld input files: " + ' '.join(input_files)
# TODO: this seems to have side effects, but since I'm duplicating
# stdin it shouldn't, right?
cfg.log("%(in)s\n%(out)s\n%(fl)s\n",
{ 'in' : input_files.__repr__()
, 'out' : output_file.__repr__()
, 'fl' : flags.__repr__() })
if '-' in input_files:
# num = os.dup(0)
# fd = os.fdopen(num,'r')
# cfg.log("compiling stdin\n%(msg)s", {'msg' : fd.read()})
# fd.close()
return 0 # IAM: Could also check that output is /dev/null
if '/dev/null' == output_file:
cfg.log("skipping output /dev/null", {})
return 0
if len(input_files) == 0:
return 0
elif '-Wl,--help' in flags:
# this is just help
return 0
elif '-Wl,-shared' in flags:
# LLVM doesn't do shared...
return 0
elif '-Wl,-r' in flags or '-Wl,-i' in flags or '-Wl,--relocatable' in flags:
# this is essentially linking as a library
if output_file is None:
output_file = ld_default_o(input_files[0])
retcode = toolchain.bundle(self.fixname(output_file),
map(self.fixinputname,input_files),
[x for x in flags if x.startswith('-l')],
[x[2:] for x in flags if x.startswith('-L')])
return retcode
else:
if output_file is None:
output_file = ld_default_o(input_files[0])
retcode = toolchain.link(map(self.fixinputname,input_files),
self.fixname(output_file),
flags + ['-lc'],
save='%s_main.bc' % output_file,
link=True)
return retcode
for x in ['ld']:
target.register(x, LdTool(x))
| bsd-3-clause | 6,475,538,266,495,347,000 | 46.850932 | 84 | 0.579439 | false |
HiroyukiAbe/pimouse_ros | scripts/lightsensors.py | 1 | 1406 | #!/usr/bin/env python
#encoding: utf8
import sys, rospy
from pimouse_ros.msg import LightSensorValues
def get_freq():
f = rospy.get_param('lightsensors_freq',10)
try:
if f <= 0.0:
raise Exception()
except:
rospy.logerr("value error: lightsensors_freq")
sys.exit(1)
return f
if __name__ == '__main__':
devfile = '/dev/rtlightsensor0'
rospy.init_node('lightsensors')
pub = rospy.Publisher('lightsensors', LightSensorValues, queue_size=1)
freq = get_freq()
rate = rospy.Rate(freq)
while not rospy.is_shutdown():
try:
with open(devfile,'r') as f:
data = f.readline().split()
data = [ int(e) for e in data ]
d = LightSensorValues()
d.right_forward = data[0]
d.right_side = data[1]
d.left_side = data[2]
d.left_forward =data[3]
d.sum_all = sum(data)
d.sum_forward = data[0] + data[3]
pub.publish(d)
except:
            rospy.logerr("cannot read from " + devfile)
f = get_freq()
if f != freq:
freq = f
rate = rospy.Rate(freq)
rate.sleep()
| bsd-3-clause | -61,620,430,447,069,090 | 29.565217 | 78 | 0.45377 | false |
shadsbot/AutoCrop | CropImage.py | 1 | 4681 | from PIL import Image, ImageTk, ImageDraw
class Crop:
"""Object that contains the tools to manipulate a spritesheet"""
def __init__(self, file="example.png", cropSize=[64,64], padding=0, offset=[0,0], direction="Both", numberCrops=0, useUserCrops=False):
self.direction = direction
self.offset = {"x" : offset[0], "y" : offset[1]}
self.padding = padding
self.cropSize = { "x" : cropSize[0], "y" : cropSize[1] }
self.numberCrops = numberCrops
self.useUserCrops = useUserCrops
try:
self.image = Image.open(file)
self.image.load()
except:
self.image = Image.new('RGB',(160,60), color='red')
self.imagedraw = ImageDraw.Draw(self.image)
self.imagedraw.text((10,10), "No image selected", fill=(0,0,0))
def setImage(self, file):
try:
self.image = Image.open(file)
self.image.load()
except:
self.image = Image.new('RGB',(160,60), color='red')
self.imagedraw = ImageDraw.Draw(self.image)
self.imagedraw.text((10,10), "Image not found", fill=(0,0,0))
def setDirection(self, direction):
self.direction = direction[:-2]
def setPadding(self, pad):
self.padding = pad
def setUserCrops(self, userCrops, number=0):
if (userCrops == "True"):
userCrops = True
else:
userCrops = False
self.numberCrops = number
self.useUserCrops = userCrops
def setSize(self, x, y):
self.cropSize = { "x" : x, "y" : y }
def setOffset(self, x, y):
self.offset = { "x" : x, "y" : y }
def horizontalLoops(self):
if self.useUserCrops:
return self.numberCrops
horizontalCrops = 0
index = self.offset["x"]
while (index < self.image.size[0]):
index = index + self.cropSize["x"] + self.padding
if (index <= self.image.size[0]):
horizontalCrops = horizontalCrops + 1
return horizontalCrops
def verticalLoops(self):
if self.useUserCrops:
return self.numberCrops
verticalCrops = 0
index = self.offset["y"]
while (index < self.image.size[1]):
index = index + self.cropSize["y"] + self.padding
if (index <= self.image.size[1]):
verticalCrops = verticalCrops + 1
return verticalCrops
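	# Illustrative example (hedged): with the default 64x64 crop size, zero
	# padding and zero offset, a hypothetical 320x192 source image gives
	# horizontalLoops() == 5 and verticalLoops() == 3, i.e. 15 tiles when
	# cropping in both directions.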
def crop(self):
if self.direction == "Both":
for x in range(0,self.verticalLoops()):
currentYLoc = self.offset["y"] + ( x * (self.cropSize["y"]+self.padding) )
row = str(x) + "-"
self.cropHorizontally(currentYLoc,row)
elif self.direction == "Vertically":
self.cropVertically()
elif self.direction == "Horizontally":
self.cropHorizontally()
def cropHorizontally(self, currentYLoc=0, name=""):
if (currentYLoc == 0):
currentYLoc = self.offset["y"]
try:
for x in range(0,self.horizontalLoops()):
xposition = self.offset["x"] + (x * (self.cropSize["x"]+self.padding))
copy = self.image.crop((xposition, currentYLoc, xposition + self.cropSize["x"], currentYLoc + self.cropSize["y"]))
copy.save("%s%s.png" % (name,x))
return True
except:
print("An error occured during the cropHorizontally routine.")
return False
def cropVertically(self):
try:
for x in range(0,self.verticalLoops()):
yposition = self.offset["y"] + (x * (self.cropSize["y"]+self.padding))
copy = self.image.crop((self.offset["x"], yposition, self.offset["x"] + self.cropSize["x"], yposition + self.cropSize["y"]))
copy.save("%s.png" % x)
return True
except:
print("An error occured during the cropVertically routine.")
return False
def generatePreview(self):
try:
copy = self.image.copy()
tmp = ImageDraw.Draw(copy)
if (self.direction == "Both"):
for x in range(0,self.verticalLoops()):
currentYLoc = self.offset["y"] + ( x * (self.cropSize["y"]+self.padding) )
for y in range(0,self.horizontalLoops()):
xposition = self.offset["x"] + (y * (self.cropSize["x"]+self.padding))
tmp.rectangle( (xposition,currentYLoc,xposition+self.cropSize["x"],currentYLoc+self.cropSize["y"]), outline='red' )
if (self.direction == "Horizontally"):
for x in range(0,self.horizontalLoops()):
xposition = self.offset["x"] + (x * (self.cropSize["x"]+self.padding))
tmp.rectangle( (xposition,self.offset["y"],xposition+self.cropSize["x"],self.offset["y"]+self.cropSize["y"]), outline='red' )
if (self.direction == "Vertically"):
for x in range(0,self.verticalLoops()):
currentYLoc = self.offset["y"] + ( x * (self.cropSize["y"]+self.padding) )
xposition = self.offset["x"]
tmp.rectangle( (xposition,currentYLoc,xposition+self.cropSize["x"],currentYLoc+self.cropSize["y"]), outline='red' )
return copy
except:
return False
def debug(self):
print(self.direction)
print(self.offset)
print(self.padding)
print(self.cropSize)
print(self.numberCrops)
print(self.useUserCrops) | mit | -8,013,426,775,849,068,000 | 33.681481 | 136 | 0.660543 | false |
wdv4758h/flake8 | flake8/engine.py | 1 | 11434 | # -*- coding: utf-8 -*-
import errno
import io
import platform
import re
import sys
import warnings
import pep8
from flake8 import __version__
from flake8 import callbacks
from flake8.reporter import (multiprocessing, BaseQReport, FileQReport,
QueueReport)
from flake8 import util
_flake8_noqa = re.compile(r'\s*# flake8[:=]\s*noqa', re.I).search
EXTRA_EXCLUDE = ['.tox', '.eggs', '*.egg']
pep8.PROJECT_CONFIG += ('.flake8',)
def _load_entry_point(entry_point, verify_requirements):
"""Based on the version of setuptools load an entry-point correctly.
setuptools 11.3 deprecated `require=False` in the call to EntryPoint.load.
To load entry points correctly after that without requiring all
dependencies be present, the proper way is to call EntryPoint.resolve.
This function will provide backwards compatibility for older versions of
setuptools while also ensuring we do the right thing for the future.
"""
if hasattr(entry_point, 'resolve') and hasattr(entry_point, 'require'):
if verify_requirements:
entry_point.require()
plugin = entry_point.resolve()
else:
plugin = entry_point.load(require=verify_requirements)
return plugin
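# Illustrative usage (hedged sketch): resolving a single registered extension
# without verifying its requirements, mirroring the call made in
# _register_extensions() below. The entry-point group name is the one used in
# this module; the surrounding lines are illustrative only.
#   from pkg_resources import iter_entry_points
#   entry = next(iter_entry_points('flake8.extension'), None)
#   if entry is not None:
#       checker = _load_entry_point(entry, verify_requirements=False)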
def _register_extensions():
"""Register all the extensions."""
extensions = util.OrderedSet()
extensions.add(('pep8', pep8.__version__))
parser_hooks = []
options_hooks = []
ignored_hooks = []
try:
from pkg_resources import iter_entry_points
except ImportError:
pass
else:
for entry in iter_entry_points('flake8.extension'):
# Do not verify that the requirements versions are valid
checker = _load_entry_point(entry, verify_requirements=False)
pep8.register_check(checker, codes=[entry.name])
extensions.add((checker.name, checker.version))
if hasattr(checker, 'add_options'):
parser_hooks.append(checker.add_options)
if hasattr(checker, 'parse_options'):
options_hooks.append(checker.parse_options)
if getattr(checker, 'off_by_default', False) is True:
ignored_hooks.append(entry.name)
return extensions, parser_hooks, options_hooks, ignored_hooks
def get_parser():
"""This returns an instance of optparse.OptionParser with all the
extensions registered and options set. This wraps ``pep8.get_parser``.
"""
(extensions, parser_hooks, options_hooks, ignored) = _register_extensions()
details = ', '.join('%s: %s' % ext for ext in extensions)
python_version = get_python_version()
parser = pep8.get_parser('flake8', '%s (%s) %s' % (
__version__, details, python_version
))
for opt in ('--repeat', '--testsuite', '--doctest'):
try:
parser.remove_option(opt)
except ValueError:
pass
if multiprocessing:
parser.config_options.append('jobs')
parser.add_option('-j', '--jobs', type='string', default='auto',
help="number of jobs to run simultaneously, "
"or 'auto'. This is ignored on Windows.")
parser.add_option('--exit-zero', action='store_true',
help="exit with code 0 even if there are errors")
for parser_hook in parser_hooks:
parser_hook(parser)
# See comment above regarding why this has to be a callback.
parser.add_option('--install-hook', default=False, dest='install_hook',
help='Install the appropriate hook for this '
'repository.', action='callback',
callback=callbacks.install_vcs_hook)
parser.add_option('--output-file', default=None,
help='Redirect report to a file.',
type='string', nargs=1, action='callback',
callback=callbacks.redirect_stdout)
parser.add_option('--enable-extensions', default='',
dest='enabled_extensions',
help='Enable plugins and extensions that are disabled '
'by default',
type='string')
parser.ignored_extensions = ignored
return parser, options_hooks
class NoQAStyleGuide(pep8.StyleGuide):
def input_file(self, filename, lines=None, expected=None, line_offset=0):
"""Run all checks on a Python source file."""
if self.options.verbose:
print('checking %s' % filename)
fchecker = self.checker_class(
filename, lines=lines, options=self.options)
# Any "flake8: noqa" comments to ignore the entire file?
if any(_flake8_noqa(line) for line in fchecker.lines):
return 0
return fchecker.check_all(expected=expected, line_offset=line_offset)
class StyleGuide(object):
"""A wrapper StyleGuide object for Flake8 usage.
This allows for OSErrors to be caught in the styleguide and special logic
to be used to handle those errors.
"""
# Reasoning for error numbers is in-line below
serial_retry_errors = set([
# ENOSPC: Added by sigmavirus24
# > On some operating systems (OSX), multiprocessing may cause an
        # > ENOSPC error while trying to create a Semaphore.
# > In those cases, we should replace the customized Queue Report
# > class with pep8's StandardReport class to ensure users don't run
# > into this problem.
# > (See also: https://gitlab.com/pycqa/flake8/issues/74)
errno.ENOSPC,
# NOTE(sigmavirus24): When adding to this list, include the reasoning
# on the lines before the error code and always append your error
# code. Further, please always add a trailing `,` to reduce the visual
# noise in diffs.
])
def __init__(self, **kwargs):
# This allows us to inject a mocked StyleGuide in the tests.
self._styleguide = kwargs.pop('styleguide', NoQAStyleGuide(**kwargs))
@property
def options(self):
return self._styleguide.options
@property
def paths(self):
return self._styleguide.paths
def _retry_serial(self, func, *args, **kwargs):
"""This will retry the passed function in serial if necessary.
In the event that we encounter an OSError with an errno in
:attr:`serial_retry_errors`, this function will retry this function
using pep8's default Report class which operates in serial.
"""
try:
return func(*args, **kwargs)
except OSError as oserr:
if oserr.errno in self.serial_retry_errors:
self.init_report(pep8.StandardReport)
else:
raise
return func(*args, **kwargs)
def check_files(self, paths=None):
return self._retry_serial(self._styleguide.check_files, paths=paths)
def excluded(self, filename, parent=None):
return self._styleguide.excluded(filename, parent=parent)
def init_report(self, reporter=None):
return self._styleguide.init_report(reporter)
def input_file(self, filename, lines=None, expected=None, line_offset=0):
return self._retry_serial(
self._styleguide.input_file,
filename=filename,
lines=lines,
expected=expected,
line_offset=line_offset,
)
def _parse_multi_options(options, split_token=','):
r"""Split and strip and discard empties.
Turns the following:
A,
B,
into ["A", "B"].
Credit: Kristian Glass as contributed to pep8
"""
if options:
return [o.strip() for o in options.split(split_token) if o.strip()]
else:
return options
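# Illustrative behaviour (hedged): the sample option strings are made up.
#   >>> _parse_multi_options('E121,\n  E123, ')
#   ['E121', 'E123']
#   >>> _parse_multi_options('')  # falsy input is returned unchanged
#   ''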
def _disable_extensions(parser, options):
ignored_extensions = set(getattr(parser, 'ignored_extensions', []))
enabled = set(_parse_multi_options(options.enabled_extensions))
# Remove any of the selected extensions from the extensions ignored by
# default.
ignored_extensions -= enabled
# Whatever is left afterwards should be unioned with options.ignore and
# options.ignore should be updated with that.
options.ignore = tuple(ignored_extensions.union(options.ignore))
def get_style_guide(**kwargs):
"""Parse the options and configure the checker. This returns a sub-class
of ``pep8.StyleGuide``."""
kwargs['parser'], options_hooks = get_parser()
styleguide = StyleGuide(**kwargs)
options = styleguide.options
_disable_extensions(kwargs['parser'], options)
if options.exclude and not isinstance(options.exclude, list):
options.exclude = pep8.normalize_paths(options.exclude)
elif not options.exclude:
options.exclude = []
# Add patterns in EXTRA_EXCLUDE to the list of excluded patterns
options.exclude.extend(pep8.normalize_paths(EXTRA_EXCLUDE))
for options_hook in options_hooks:
options_hook(options)
if util.warn_when_using_jobs(options):
if not multiprocessing:
warnings.warn("The multiprocessing module is not available. "
"Ignoring --jobs arguments.")
if util.is_windows():
warnings.warn("The --jobs option is not available on Windows. "
"Ignoring --jobs arguments.")
if util.is_using_stdin(styleguide.paths):
warnings.warn("The --jobs option is not compatible with supplying "
"input using - . Ignoring --jobs arguments.")
if options.diff:
warnings.warn("The --diff option was specified with --jobs but "
"they are not compatible. Ignoring --jobs arguments."
)
if options.diff:
options.jobs = None
force_disable_jobs = util.force_disable_jobs(styleguide)
if multiprocessing and options.jobs and not force_disable_jobs:
if options.jobs.isdigit():
n_jobs = int(options.jobs)
else:
try:
n_jobs = multiprocessing.cpu_count()
except NotImplementedError:
n_jobs = 1
if n_jobs > 1:
options.jobs = n_jobs
reporter = QueueReport
if options.quiet:
reporter = BaseQReport
if options.quiet == 1:
reporter = FileQReport
report = styleguide.init_report(reporter)
report.input_file = styleguide.input_file
styleguide.runner = report.task_queue.put
return styleguide
def get_python_version():
# The implementation isn't all that important.
try:
impl = platform.python_implementation() + " "
except AttributeError: # Python 2.5
impl = ''
return '%s%s on %s' % (impl, platform.python_version(), platform.system())
def make_stdin_get_value(original):
def stdin_get_value():
if not hasattr(stdin_get_value, 'cached_stdin'):
value = original()
if sys.version_info < (3, 0):
stdin = io.BytesIO(value)
else:
stdin = io.StringIO(value)
stdin_get_value.cached_stdin = stdin
else:
stdin = stdin_get_value.cached_stdin
return stdin.getvalue()
return stdin_get_value
pep8.stdin_get_value = make_stdin_get_value(pep8.stdin_get_value)
| mit | 5,101,505,648,745,265,000 | 35.298413 | 79 | 0.620255 | false |
hughsaunders/keystone | keystone/common/ldap/core.py | 1 | 71345 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import codecs
import functools
import os.path
import re
import sys
import weakref
import ldap
import ldap.filter
import ldappool
import six
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LW
from keystone.openstack.common import log
LOG = log.getLogger(__name__)
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
'sub': ldap.SCOPE_SUBTREE}
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
'default': None,
'finding': ldap.DEREF_FINDING,
'never': ldap.DEREF_NEVER,
'searching': ldap.DEREF_SEARCHING}
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
'demand': ldap.OPT_X_TLS_DEMAND,
'allow': ldap.OPT_X_TLS_ALLOW}
# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
# indicate that no attributes should be returned besides the DN.
DN_ONLY = ['1.1']
_utf8_encoder = codecs.getencoder('utf-8')
def utf8_encode(value):
"""Encode a basestring to UTF-8.
If the string is unicode encode it to UTF-8, if the string is
str then assume it's already encoded. Otherwise raise a TypeError.
:param value: A basestring
:returns: UTF-8 encoded version of value
:raises: TypeError if value is not basestring
"""
if isinstance(value, six.text_type):
return _utf8_encoder(value)[0]
elif isinstance(value, six.binary_type):
return value
else:
raise TypeError("value must be basestring, "
"not %s" % value.__class__.__name__)
_utf8_decoder = codecs.getdecoder('utf-8')
def utf8_decode(value):
"""Decode a from UTF-8 into unicode.
If the value is a binary string assume it's UTF-8 encoded and decode
it into a unicode string. Otherwise convert the value from its
type into a unicode string.
:param value: value to be returned as unicode
:returns: value as unicode
:raises: UnicodeDecodeError for invalid UTF-8 encoding
"""
if isinstance(value, six.binary_type):
return _utf8_decoder(value)[0]
return six.text_type(value)
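# Illustrative behaviour (hedged): the helpers above round-trip text through
# UTF-8. For example, utf8_encode(u'caf\xe9') returns the UTF-8 byte string
# 'caf\xc3\xa9', and utf8_decode('caf\xc3\xa9') returns u'caf\xe9' again;
# passing any other type to utf8_encode raises TypeError.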
def py2ldap(val):
"""Type convert a Python value to a type accepted by LDAP (unicode).
The LDAP API only accepts strings for values therefore convert
the value's type to a unicode string. A subsequent type conversion
will encode the unicode as UTF-8 as required by the python-ldap API,
but for now we just want a string representation of the value.
:param val: The value to convert to a LDAP string representation
:returns: unicode string representation of value.
"""
if isinstance(val, bool):
return u'TRUE' if val else u'FALSE'
else:
return six.text_type(val)
def ldap2py(val):
"""Convert an LDAP formatted value to Python type used by OpenStack.
Virtually all LDAP values are stored as UTF-8 encoded strings.
OpenStack prefers values which are Python types, e.g. unicode,
boolean, integer, etc.
:param val: LDAP formatted value
:returns: val converted to preferred Python type
"""
try:
return LDAP_VALUES[val]
except KeyError:
pass
try:
return int(val)
except ValueError:
pass
return utf8_decode(val)
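# Illustrative behaviour (hedged): py2ldap/ldap2py translate between the Python
# types used by OpenStack and their LDAP string form, e.g. py2ldap(True) is
# u'TRUE', ldap2py('TRUE') is True, ldap2py('42') is 42, and any other string
# comes back as unicode.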
def convert_ldap_result(ldap_result):
"""Convert LDAP search result to Python types used by OpenStack.
Each result tuple is of the form (dn, attrs), where dn is a string
containing the DN (distinguished name) of the entry, and attrs is
a dictionary containing the attributes associated with the
entry. The keys of attrs are strings, and the associated values
are lists of strings.
OpenStack wants to use Python types of its choosing. Strings will
be unicode, truth values boolean, whole numbers int's, etc. DN's will
also be decoded from UTF-8 to unicode.
:param ldap_result: LDAP search result
:returns: list of 2-tuples containing (dn, attrs) where dn is unicode
and attrs is a dict whose values are type converted to
OpenStack preferred types.
"""
py_result = []
at_least_one_referral = False
for dn, attrs in ldap_result:
ldap_attrs = {}
if dn is None:
# this is a Referral object, rather than an Entry object
at_least_one_referral = True
continue
for kind, values in six.iteritems(attrs):
try:
ldap_attrs[kind] = [ldap2py(x) for x in values]
except UnicodeDecodeError:
LOG.debug('Unable to decode value for attribute %s', kind)
py_result.append((utf8_decode(dn), ldap_attrs))
if at_least_one_referral:
LOG.debug(('Referrals were returned and ignored. Enable referral '
'chasing in keystone.conf via [ldap] chase_referrals'))
return py_result
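# Illustrative behaviour (hedged sketch): a raw python-ldap entry such as
#   [('cn=foo,dc=example,dc=org', {'enabled': ['TRUE'], 'cn': ['foo']})]
# is returned by convert_ldap_result() as
#   [(u'cn=foo,dc=example,dc=org', {'enabled': [True], 'cn': [u'foo']})]
# with referral entries (those whose dn is None) dropped. The DN and values
# here are made up.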
def safe_iter(attrs):
if attrs is None:
return
elif isinstance(attrs, list):
for e in attrs:
yield e
else:
yield attrs
def parse_deref(opt):
try:
return LDAP_DEREF[opt]
except KeyError:
raise ValueError(_('Invalid LDAP deref option: %(option)s. '
'Choose one of: %(options)s') %
{'option': opt,
'options': ', '.join(LDAP_DEREF.keys()), })
def parse_tls_cert(opt):
try:
return LDAP_TLS_CERTS[opt]
except KeyError:
raise ValueError(_(
'Invalid LDAP TLS certs option: %(option)s. '
'Choose one of: %(options)s') % {
'option': opt,
'options': ', '.join(LDAP_TLS_CERTS.keys())})
def ldap_scope(scope):
try:
return LDAP_SCOPES[scope]
except KeyError:
raise ValueError(
_('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
'scope': scope,
'options': ', '.join(LDAP_SCOPES.keys())})
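# Illustrative behaviour (hedged): these helpers map configuration strings to
# python-ldap constants, e.g. ldap_scope('sub') returns ldap.SCOPE_SUBTREE and
# parse_deref('never') returns ldap.DEREF_NEVER; an unknown value raises
# ValueError listing the accepted options.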
def prep_case_insensitive(value):
"""Prepare a string for case-insensitive comparison.
This is defined in RFC4518. For simplicity, all this function does is
lowercase all the characters, strip leading and trailing whitespace,
and compress sequences of spaces to a single space.
"""
value = re.sub(r'\s+', ' ', value.strip().lower())
return value
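# Illustrative behaviour (hedged):
#   >>> prep_case_insensitive('  Example   NAME ')
#   'example name'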
def is_ava_value_equal(attribute_type, val1, val2):
"""Returns True if and only if the AVAs are equal.
When comparing AVAs, the equality matching rule for the attribute type
should be taken into consideration. For simplicity, this implementation
does a case-insensitive comparison.
    Note that this function uses prep_case_insensitive so the limitations of
that function apply here.
"""
return prep_case_insensitive(val1) == prep_case_insensitive(val2)
def is_rdn_equal(rdn1, rdn2):
"""Returns True if and only if the RDNs are equal.
* RDNs must have the same number of AVAs.
* Each AVA of the RDNs must be the equal for the same attribute type. The
order isn't significant. Note that an attribute type will only be in one
AVA in an RDN, otherwise the DN wouldn't be valid.
* Attribute types aren't case sensitive. Note that attribute type
comparison is more complicated than implemented. This function only
    compares case-insensitively. The code should handle multiple names for an
attribute type (e.g., cn, commonName, and 2.5.4.3 are the same).
Note that this function uses is_ava_value_equal to compare AVAs so the
limitations of that function apply here.
"""
if len(rdn1) != len(rdn2):
return False
for attr_type_1, val1, dummy in rdn1:
found = False
for attr_type_2, val2, dummy in rdn2:
if attr_type_1.lower() != attr_type_2.lower():
continue
found = True
if not is_ava_value_equal(attr_type_1, val1, val2):
return False
break
if not found:
return False
return True
def is_dn_equal(dn1, dn2):
"""Returns True if and only if the DNs are equal.
Two DNs are equal if they've got the same number of RDNs and if the RDNs
are the same at each position. See RFC4517.
Note that this function uses is_rdn_equal to compare RDNs so the
limitations of that function apply here.
:param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(dn1, list):
dn1 = ldap.dn.str2dn(utf8_encode(dn1))
if not isinstance(dn2, list):
dn2 = ldap.dn.str2dn(utf8_encode(dn2))
if len(dn1) != len(dn2):
return False
for rdn1, rdn2 in zip(dn1, dn2):
if not is_rdn_equal(rdn1, rdn2):
return False
return True
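# Illustrative behaviour (hedged): attribute types and values are compared
# case-insensitively, e.g.
#   >>> is_dn_equal('cn=Foo,dc=Example,dc=org', 'CN=foo,dc=example,dc=ORG')
#   True
#   >>> is_dn_equal('cn=foo,dc=example,dc=org', 'cn=bar,dc=example,dc=org')
#   False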
def dn_startswith(descendant_dn, dn):
"""Returns True if and only if the descendant_dn is under the dn.
:param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(descendant_dn, list):
descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
if not isinstance(dn, list):
dn = ldap.dn.str2dn(utf8_encode(dn))
if len(descendant_dn) <= len(dn):
return False
# Use the last len(dn) RDNs.
return is_dn_equal(descendant_dn[-len(dn):], dn)
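# Illustrative behaviour (hedged): a DN only "starts with" a strictly shorter
# ancestor DN, never itself.
#   >>> dn_startswith('cn=foo,ou=users,dc=example,dc=org', 'ou=users,dc=example,dc=org')
#   True
#   >>> dn_startswith('ou=users,dc=example,dc=org', 'ou=users,dc=example,dc=org')
#   False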
@six.add_metaclass(abc.ABCMeta)
class LDAPHandler(object):
'''Abstract class which defines methods for a LDAP API provider.
Native Keystone values cannot be passed directly into and from the
python-ldap API. Type conversion must occur at the LDAP API
    boundary; examples of type conversions are:
* booleans map to the strings 'TRUE' and 'FALSE'
* integer values map to their string representation.
* unicode strings are encoded in UTF-8
In addition to handling type conversions at the API boundary we
have the requirement to support more than one LDAP API
provider. Currently we have:
* python-ldap, this is the standard LDAP API for Python, it
requires access to a live LDAP server.
* Fake LDAP which emulates python-ldap. This is used for
testing without requiring a live LDAP server.
To support these requirements we need a layer that performs type
conversions and then calls another LDAP API which is configurable
(e.g. either python-ldap or the fake emulation).
We have an additional constraint at the time of this writing due to
limitations in the logging module. The logging module is not
capable of accepting UTF-8 encoded strings, it will throw an
encoding exception. Therefore all logging MUST be performed prior
to UTF-8 conversion. This means no logging can be performed in the
ldap APIs that implement the python-ldap API because those APIs
are defined to accept only UTF-8 strings. Thus the layer which
performs type conversions must also do the logging. We do the type
conversions in two steps, once to convert all Python types to
unicode strings, then log, then convert the unicode strings to
UTF-8.
There are a variety of ways one could accomplish this, we elect to
use a chaining technique whereby instances of this class simply
call the next member in the chain via the "conn" attribute. The
chain is constructed by passing in an existing instance of this
class as the conn attribute when the class is instantiated.
Here is a brief explanation of why other possible approaches were
not used:
subclassing
To perform the wrapping operations in the correct order
        the type conversion class would have to subclass each of
the API providers. This is awkward, doubles the number of
classes, and does not scale well. It requires the type
conversion class to be aware of all possible API
providers.
decorators
Decorators provide an elegant solution to wrap methods and
would be an ideal way to perform type conversions before
calling the wrapped function and then converting the
values returned from the wrapped function. However
        decorators need to be aware of the method signature: they
        have to know what input parameters need conversion and how
to convert the result. For an API like python-ldap which
has a large number of different method signatures it would
require a large number of specialized
decorators. Experience has shown it's very easy to apply
the wrong decorator due to the inherent complexity and
tendency to cut-n-paste code. Another option is to
parameterize the decorator to make it "smart". Experience
has shown such decorators become insanely complicated and
difficult to understand and debug. Also decorators tend to
hide what's really going on when a method is called, the
operations being performed are not visible when looking at
        the implementation of a decorated method; this too experience
has shown leads to mistakes.
Chaining simplifies both wrapping to perform type conversion as
well as the substitution of alternative API providers. One simply
creates a new instance of the API interface and insert it at the
front of the chain. Type conversions are explicit and obvious.
If a new method needs to be added to the API interface one adds it
to the abstract class definition. Should one miss adding the new
method to any derivations of the abstract class the code will fail
to load and run making it impossible to forget updating all the
derived classes.
'''
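    # Illustrative sketch (hedged): the chain described above is built by
    # passing one handler instance as `conn` to the next. For example, a
    # type-converting wrapper such as the KeystoneLDAPHandler referenced in the
    # docstrings below would be constructed roughly as
    #   KeystoneLDAPHandler(conn=PythonLDAPHandler(conn=None))
    # so that every call it receives is forwarded via self.conn.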
@abc.abstractmethod
def __init__(self, conn=None):
self.conn = conn
@abc.abstractmethod
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def set_option(self, option, invalue):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_option(self, option):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def unbind_s(self):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def add_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def modify_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_s(self, dn):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
class PythonLDAPHandler(LDAPHandler):
'''Implementation of the LDAPHandler interface which calls the
python-ldap API.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
'''
def __init__(self, conn=None):
super(PythonLDAPHandler, self).__init__(conn=conn)
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.conn = ldap.initialize(url)
self.conn.protocol_version = ldap.VERSION3
if alias_dereferencing is not None:
self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
self.page_size = page_size
if use_tls:
self.conn.start_tls_s()
if chase_referrals is not None:
self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
def unbind_s(self):
return self.conn.unbind_s()
def add_s(self, dn, modlist):
return self.conn.add_s(dn, modlist)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return self.conn.search_s(base, scope, filterstr,
attrlist, attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
# The resp_ctrl_classes parameter is a recent addition to the
# API. It defaults to None. We do not anticipate using it.
# To run with older versions of python-ldap we do not pass it.
return self.conn.result3(msgid, all, timeout)
def modify_s(self, dn, modlist):
return self.conn.modify_s(dn, modlist)
def delete_s(self, dn):
return self.conn.delete_s(dn)
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
return self.conn.delete_ext_s(dn, serverctrls, clientctrls)
def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
tls_cacertdir=None, tls_req_cert=None,
debug_level=None):
'''Method for common ldap initialization between PythonLDAPHandler and
PooledLDAPHandler.
'''
LOG.debug("LDAP init: url=%s", url)
LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
'tls_req_cert=%s tls_avail=%s',
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, ldap.TLS_AVAIL)
if debug_level is not None:
ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
using_ldaps = url.lower().startswith("ldaps")
if use_tls and using_ldaps:
raise AssertionError(_('Invalid TLS / LDAPS combination'))
# The certificate trust options apply for both LDAPS and TLS.
if use_tls or using_ldaps:
if not ldap.TLS_AVAIL:
raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
'not available') % ldap.TLS_AVAIL)
if tls_cacertfile:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isfile(tls_cacertfile):
raise IOError(_("tls_cacertfile %s not found "
"or is not a file") %
tls_cacertfile)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
elif tls_cacertdir:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isdir(tls_cacertdir):
raise IOError(_("tls_cacertdir %s not found "
"or is not a directory") %
tls_cacertdir)
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
if tls_req_cert in LDAP_TLS_CERTS.values():
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
LOG.debug("LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s",
tls_req_cert)
class MsgId(list):
'''Wrapper class to hold connection and msgid.'''
pass
def use_conn_pool(func):
'''Use this only for connection pool specific ldap API.
This adds connection object to decorated API as next argument after self.
'''
def wrapper(self, *args, **kwargs):
# assert isinstance(self, PooledLDAPHandler)
with self._get_pool_connection() as conn:
self._apply_options(conn)
return func(self, conn, *args, **kwargs)
return wrapper
class PooledLDAPHandler(LDAPHandler):
    '''Implementation of the LDAPHandler interface which uses a pooled
connection manager.
Pool specific configuration is defined in [ldap] section.
All other LDAP configuration is still used from [ldap] section
Keystone LDAP authentication logic authenticates an end user using its DN
and password via LDAP bind to establish supplied password is correct.
This can fill up the pool quickly (as pool re-uses existing connection
based on its bind data) and would not leave space in pool for connection
re-use for other LDAP operations.
Now a separate pool can be established for those requests when related flag
'use_auth_pool' is enabled. That pool can have its own size and
connection lifetime. Other pool attributes are shared between those pools.
If 'use_pool' is disabled, then 'use_auth_pool' does not matter.
If 'use_auth_pool' is not enabled, then connection pooling is not used for
those LDAP operations.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
'''
# Added here to allow override for testing
Connector = ldappool.StateConnector
auth_pool_prefix = 'auth_pool_'
connection_pools = {} # static connector pool dict
def __init__(self, conn=None, use_auth_pool=False):
super(PooledLDAPHandler, self).__init__(conn=conn)
self.who = ''
self.cred = ''
self.conn_options = {} # connection specific options
self.page_size = None
self.use_auth_pool = use_auth_pool
self.conn_pool = None
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None, pool_retry_max=None,
pool_retry_delay=None, pool_conn_timeout=None,
pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level)
self.page_size = page_size
        # The following two options are not added in the common initialization
        # as they need to follow a specific sequence in the PythonLDAPHandler
        # code.
if alias_dereferencing is not None:
self.set_option(ldap.OPT_DEREF, alias_dereferencing)
if chase_referrals is not None:
self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
if self.use_auth_pool: # separate pool when use_auth_pool enabled
pool_url = self.auth_pool_prefix + url
else:
pool_url = url
try:
self.conn_pool = self.connection_pools[pool_url]
except KeyError:
self.conn_pool = ldappool.ConnectionManager(
url,
size=pool_size,
retry_max=pool_retry_max,
retry_delay=pool_retry_delay,
timeout=pool_conn_timeout,
connector_cls=self.Connector,
use_tls=use_tls,
max_lifetime=pool_conn_lifetime)
self.connection_pools[pool_url] = self.conn_pool
def set_option(self, option, invalue):
self.conn_options[option] = invalue
def get_option(self, option):
value = self.conn_options.get(option)
# if option was not specified explicitly, then use connection default
        # value for that option if there is one.
if value is None:
with self._get_pool_connection() as conn:
value = conn.get_option(option)
return value
def _apply_options(self, conn):
# if connection has a lifetime, then it already has options specified
if conn.get_lifetime() > 30:
return
for option, invalue in six.iteritems(self.conn_options):
conn.set_option(option, invalue)
def _get_pool_connection(self):
return self.conn_pool.connection(self.who, self.cred)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
'''Not using use_conn_pool decorator here as this API takes cred as
input.
'''
self.who = who
self.cred = cred
with self._get_pool_connection() as conn:
self._apply_options(conn)
def unbind_s(self):
        # After the connection generator's `with` statement block completes,
        # the connection is always released via the finally block in ldappool,
        # so this unbind is a no-op.
pass
@use_conn_pool
def add_s(self, conn, dn, modlist):
return conn.add_s(dn, modlist)
@use_conn_pool
def search_s(self, conn, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return conn.search_s(base, scope, filterstr, attrlist,
attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
        '''This is an asynchronous API which returns a MsgId instance to be
        used in a result3 call.
        To work with the result3 API in a predictable manner, the same LDAP
        connection that provided the msgid is needed, so the used connection
        and msgid are wrapped in the MsgId class. The connection associated
        with search_ext is released once the last hard reference to the MsgId
        object is freed. This will happen when the caller is done with the
        returned MsgId.
'''
conn_ctxt = self._get_pool_connection()
conn = conn_ctxt.__enter__()
try:
msgid = conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
except Exception:
conn_ctxt.__exit__(*sys.exc_info())
raise
res = MsgId((conn, msgid))
weakref.ref(res, functools.partial(conn_ctxt.__exit__,
None, None, None))
return res
def result3(self, msgid, all=1, timeout=None,
resp_ctrl_classes=None):
        '''This method is used to wait for and return the result of an
        operation previously initiated by one of the LDAP asynchronous
        operation routines (e.g. search_ext()), which returned an invocation
        identifier (a message id) upon successful initiation of the operation.
        The input msgid is expected to be an instance of class MsgId, which
        holds the LDAP session/connection used to execute search_ext and the
        message identifier.
        The connection associated with search_ext is released once the last
        hard reference to the MsgId object is freed. This will happen when the
        function which requested the msgid and used it in result3 exits.
'''
conn, msg_id = msgid
return conn.result3(msg_id, all, timeout)
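    # A sketch of the asynchronous call pattern, assuming `handler` is a
    # PooledLDAPHandler: search_ext returns a MsgId wrapping the pooled
    # connection, and result3 must be given that same MsgId so the matching
    # connection delivers the result.
    #
    #     msgid = handler.search_ext(base_dn, ldap.SCOPE_SUBTREE)
    #     rtype, rdata, rmsgid, serverctrls = handler.result3(msgid)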
@use_conn_pool
def modify_s(self, conn, dn, modlist):
return conn.modify_s(dn, modlist)
@use_conn_pool
def delete_s(self, conn, dn):
return conn.delete_s(dn)
@use_conn_pool
def delete_ext_s(self, conn, dn, serverctrls=None, clientctrls=None):
return conn.delete_ext_s(dn, serverctrls, clientctrls)
class KeystoneLDAPHandler(LDAPHandler):
'''Convert data types and perform logging.
    This LDAP interface wraps the python-ldap based interfaces. The
    python-ldap interfaces require string values encoded in UTF-8. The
    OpenStack logging framework, at the time of this writing, is not
    capable of accepting strings encoded in UTF-8; the log functions
    will throw decoding errors if a non-ascii character appears in a
    string.
Prior to the call Python data types are converted to a string
representation as required by the LDAP APIs.
Then logging is performed so we can track what is being
sent/received from LDAP. Also the logging filters security
sensitive items (i.e. passwords).
Then the string values are encoded into UTF-8.
Then the LDAP API entry point is invoked.
Data returned from the LDAP call is converted back from UTF-8
encoded strings into the Python data type used internally in
OpenStack.
'''
def __init__(self, conn=None):
super(KeystoneLDAPHandler, self).__init__(conn=conn)
self.page_size = 0
def __enter__(self):
return self
def _disable_paging(self):
# Disable the pagination from now on
self.page_size = 0
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert='demand', chase_referrals=None, debug_level=None,
use_pool=None, pool_size=None,
pool_retry_max=None, pool_retry_delay=None,
pool_conn_timeout=None, pool_conn_lifetime=None):
self.page_size = page_size
return self.conn.connect(url, page_size, alias_dereferencing,
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, chase_referrals,
debug_level=debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=pool_retry_max,
pool_retry_delay=pool_retry_delay,
pool_conn_timeout=pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime)
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
LOG.debug("LDAP bind: who=%s", who)
who_utf8 = utf8_encode(who)
cred_utf8 = utf8_encode(cred)
return self.conn.simple_bind_s(who_utf8, cred_utf8,
serverctrls=serverctrls,
clientctrls=clientctrls)
def unbind_s(self):
LOG.debug("LDAP unbind")
return self.conn.unbind_s()
def add_s(self, dn, modlist):
ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
for kind, values in modlist]
logging_attrs = [(kind, values
if kind != 'userPassword'
else ['****'])
for kind, values in ldap_attrs]
LOG.debug('LDAP add: dn=%s attrs=%s',
dn, logging_attrs)
dn_utf8 = utf8_encode(dn)
ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
for kind, values in ldap_attrs]
return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
# NOTE(morganfainberg): Remove "None" singletons from this list, which
# allows us to set mapped attributes to "None" as defaults in config.
# Without this filtering, the ldap query would raise a TypeError since
# attrlist is expected to be an iterable of strings.
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s',
base, scope, filterstr, attrlist, attrsonly)
if self.page_size:
ldap_result = self._paged_search_s(base, scope,
filterstr, attrlist)
else:
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist_utf8 = map(utf8_encode, attrlist)
ldap_result = self.conn.search_s(base_utf8, scope,
filterstr_utf8,
attrlist_utf8, attrsonly)
py_result = convert_ldap_result(ldap_result)
return py_result
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s'
'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
base, scope, filterstr, attrlist, attrsonly,
serverctrls, clientctrls, timeout, sizelimit)
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def _paged_search_s(self, base, scope, filterstr, attrlist=None):
res = []
use_old_paging_api = False
# The API for the simple paged results control changed between
# python-ldap 2.3 and 2.4. We need to detect the capabilities
# of the python-ldap version we are using.
if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
use_old_paging_api = True
lc = ldap.controls.SimplePagedResultsControl(
controlType=ldap.LDAP_CONTROL_PAGE_OID,
criticality=True,
controlValue=(self.page_size, ''))
page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
else:
lc = ldap.controls.libldap.SimplePagedResultsControl(
criticality=True,
size=self.page_size,
cookie='')
page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist = [attr for attr in attrlist if attr is not None]
attrlist_utf8 = map(utf8_encode, attrlist)
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
        # Keep requesting pages from the LDAP server until it has no more data
while True:
# Request to the ldap server a page with 'page_size' entries
rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
# Receive the data
res.extend(rdata)
pctrls = [c for c in serverctrls
if c.controlType == page_ctrl_oid]
if pctrls:
# LDAP server supports pagination
if use_old_paging_api:
est, cookie = pctrls[0].controlValue
lc.controlValue = (self.page_size, cookie)
else:
cookie = lc.cookie = pctrls[0].cookie
if cookie:
# There is more data still on the server
# so we request another page
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
else:
                    # Exit condition: no more data on the server
break
else:
LOG.warning(_LW('LDAP Server does not support paging. '
'Disable paging in keystone.conf to '
'avoid this message.'))
self._disable_paging()
break
return res
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
'resp_ctrl_classes=%s ldap_result=%s',
msgid, all, timeout, resp_ctrl_classes, ldap_result)
py_result = convert_ldap_result(ldap_result)
return py_result
def modify_s(self, dn, modlist):
ldap_modlist = [
(op, kind, (None if values is None
else [py2ldap(x) for x in safe_iter(values)]))
for op, kind, values in modlist]
logging_modlist = [(op, kind, (values if kind != 'userPassword'
else ['****']))
for op, kind, values in ldap_modlist]
LOG.debug('LDAP modify: dn=%s modlist=%s',
dn, logging_modlist)
dn_utf8 = utf8_encode(dn)
ldap_modlist_utf8 = [
(op, kind, (None if values is None
else [utf8_encode(x) for x in safe_iter(values)]))
for op, kind, values in ldap_modlist]
return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
def delete_s(self, dn):
LOG.debug("LDAP delete: dn=%s", dn)
dn_utf8 = utf8_encode(dn)
return self.conn.delete_s(dn_utf8)
def delete_ext_s(self, dn, serverctrls=None, clientctrls=None):
LOG.debug('LDAP delete_ext: dn=%s serverctrls=%s clientctrls=%s',
dn, serverctrls, clientctrls)
dn_utf8 = utf8_encode(dn)
return self.conn.delete_ext_s(dn_utf8, serverctrls, clientctrls)
def __exit__(self, exc_type, exc_val, exc_tb):
self.unbind_s()
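# A condensed sketch of how this wrapper is driven (mirroring
# BaseLdap.get_connection below; `url`, `base_dn`, `user` and `password` are
# assumed inputs):
#
#     conn = KeystoneLDAPHandler(conn=_get_connection(url, use_pool=True))
#     conn.connect(url, use_tls=False)
#     conn.simple_bind_s(user, password)
#     with conn:
#         entries = conn.search_s(base_dn, ldap.SCOPE_SUBTREE)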
_HANDLERS = {}
def register_handler(prefix, handler):
_HANDLERS[prefix] = handler
def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
for prefix, handler in six.iteritems(_HANDLERS):
if conn_url.startswith(prefix):
return handler()
if use_pool:
return PooledLDAPHandler(use_auth_pool=use_auth_pool)
else:
return PythonLDAPHandler()
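# Illustrative registration of a custom handler for a URL scheme; the handler
# class name here is hypothetical:
#
#     register_handler('fake://', FakeLdapHandler)
#
# _get_connection() would then return FakeLdapHandler() for any 'fake://...'
# URL before falling back to the pooled or plain python-ldap handlers.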
def filter_entity(entity_ref):
"""Filter out private items in an entity dict.
:param entity_ref: the entity dictionary. The 'dn' field will be removed.
'dn' is used in LDAP, but should not be returned to the user. This
value may be modified.
:returns: entity_ref
"""
if entity_ref:
entity_ref.pop('dn', None)
return entity_ref
class BaseLdap(object):
DEFAULT_SUFFIX = "dc=example,dc=com"
DEFAULT_OU = None
DEFAULT_STRUCTURAL_CLASSES = None
DEFAULT_ID_ATTR = 'cn'
DEFAULT_OBJECTCLASS = None
DEFAULT_FILTER = None
DEFAULT_EXTRA_ATTR_MAPPING = []
DUMB_MEMBER_DN = 'cn=dumb,dc=nonexistent'
NotFound = None
notfound_arg = None
options_name = None
model = None
attribute_options_names = {}
immutable_attrs = []
attribute_ignore = []
tree_dn = None
def __init__(self, conf):
self.LDAP_URL = conf.ldap.url
self.LDAP_USER = conf.ldap.user
self.LDAP_PASSWORD = conf.ldap.password
self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
self.page_size = conf.ldap.page_size
self.use_tls = conf.ldap.use_tls
self.tls_cacertfile = conf.ldap.tls_cacertfile
self.tls_cacertdir = conf.ldap.tls_cacertdir
self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
self.attribute_mapping = {}
self.chase_referrals = conf.ldap.chase_referrals
self.debug_level = conf.ldap.debug_level
# LDAP Pool specific attribute
self.use_pool = conf.ldap.use_pool
self.pool_size = conf.ldap.pool_size
self.pool_retry_max = conf.ldap.pool_retry_max
self.pool_retry_delay = conf.ldap.pool_retry_delay
self.pool_conn_timeout = conf.ldap.pool_connection_timeout
self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
# End user authentication pool specific config attributes
self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
self.auth_pool_size = conf.ldap.auth_pool_size
self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
if self.options_name is not None:
self.suffix = conf.ldap.suffix
if self.suffix is None:
self.suffix = self.DEFAULT_SUFFIX
dn = '%s_tree_dn' % self.options_name
self.tree_dn = (getattr(conf.ldap, dn)
or '%s,%s' % (self.DEFAULT_OU, self.suffix))
idatt = '%s_id_attribute' % self.options_name
self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
objclass = '%s_objectclass' % self.options_name
self.object_class = (getattr(conf.ldap, objclass)
or self.DEFAULT_OBJECTCLASS)
for k, v in six.iteritems(self.attribute_options_names):
v = '%s_%s_attribute' % (self.options_name, v)
self.attribute_mapping[k] = getattr(conf.ldap, v)
attr_mapping_opt = ('%s_additional_attribute_mapping' %
self.options_name)
attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
or self.DEFAULT_EXTRA_ATTR_MAPPING)
self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
ldap_filter = '%s_filter' % self.options_name
self.ldap_filter = getattr(conf.ldap,
ldap_filter) or self.DEFAULT_FILTER
allow_create = '%s_allow_create' % self.options_name
self.allow_create = getattr(conf.ldap, allow_create)
allow_update = '%s_allow_update' % self.options_name
self.allow_update = getattr(conf.ldap, allow_update)
allow_delete = '%s_allow_delete' % self.options_name
self.allow_delete = getattr(conf.ldap, allow_delete)
member_attribute = '%s_member_attribute' % self.options_name
self.member_attribute = getattr(conf.ldap, member_attribute, None)
self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
if self.notfound_arg is None:
self.notfound_arg = self.options_name + '_id'
attribute_ignore = '%s_attribute_ignore' % self.options_name
self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
self.use_dumb_member = getattr(conf.ldap, 'use_dumb_member')
self.dumb_member = (getattr(conf.ldap, 'dumb_member') or
self.DUMB_MEMBER_DN)
self.subtree_delete_enabled = getattr(conf.ldap,
'allow_subtree_delete')
def _not_found(self, object_id):
if self.NotFound is None:
return exception.NotFound(target=object_id)
else:
return self.NotFound(**{self.notfound_arg: object_id})
def _parse_extra_attrs(self, option_list):
mapping = {}
for item in option_list:
try:
ldap_attr, attr_map = item.split(':')
except Exception:
LOG.warn(_LW(
'Invalid additional attribute mapping: "%s". '
'Format must be <ldap_attribute>:<keystone_attribute>'),
item)
continue
mapping[ldap_attr] = attr_map
return mapping
def _is_dumb_member(self, member_dn):
"""Checks that member is a dumb member.
:param member_dn: DN of member to be checked.
"""
return (self.use_dumb_member
and is_dn_equal(member_dn, self.dumb_member))
def get_connection(self, user=None, password=None, end_user_auth=False):
use_pool = self.use_pool
pool_size = self.pool_size
pool_conn_lifetime = self.pool_conn_lifetime
if end_user_auth:
if not self.use_auth_pool:
use_pool = False
else:
pool_size = self.auth_pool_size
pool_conn_lifetime = self.auth_pool_conn_lifetime
conn = _get_connection(self.LDAP_URL, use_pool,
use_auth_pool=end_user_auth)
conn = KeystoneLDAPHandler(conn=conn)
conn.connect(self.LDAP_URL,
page_size=self.page_size,
alias_dereferencing=self.alias_dereferencing,
use_tls=self.use_tls,
tls_cacertfile=self.tls_cacertfile,
tls_cacertdir=self.tls_cacertdir,
tls_req_cert=self.tls_req_cert,
chase_referrals=self.chase_referrals,
debug_level=self.debug_level,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=self.pool_retry_max,
pool_retry_delay=self.pool_retry_delay,
pool_conn_timeout=self.pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime
)
if user is None:
user = self.LDAP_USER
if password is None:
password = self.LDAP_PASSWORD
# not all LDAP servers require authentication, so we don't bind
# if we don't have any user/pass
if user and password:
conn.simple_bind_s(user, password)
return conn
def _id_to_dn_string(self, object_id):
return u'%s=%s,%s' % (self.id_attr,
ldap.dn.escape_dn_chars(
six.text_type(object_id)),
self.tree_dn)
def _id_to_dn(self, object_id):
if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
return self._id_to_dn_string(object_id)
with self.get_connection() as conn:
search_result = conn.search_s(
self.tree_dn, self.LDAP_SCOPE,
u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
{'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'objclass': self.object_class},
attrlist=DN_ONLY)
if search_result:
dn, attrs = search_result[0]
return dn
else:
return self._id_to_dn_string(object_id)
@staticmethod
def _dn_to_id(dn):
return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
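    # Round-trip sketch with illustrative values: given id_attr 'cn' and
    # tree_dn 'ou=users,dc=example,dc=com',
    #     _id_to_dn_string('alice') -> u'cn=alice,ou=users,dc=example,dc=com'
    #     _dn_to_id('cn=alice,ou=users,dc=example,dc=com') -> u'alice'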
def _ldap_res_to_model(self, res):
# LDAP attribute names may be returned in a different case than
# they are defined in the mapping, so we need to check for keys
# in a case-insensitive way. We use the case specified in the
# mapping for the model to ensure we have a predictable way of
# retrieving values later.
lower_res = dict((k.lower(), v) for k, v in six.iteritems(res[1]))
id_attrs = lower_res.get(self.id_attr.lower())
if not id_attrs:
message = _('ID attribute %(id_attr)s not found in LDAP '
'object %(dn)s') % ({'id_attr': self.id_attr,
'dn': res[0]})
raise exception.NotFound(message=message)
if len(id_attrs) > 1:
# FIXME(gyee): if this is a multi-value attribute and it has
# multiple values, we can't use it as ID. Retain the dn_to_id
# logic here so it does not potentially break existing
# deployments. We need to fix our read-write LDAP logic so
# it does not get the ID from DN.
message = _LW('ID attribute %(id_attr)s for LDAP object %(dn)s '
'has multiple values and therefore cannot be used '
'as an ID. Will get the ID from DN instead') % (
{'id_attr': self.id_attr,
'dn': res[0]})
LOG.warn(message)
id_val = self._dn_to_id(res[0])
else:
id_val = id_attrs[0]
obj = self.model(id=id_val)
for k in obj.known_keys:
if k in self.attribute_ignore:
continue
try:
map_attr = self.attribute_mapping.get(k, k)
if map_attr is None:
# Ignore attributes that are mapped to None.
continue
v = lower_res[map_attr.lower()]
except KeyError:
pass
else:
try:
obj[k] = v[0]
except IndexError:
obj[k] = None
return obj
def check_allow_create(self):
if not self.allow_create:
action = _('LDAP %s create') % self.options_name
raise exception.ForbiddenAction(action=action)
def check_allow_update(self):
if not self.allow_update:
action = _('LDAP %s update') % self.options_name
raise exception.ForbiddenAction(action=action)
def check_allow_delete(self):
if not self.allow_delete:
action = _('LDAP %s delete') % self.options_name
raise exception.ForbiddenAction(action=action)
def affirm_unique(self, values):
if values.get('name') is not None:
try:
self.get_by_name(values['name'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate name, %s.') %
values['name'])
if values.get('id') is not None:
try:
self.get(values['id'])
except exception.NotFound:
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate ID, %s.') %
values['id'])
def create(self, values):
self.affirm_unique(values)
object_classes = self.structural_classes + [self.object_class]
attrs = [('objectClass', object_classes)]
for k, v in six.iteritems(values):
if k in self.attribute_ignore:
continue
if k == 'id':
# no need to check if v is None as 'id' will always have
# a value
attrs.append((self.id_attr, [v]))
elif v is not None:
attr_type = self.attribute_mapping.get(k, k)
if attr_type is not None:
attrs.append((attr_type, [v]))
extra_attrs = [attr for attr, name
in six.iteritems(self.extra_attr_mapping)
if name == k]
for attr in extra_attrs:
attrs.append((attr, [v]))
if 'groupOfNames' in object_classes and self.use_dumb_member:
attrs.append(('member', [self.dumb_member]))
with self.get_connection() as conn:
conn.add_s(self._id_to_dn(values['id']), attrs)
return values
def _ldap_get(self, object_id, ldap_filter=None):
query = (u'(&(%(id_attr)s=%(id)s)'
u'%(filter)s'
u'(objectClass=%(object_class)s))'
% {'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'filter': (ldap_filter or self.ldap_filter or ''),
'object_class': self.object_class})
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
self.attribute_mapping.values() +
self.extra_attr_mapping.keys())))
res = conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return None
try:
return res[0]
except IndexError:
return None
def _ldap_get_all(self, ldap_filter=None):
query = u'(&%s(objectClass=%s))' % (ldap_filter or
self.ldap_filter or
'', self.object_class)
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
self.attribute_mapping.values() +
self.extra_attr_mapping.keys())))
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return []
def _ldap_get_list(self, search_base, scope, query_params=None,
attrlist=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
def calc_filter(attrname, value):
val_esc = ldap.filter.escape_filter_chars(value)
return '(%s=%s)' % (attrname, val_esc)
query = (u'(&%s%s)' %
(query, ''.join([calc_filter(k, v) for k, v in
six.iteritems(query_params)])))
with self.get_connection() as conn:
return conn.search_s(search_base, scope, query, attrlist)
def get(self, object_id, ldap_filter=None):
res = self._ldap_get(object_id, ldap_filter)
if res is None:
raise self._not_found(object_id)
else:
return self._ldap_res_to_model(res)
def get_by_name(self, name, ldap_filter=None):
query = (u'(%s=%s)' % (self.attribute_mapping['name'],
ldap.filter.escape_filter_chars(
six.text_type(name))))
res = self.get_all(query)
try:
return res[0]
except IndexError:
raise self._not_found(name)
def get_all(self, ldap_filter=None):
return [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)]
def update(self, object_id, values, old_obj=None):
if old_obj is None:
old_obj = self.get(object_id)
modlist = []
for k, v in six.iteritems(values):
if k == 'id' or k in self.attribute_ignore:
continue
# attribute value has not changed
if k in old_obj and old_obj[k] == v:
continue
if k in self.immutable_attrs:
msg = (_("Cannot change %(option_name)s %(attr)s") %
{'option_name': self.options_name, 'attr': k})
raise exception.ValidationError(msg)
if v is None:
if old_obj.get(k) is not None:
modlist.append((ldap.MOD_DELETE,
self.attribute_mapping.get(k, k),
None))
continue
current_value = old_obj.get(k)
if current_value is None:
op = ldap.MOD_ADD
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
elif current_value != v:
op = ldap.MOD_REPLACE
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
if modlist:
with self.get_connection() as conn:
try:
conn.modify_s(self._id_to_dn(object_id), modlist)
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
return self.get(object_id)
def delete(self, object_id):
with self.get_connection() as conn:
try:
conn.delete_s(self._id_to_dn(object_id))
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
def deleteTree(self, object_id):
tree_delete_control = ldap.controls.LDAPControl(CONTROL_TREEDELETE,
0,
None)
with self.get_connection() as conn:
try:
conn.delete_ext_s(self._id_to_dn(object_id),
serverctrls=[tree_delete_control])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
except ldap.NOT_ALLOWED_ON_NONLEAF:
# Most LDAP servers do not support the tree_delete_control.
# In these servers, the usual idiom is to first perform a
            # search to get the entries to delete, then delete them in
            # order of child to parent, since LDAP forbids the
# deletion of a parent entry before deleting the children
# of that parent. The simplest way to do that is to delete
# the entries in order of the length of the DN, from longest
# to shortest DN.
dn = self._id_to_dn(object_id)
scope = ldap.SCOPE_SUBTREE
# With some directory servers, an entry with objectclass
# ldapsubentry will not be returned unless it is explicitly
# requested, by specifying the objectclass in the search
# filter. We must specify this, with objectclass=*, in an
# LDAP filter OR clause, in order to return all entries
filt = '(|(objectclass=*)(objectclass=ldapsubentry))'
# We only need the DNs of the entries. Since no attributes
# will be returned, we do not have to specify attrsonly=1.
entries = conn.search_s(dn, scope, filt, attrlist=DN_ONLY)
if entries:
for dn in sorted((e[0] for e in entries),
key=len, reverse=True):
conn.delete_s(dn)
else:
LOG.debug('No entries in LDAP subtree %s', dn)
def add_member(self, member_dn, member_list_dn):
"""Add member to the member list.
:param member_dn: DN of member to be added.
:param member_list_dn: DN of group to which the
member will be added.
:raises: exception.Conflict: If the user was already a member.
self.NotFound: If the group entry didn't exist.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.TYPE_OR_VALUE_EXISTS:
raise exception.Conflict(_('Member %(member)s '
'is already a member'
' of group %(group)s') % {
'member': member_dn,
'group': member_list_dn})
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def remove_member(self, member_dn, member_list_dn):
"""Remove member from the member list.
:param member_dn: DN of member to be removed.
:param member_list_dn: DN of group from which the
member will be removed.
:raises: self.NotFound: If the group entry didn't exist.
ldap.NO_SUCH_ATTRIBUTE: If the user wasn't a member.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_DELETE, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def _delete_tree_nodes(self, search_base, scope, query_params=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
query = (u'(&%s%s)' %
(query, ''.join(['(%s=%s)'
% (k, ldap.filter.escape_filter_chars(v))
for k, v in
six.iteritems(query_params)])))
not_deleted_nodes = []
with self.get_connection() as conn:
try:
nodes = conn.search_s(search_base, scope, query,
attrlist=DN_ONLY)
except ldap.NO_SUCH_OBJECT:
LOG.debug('Could not find entry with dn=%s', search_base)
raise self._not_found(self._dn_to_id(search_base))
else:
for node_dn, _t in nodes:
try:
conn.delete_s(node_dn)
except ldap.NO_SUCH_OBJECT:
not_deleted_nodes.append(node_dn)
if not_deleted_nodes:
LOG.warn(_("When deleting entries for %(search_base)s, could not"
" delete nonexistent entries %(entries)s%(dots)s"),
{'search_base': search_base,
'entries': not_deleted_nodes[:3],
'dots': '...' if len(not_deleted_nodes) > 3 else ''})
class EnabledEmuMixIn(BaseLdap):
"""Emulates boolean 'enabled' attribute if turned on.
    Creates a groupOfNames holding all enabled objects of this class; all
    missing objects are considered disabled.
Options:
* $name_enabled_emulation - boolean, on/off
* $name_enabled_emulation_dn - DN of that groupOfNames, default is
cn=enabled_${name}s,${tree_dn}
Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
${tree_dn} is self.tree_dn.
"""
def __init__(self, conf):
super(EnabledEmuMixIn, self).__init__(conf)
enabled_emulation = '%s_enabled_emulation' % self.options_name
self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
if not self.enabled_emulation_dn:
naming_attr_name = 'cn'
naming_attr_value = 'enabled_%ss' % self.options_name
sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
naming_attr = (naming_attr_name, [naming_attr_value])
else:
# Extract the attribute name and value from the configured DN.
naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
naming_rdn = naming_dn[0][0]
naming_attr = (utf8_decode(naming_rdn[0]),
utf8_decode(naming_rdn[1]))
self.enabled_emulation_naming_attr = naming_attr
def _get_enabled(self, object_id):
dn = self._id_to_dn(object_id)
query = '(member=%s)' % dn
with self.get_connection() as conn:
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
query, ['cn'])
except ldap.NO_SUCH_OBJECT:
return False
else:
return bool(enabled_value)
def _add_enabled(self, object_id):
if not self._get_enabled(object_id):
modlist = [(ldap.MOD_ADD,
'member',
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except ldap.NO_SUCH_OBJECT:
attr_list = [('objectClass', ['groupOfNames']),
('member', [self._id_to_dn(object_id)]),
self.enabled_emulation_naming_attr]
if self.use_dumb_member:
attr_list[1][1].append(self.dumb_member)
conn.add_s(self.enabled_emulation_dn, attr_list)
def _remove_enabled(self, object_id):
modlist = [(ldap.MOD_DELETE,
'member',
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
pass
def create(self, values):
if self.enabled_emulation:
enabled_value = values.pop('enabled', True)
ref = super(EnabledEmuMixIn, self).create(values)
if 'enabled' not in self.attribute_ignore:
if enabled_value:
self._add_enabled(ref['id'])
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).create(values)
def get(self, object_id, ldap_filter=None):
ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
ref['enabled'] = self._get_enabled(object_id)
return ref
def get_all(self, ldap_filter=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
            # had to copy BaseLdap.get_all here so we can filter out the
            # enabled-emulation group by DN
tenant_list = [self._ldap_res_to_model(x)
for x in self._ldap_get_all(ldap_filter)
if x[0] != self.enabled_emulation_dn]
for tenant_ref in tenant_list:
tenant_ref['enabled'] = self._get_enabled(tenant_ref['id'])
return tenant_list
else:
return super(EnabledEmuMixIn, self).get_all(ldap_filter)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
data = values.copy()
enabled_value = data.pop('enabled', None)
ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
if enabled_value is not None:
if enabled_value:
self._add_enabled(object_id)
else:
self._remove_enabled(object_id)
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).update(
object_id, values, old_obj)
def delete(self, object_id):
if self.enabled_emulation:
self._remove_enabled(object_id)
super(EnabledEmuMixIn, self).delete(object_id)
| apache-2.0 | -522,299,333,275,910 | 38.969188 | 79 | 0.571266 | false |
rphillips/bitbake | lib/bb/__init__.py | 1 | 4327 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Build System Python Library
#
# Copyright (C) 2003 Holger Schurig
# Copyright (C) 2003, 2004 Chris Larson
#
# Based on Gentoo's portage.py.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
__version__ = "1.11.0"
import sys
if sys.version_info < (2, 6, 0):
raise RuntimeError("Sorry, python 2.6.0 or later is required for this version of bitbake")
import os
import logging
import traceback
class NullHandler(logging.Handler):
def emit(self, record):
pass
Logger = logging.getLoggerClass()
class BBLogger(Logger):
def __init__(self, name):
if name.split(".")[0] == "BitBake":
self.debug = self.bbdebug
Logger.__init__(self, name)
def bbdebug(self, level, msg, *args, **kwargs):
return self.log(logging.DEBUG - level - 1, msg, *args, **kwargs)
def plain(self, msg, *args, **kwargs):
return self.log(logging.INFO + 1, msg, *args, **kwargs)
def verbose(self, msg, *args, **kwargs):
return self.log(logging.INFO - 1, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
return self.critical("%s\n%s" % (msg, traceback.format_exc()), *args, **kwargs)
logging.raiseExceptions = False
logging.setLoggerClass(BBLogger)
logger = logging.getLogger("BitBake")
logger.addHandler(NullHandler())
logger.setLevel(logging.INFO)
# This has to be imported after the setLoggerClass, as the import of bb.msg
# can result in construction of the various loggers.
import bb.msg
if "BBDEBUG" in os.environ:
level = int(os.environ["BBDEBUG"])
if level:
bb.msg.set_debug_level(level)
# Messaging convenience functions
def plain(*args):
logger.plain(''.join(args))
def debug(lvl, *args):
logger.debug(lvl, ''.join(args))
def note(*args):
logger.info(''.join(args))
def warn(*args):
logger.warn(''.join(args))
def error(*args):
logger.error(''.join(args))
def fatal(*args):
logger.critical(''.join(args))
sys.exit(1)
def deprecated(func, name = None, advice = ""):
"""This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used."""
import warnings
if advice:
advice = ": %s" % advice
if name is None:
name = func.__name__
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s%s." % (name,
advice),
category = PendingDeprecationWarning,
stacklevel = 2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
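# Example usage, with hypothetical function names: alias an old name to a new
# implementation while warning on use.
#
#     fetch_url = deprecated(download_url, name="fetch_url",
#                            advice="use download_url instead")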
# For compatibility
def deprecate_import(current, modulename, fromlist, renames = None):
"""Import objects from one module into another, wrapping them with a DeprecationWarning"""
import sys
module = __import__(modulename, fromlist = fromlist)
for position, objname in enumerate(fromlist):
obj = getattr(module, objname)
newobj = deprecated(obj, "{0}.{1}".format(current, objname),
"Please use {0}.{1} instead".format(modulename, objname))
if renames:
newname = renames[position]
else:
newname = objname
setattr(sys.modules[current], newname, newobj)
deprecate_import(__name__, "bb.fetch", ("MalformedUrl", "encodeurl", "decodeurl"))
deprecate_import(__name__, "bb.utils", ("mkdirhier", "movefile", "copyfile", "which"))
deprecate_import(__name__, "bb.utils", ["vercmp_string"], ["vercmp"])
| gpl-2.0 | 6,545,991,354,082,631,000 | 30.816176 | 94 | 0.645251 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/application_gateway_backend_health_py3.py | 1 | 1134 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ApplicationGatewayBackendHealth(Model):
"""List of ApplicationGatewayBackendHealthPool resources.
:param backend_address_pools:
:type backend_address_pools:
list[~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayBackendHealthPool]
"""
_attribute_map = {
'backend_address_pools': {'key': 'backendAddressPools', 'type': '[ApplicationGatewayBackendHealthPool]'},
}
def __init__(self, *, backend_address_pools=None, **kwargs) -> None:
super(ApplicationGatewayBackendHealth, self).__init__(**kwargs)
self.backend_address_pools = backend_address_pools
| mit | -8,204,238,813,715,144,000 | 38.103448 | 113 | 0.631393 | false |
akaariai/django-reverse-unique | reverse_unique_tests/models.py | 1 | 3974 | from __future__ import unicode_literals
from datetime import date
from django.db import models
from django.db.models import Q, F
from django.utils.translation import get_language
from reverse_unique import ReverseUnique
def filter_lang():
return Q(lang=get_language())
class Article(models.Model):
pub_date = models.DateField()
active_translation = ReverseUnique(
"ArticleTranslation", filters=filter_lang)
class Meta:
app_label = 'reverse_unique'
class Lang(models.Model):
code = models.CharField(max_length=2, primary_key=True)
class Meta:
app_label = 'reverse_unique'
class ArticleTranslation(models.Model):
article = models.ForeignKey(Article, on_delete=models.CASCADE)
lang = models.ForeignKey(Lang, on_delete=models.CASCADE)
title = models.CharField(max_length=100)
abstract = models.CharField(max_length=100, null=True)
body = models.TextField()
class Meta:
unique_together = ('article', 'lang')
app_label = 'reverse_unique'
# The idea for DefaultTranslationArticle is that article's have default
# language. This allows testing of filter condition targeting both
# tables in the join.
class DefaultTranslationArticle(models.Model):
pub_date = models.DateField()
default_lang = models.CharField(max_length=2)
active_translation = ReverseUnique(
"DefaultTranslationArticleTranslation", filters=filter_lang)
default_translation = ReverseUnique(
"DefaultTranslationArticleTranslation", filters=Q(lang=F('article__default_lang')))
class Meta:
app_label = 'reverse_unique'
class DefaultTranslationArticleTranslation(models.Model):
article = models.ForeignKey(DefaultTranslationArticle, on_delete=models.CASCADE)
lang = models.CharField(max_length=2)
title = models.CharField(max_length=100)
abstract = models.CharField(max_length=100, null=True)
body = models.TextField()
class Meta:
unique_together = ('article', 'lang')
app_label = 'reverse_unique'
class Guest(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = 'reverse_unique'
def filter_reservations():
return Q(from_date__lte=date.today()) & (
Q(until_date__gte=date.today()) | Q(until_date__isnull=True))
class Room(models.Model):
current_reservation = ReverseUnique(
"Reservation", through='reservations',
filters=filter_reservations)
class Meta:
app_label = 'reverse_unique'
class Reservation(models.Model):
room = models.ForeignKey(Room, on_delete=models.CASCADE, related_name='reservations')
guest = models.ForeignKey(Guest, on_delete=models.CASCADE)
from_date = models.DateField()
until_date = models.DateField(null=True) # NULL means reservation "forever".
class Meta:
app_label = 'reverse_unique'
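# A sketch of the kind of query these test models support (illustrative, not
# part of the original file): a ReverseUnique field can be traversed like any
# relation, e.g.
#     Room.objects.filter(current_reservation__isnull=True)
# to find rooms with no reservation covering today.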
class Parent(models.Model):
rel1 = ReverseUnique("Rel1", filters=Q(f1="foo"))
uniq_field = models.CharField(max_length=10, unique=True, null=True)
class Meta:
app_label = 'reverse_unique'
class Rel1(models.Model):
parent = models.ForeignKey(Parent, on_delete=models.CASCADE, related_name="rel1list")
f1 = models.CharField(max_length=10)
class Meta:
app_label = 'reverse_unique'
class Child(Parent):
rel2 = ReverseUnique("Rel2", filters=Q(f1="foo"))
class Meta:
app_label = 'reverse_unique'
class AnotherChild(Child):
rel1_child = ReverseUnique("Rel1", filters=Q(f1__startswith="foo"))
class Meta:
app_label = 'reverse_unique'
class Rel2(models.Model):
child = models.ForeignKey(Child, on_delete=models.CASCADE, related_name="rel2list")
f1 = models.CharField(max_length=10)
class Meta:
app_label = 'reverse_unique'
class Rel3(models.Model):
a_model = models.ForeignKey(Parent, on_delete=models.CASCADE, to_field='uniq_field')
class Meta:
app_label = 'reverse_unique'
| bsd-3-clause | -8,017,831,775,881,084,000 | 26.79021 | 91 | 0.693256 | false |
kobotoolbox/kpi | kpi/management/commands/remove_duplicate_assetversions.py | 1 | 9498 | # coding: utf-8
import json
from collections import defaultdict
from hashlib import md5
from django.core.management.base import BaseCommand
from django.db import transaction
from ...models import Asset, AssetVersion
ROUGH_BATCH_MEM_LIMIT_MB = 100
MAX_BATCH_SIZE = 100
def find_original_and_duplicate_versions(version_pks, asset_pk):
"""
Given a list of `AssetVersion` primary keys, returns a tuple of:
    * a list of the original `AssetVersion` primary keys;
    * a list of the duplicate primary keys;
    * a mapping from each original version's primary key to the UIDs of its
      duplicates;
    * the batch size used to fetch the versions without memory exhaustion.
Duplicates are identified by the following method:
* Remove all `$kuid`s from `version_content['survey']` and
`version_content['choices']`;
* Serialize the modified `version_content`, `deployed_content`, `name`,
`_deployment_data`, and `deployed` to JSON;
* Calculate the MD5 digest of that JSON;
* Consider the first `AssetVersion` (ordered by `pk`) with a given MD5
to be the original, and any subsequent `AssetVersion`s with the
same MD5 to be duplicates.
:param version_pks: an iterable of `AssetVersion` primary keys to search
for duplicates. They MUST all belong to the same `Asset`.
:param asset_pk: the primary key of the `Asset` to which all versions
belong. This is required as a safety check.
"""
version_pks = sorted(version_pks)
digests_to_first_version_pks = defaultdict(list)
start = 0
batch_size = 1
batch_size_guessed = False
while True:
this_batch_version_pks = version_pks[start:start + batch_size]
if not this_batch_version_pks:
break
versions = AssetVersion.objects.filter(
asset_id=asset_pk,
pk__in=this_batch_version_pks
).order_by('pk')
for version in versions.iterator():
for kuid_containing in 'survey', 'choices':
try:
for item in version.version_content[kuid_containing]:
try:
del item['$kuid']
except KeyError:
pass
except KeyError:
continue
serialized = json.dumps((
version.deployed_content,
version.name,
version._deployment_data, # noqa
version.version_content,
version.deployed
), sort_keys=True)
digest = md5(serialized).digest()
digests_to_first_version_pks[digest].append({
'pk': version.pk,
'uid': version.uid,
})
start += batch_size
if not batch_size_guessed:
batch_size = max(
1, int(ROUGH_BATCH_MEM_LIMIT_MB * 1024 * 1024 / len(serialized)))
batch_size = min(batch_size, MAX_BATCH_SIZE)
batch_size_guessed = True
duplicates_of = {}
duplicate_version_pks = []
for (digest, matches) in digests_to_first_version_pks.items():
if len(matches) > 1:
duplicates_of[matches[0]['pk']] = [m['uid'] for m in matches[1:]]
duplicate_version_pks = duplicate_version_pks + [
m['pk'] for m in matches[1:]
]
return (
duplicates_of.keys(),
duplicate_version_pks,
duplicates_of,
batch_size,
)
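# The grouping idea in miniature -- a standalone sketch, independent of Django
# and of the models above: serialize each candidate deterministically, hash it,
# and treat the first primary key seen per digest as the original. It reuses
# json, defaultdict and md5, which are already imported in this module.
#
#     def group_by_digest(items):
#         """items: iterable of (pk, json-serializable content) pairs."""
#         groups = defaultdict(list)
#         for pk, content in sorted(items):
#             digest = md5(json.dumps(content, sort_keys=True)).digest()
#             groups[digest].append(pk)
#         # groups[digest][0] is the "original"; the rest are duplicates
#         return groups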
class Command(BaseCommand):
help = (
'Remove duplicate `AssetVersion`s as identified by their content '
'(after stripping `$kuid`s). Output is tab-delimited with the '
'following columns:\n'
'\tUsername\n\tAsset UID\n\tOriginal Version Count\n'
'\tDuplicate Version Count\n'
'\tAsterisk If Deployed Version Is Duplicate\n'
'The currently deployed version will never be deleted.'
)
def add_arguments(self, parser):
parser.add_argument(
'--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Show information about duplicates but do not remove them'
)
parser.add_argument(
'--username',
action='store',
dest='username',
default=False,
help='Consider only versions owned by a specific user'
)
parser.add_argument(
'--asset-uid',
action='store',
dest='asset_uid',
default=False,
help='Consider only versions of the specified `Asset`'
)
def handle(self, *args, **options):
versions = AssetVersion.objects.order_by('pk')
username = options.get('username')
if username:
versions = versions.filter(asset__owner__username=username)
asset_uid = options.get('asset_uid')
if asset_uid:
versions = versions.filter(asset__uid=asset_uid)
# Trying to get the ORM to annotate each `Asset` with a count of its
# `AssetVersion`s is unusably slow
self.stderr.write('Listing versions (may take several seconds)...')
version_dump = versions.values_list('pk', 'asset_id')
versions_for_assets = defaultdict(list)
for version_pk, asset_pk in version_dump:
versions_for_assets[asset_pk].append(version_pk)
version_counts_for_assets = {
asset_pk: len(version_pks) for
asset_pk, version_pks in versions_for_assets.items()
}
# Sort descending by version count; the higher the version count, the
# more likely many of the versions are duplicates
assets_sorted_by_version_count = sorted(
version_counts_for_assets, key=version_counts_for_assets.get,
reverse=True
)
self.stderr.write(
'Found {} versions for {} assets; '
'maximum {} versions per asset'.format(
len(version_dump),
len(versions_for_assets),
version_counts_for_assets[assets_sorted_by_version_count[0]]
)
)
for asset_pk in assets_sorted_by_version_count:
with transaction.atomic():
asset_values = Asset.objects.filter(
pk=asset_pk
).values_list('owner__username', 'uid', '_deployment_data')
if not asset_values:
# Asset with this PK disappeared before we got to it
continue
username, uid, deployment_data = asset_values[0]
# Find the currently deployed version; we'll never delete it
# even if it's a duplicate
currently_deployed_uid = json.loads(deployment_data).get(
'version', None)
currently_deployed_pk = AssetVersion.objects.filter(
uid=currently_deployed_uid).values_list('pk', flat=True)
original_version_pks, duplicate_version_pks, duplicate_uids, \
batch_size = find_original_and_duplicate_versions(
versions_for_assets[asset_pk], asset_pk)
pks_to_delete = duplicate_version_pks
currently_deployed_is_duplicate = False
if currently_deployed_pk:
try:
# Don't delete the currently deployed version
pks_to_delete.remove(currently_deployed_pk[0])
except ValueError:
pass
else:
currently_deployed_is_duplicate = True
output = (
username,
uid,
len(original_version_pks),
len(duplicate_version_pks),
'*' if currently_deployed_is_duplicate else ''
)
self.stdout.write(('{}\t' * len(output)).format(*output))
if not options.get('dry_run'):
# Store the UIDs of all duplicate versions in the original
# version's `uid_aliases` field
for pk, new_uid_aliases in duplicate_uids.items():
version_qs = AssetVersion.objects.filter(pk=pk)
uid_aliases = version_qs.values_list(
'uid_aliases', flat=True)[0]
if not uid_aliases:
uid_aliases = new_uid_aliases
else:
uid_aliases.extend(new_uid_aliases)
version_qs.update(uid_aliases=uid_aliases)
# Haha, silly programmer: you thought you could delete all
# these versions at once without memory exhaustion?!
# There are FKs (e.g. from `AssetSnapshot`) that require
# Django to take the slow path for cascade deletion
start = 0
while True:
this_batch_version_pks = pks_to_delete[
start:start + batch_size]
if not this_batch_version_pks:
break
AssetVersion.objects.filter(
pk__in=this_batch_version_pks
).delete()
start += batch_size
| agpl-3.0 | 5,937,014,375,773,076,000 | 39.589744 | 81 | 0.540114 | false |
mihaelacr/pydeeplearn | code/lib/trainingoptions.py | 1 | 1144 | """ Defines a training options class as a holder for options that can be passed
for training a neural network.
"""
__author__ = "Mihaela Rosca"
__contact__ = "mihaela.c.rosca@gmail.com"
import numpy as np
# TODO: move from common here
import common
class TrainingOptions(object):
def __init__(self, miniBatchSize,
learningRate,
momentumMax=0.0,
rmsprop=False,
weightDecayL1=0.0,
weightDecayL2=0.0,
nesterovMomentum=False,
save_best_weights=False,
momentumForEpochFunction=common.getMomentumForEpochLinearIncrease,
momentumFactorForLearningRate=False):
self.miniBatchSize = miniBatchSize
self.learningRate = learningRate
self.momentumMax = np.float32(momentumMax)
self.rmsprop = rmsprop
self.weightDecayL1 = weightDecayL1
self.weightDecayL2 = weightDecayL2
self.nesterov = nesterovMomentum
self.momentumFactorForLearningRate = momentumFactorForLearningRate
self.momentumForEpochFunction = momentumForEpochFunction
self.batchLearningRate = np.float32(learningRate / miniBatchSize)
self.save_best_weights = save_best_weights
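# Example instantiation with illustrative hyperparameters:
#
#     options = TrainingOptions(miniBatchSize=10, learningRate=0.01,
#                               momentumMax=0.95, rmsprop=True,
#                               nesterovMomentum=True)
#     options.batchLearningRate  # np.float32(0.01 / 10)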
| bsd-3-clause | -2,303,735,449,736,076,000 | 32.647059 | 79 | 0.733392 | false |
Goldmund-Wyldebeast-Wunderliebe/raven-python | raven/contrib/django/client.py | 1 | 6779 | """
raven.contrib.django.client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.http import HttpRequest
from django.template import TemplateSyntaxError
from django.template.loader import LoaderOrigin
from raven.base import Client
from raven.contrib.django.utils import get_data_from_template, get_host
from raven.contrib.django.middleware import SentryLogMiddleware
from raven.utils.wsgi import get_headers, get_environ
__all__ = ('DjangoClient',)
class DjangoClient(Client):
logger = logging.getLogger('sentry.errors.client.django')
def is_enabled(self):
return bool(self.servers or 'sentry' in settings.INSTALLED_APPS)
def get_user_info(self, user):
if not user.is_authenticated():
return {'is_authenticated': False}
user_info = {
'id': user.pk,
'is_authenticated': True,
}
if hasattr(user, 'email'):
user_info['email'] = user.email
if hasattr(user, 'get_username'):
user_info['username'] = user.get_username()
elif hasattr(user, 'username'):
user_info['username'] = user.username
return user_info
def get_data_from_request(self, request):
try:
from django.contrib.auth.models import AbstractBaseUser as BaseUser
except ImportError:
from django.contrib.auth.models import User as BaseUser # NOQA
result = {}
if hasattr(request, 'user') and isinstance(request.user, BaseUser):
result['sentry.interfaces.User'] = self.get_user_info(request.user)
try:
uri = request.build_absolute_uri()
except SuspiciousOperation:
# attempt to build a URL for reporting as Django won't allow us to
# use get_host()
if request.is_secure():
scheme = 'https'
else:
scheme = 'http'
host = get_host(request)
uri = '%s://%s%s' % (scheme, host, request.path)
if request.method != 'GET':
try:
data = request.body
except:
try:
data = request.raw_post_data
except Exception:
# assume we had a partial read.
try:
data = request.POST or '<unavailable>'
except Exception:
data = '<unavailable>'
else:
data = None
environ = request.META
result.update({
'sentry.interfaces.Http': {
'method': request.method,
'url': uri,
'query_string': request.META.get('QUERY_STRING'),
'data': data,
'cookies': dict(request.COOKIES),
'headers': dict(get_headers(environ)),
'env': dict(get_environ(environ)),
}
})
return result
def build_msg(self, *args, **kwargs):
data = super(DjangoClient, self).build_msg(*args, **kwargs)
stacks = (
data.get('sentry.interfaces.Stacktrace'),
data.get('sentry.interfaces.Exception', {}).get('stacktrace'),
)
for stacktrace in filter(bool, stacks):
for frame in stacktrace['frames']:
module = frame.get('module')
if not module:
continue
if module.startswith('django.'):
frame['in_app'] = False
if not self.site and 'django.contrib.sites' in settings.INSTALLED_APPS:
try:
from django.contrib.sites.models import Site
site = Site.objects.get_current()
site_name = site.name or site.domain
data['tags'].setdefault('site', site_name)
except Exception:
# Database error? Fallback to the id
data['tags'].setdefault('site', settings.SITE_ID)
return data
def capture(self, event_type, request=None, **kwargs):
if 'data' not in kwargs:
kwargs['data'] = data = {}
else:
data = kwargs['data']
if request is None:
request = getattr(SentryLogMiddleware.thread, 'request', None)
is_http_request = isinstance(request, HttpRequest)
if is_http_request:
data.update(self.get_data_from_request(request))
if kwargs.get('exc_info'):
exc_value = kwargs['exc_info'][1]
# As of r16833 (Django) all exceptions may contain a ``django_template_source`` attribute (rather than the
# legacy ``TemplateSyntaxError.source`` check) which describes template information.
if hasattr(exc_value, 'django_template_source') or ((isinstance(exc_value, TemplateSyntaxError) and
isinstance(getattr(exc_value, 'source', None), (tuple, list)) and isinstance(exc_value.source[0], LoaderOrigin))):
source = getattr(exc_value, 'django_template_source', getattr(exc_value, 'source', None))
if source is None:
self.logger.info('Unable to get template source from exception')
data.update(get_data_from_template(source))
result = super(DjangoClient, self).capture(event_type, **kwargs)
if is_http_request and result:
# attach the sentry object to the request
request.sentry = {
'project_id': data.get('project', self.project),
'id': self.get_ident(result),
}
return result
def send(self, **kwargs):
"""
Serializes and signs ``data`` and passes the payload off to ``send_remote``
If ``servers`` was passed into the constructor, this will serialize the data and pipe it to
each server using ``send_remote()``. Otherwise, this will communicate with ``sentry.models.GroupedMessage``
directly.
"""
if self.servers:
return super(DjangoClient, self).send(**kwargs)
elif 'sentry' in settings.INSTALLED_APPS:
try:
return self.send_integrated(kwargs)
except Exception as e:
self.error_logger.error(
'Unable to record event: %s\nEvent was: %r', e,
kwargs['message'], exc_info=True)
def send_integrated(self, kwargs):
from sentry.models import Group
return Group.objects.from_kwargs(**kwargs)
| bsd-3-clause | 4,290,953,358,476,939,000 | 34.678947 | 129 | 0.569258 | false |
dallingham/regenerate | regenerate/writers/c_defines.py | 1 | 3670 | #
# Manage registers in a hardware design
#
# Copyright (C) 2008 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
CWriter - Writes out C defines representing the register addresses
"""
from writer_base import WriterBase, ExportInfo
from regenerate.extras import full_token, in_groups
import os
HEADER = [
"/*------------------------------------------------------------------\n",
" * File : $f$\n",
" * Author : $U$\n",
" * Created : $D$\n",
" * Block : $M$\n",
" *\n",
" * -----------------------------------------------------------------\n",
" * Copyright $Y$. All rights reserved.\n",
" *------------------------------------------------------------------\n",
" */\n",
"#ifndef __$F$\n",
"#define __$F$ 1\n",
"\n",
]
TRAILER = ["#endif\n"]
REG_TYPE = {
8: "unsigned char*",
16: "unsigned short*",
32: "unsigned long*",
64: "unsigned long long*",
}
class CDefines(WriterBase):
"""
Writes out C defines representing the register addresses
"""
def __init__(self, project, dbase):
WriterBase.__init__(self, project, dbase)
self._ofile = None
def write_def(self, reg, data, base):
"""
Writes the definition in the format of:
#define register (address)
"""
address = reg.address + base + data.base
if data.repeat > 1:
for i in range(0, data.repeat):
name = full_token(data.group, reg.token,
self._dbase.module_name, i, data.format)
                reg_address = address + (i * data.roffset)
                self._ofile.write("#define %-30s (*((volatile %s)0x%x))\n" %
                                  (name, REG_TYPE[reg.width], reg_address))
else:
name = full_token(data.group, reg.token, self._dbase.module_name,
-1, data.format)
self._ofile.write("#define %-30s (*((volatile %s)0x%x))\n" %
(name, REG_TYPE[reg.width], address))
def write(self, filename):
"""
Writes the output file
"""
self._filename = os.path.basename(filename)
with open(filename, "w") as self._ofile:
self.write_header(self._ofile, "".join(HEADER))
addr_maps = self._project.get_address_maps()
            base = 0
            if len(addr_maps) > 0:
                base = self._project.get_address_base(addr_maps[0].name)
for data in in_groups(self._dbase.module_name, self._project):
for register in self._dbase.get_all_registers():
self.write_def(register, data, base)
self._ofile.write('\n')
for line in TRAILER:
self._ofile.write('%s\n' % line.replace('$M$', self._module))
EXPORTERS = [
(WriterBase.TYPE_BLOCK, ExportInfo(CDefines, ("Header files", "C Source"),
"C header files", ".h", 'headers-c'))
]
| gpl-2.0 | -6,252,778,430,328,302,000 | 32.981481 | 79 | 0.53188 | false |
sh-chris110/chris | python/cube.py | 1 | 5099 | import sys
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from PIL import Image
class MyPyOpenGLTest:
def __init__(self,
width=640,
height=480,
title='MyPyOpenGLTest'.encode()):
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(width, height)
self.window = glutCreateWindow(title)
glutDisplayFunc(self.Draw)
glutIdleFunc(self.Draw)
self.InitGL(width, height)
self.x = 0.0
self.y = 0.0
self.z = 0.0
def Draw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glTranslate(0.0, 0.0, -5.0)
glRotatef(self.x, 1.0, 0.0, 0.0)
glRotatef(self.y, 0.0, 1.0, 0.0)
glRotatef(self.z, 0.0, 0.0, 1.0)
glBindTexture(GL_TEXTURE_2D, 0)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0)
glVertex3f(-1.0, -1.0, 1.0)
glTexCoord2f(1.0, 0.0)
glVertex3f(1.0, -1.0, 1.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(1.0, 1.0, 1.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(-1.0, 1.0, 1.0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 1)
glBegin(GL_QUADS)
glTexCoord2f(1.0, 0.0)
glVertex3f(-1.0, -1.0, -1.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(-1.0, 1.0, -1.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(1.0, 1.0, -1.0)
glTexCoord2f(0.0, 0.0)
glVertex3f(1.0, -1.0, -1.0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 2)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 1.0)
glVertex3f(-1.0, 1.0, -1.0)
glTexCoord2f(0.0, 0.0)
glVertex3f(-1.0, 1.0, 1.0)
glTexCoord2f(1.0, 0.0)
glVertex3f(1.0, 1.0, 1.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(1.0, 1.0, -1.0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 3)
glBegin(GL_QUADS)
glTexCoord2f(1.0, 1.0)
glVertex3f(-1.0, -1.0, -1.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(1.0, -1.0, -1.0)
glTexCoord2f(0.0, 0.0)
glVertex3f(1.0, -1.0, 1.0)
glTexCoord2f(1.0, 0.0)
glVertex3f(-1.0, -1.0, 1.0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 4)
glBegin(GL_QUADS)
glTexCoord2f(1.0, 0.0)
glVertex3f(1.0, -1.0, -1.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(1.0, 1.0, -1.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(1.0, 1.0, 1.0)
glTexCoord2f(0.0, 0.0)
glVertex3f(1.0, -1.0, 1.0)
glEnd()
glBindTexture(GL_TEXTURE_2D, 5)
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0)
glVertex3f(-1.0, -1.0, -1.0)
glTexCoord2f(1.0, 0.0)
glVertex3f(-1.0, -1.0, 1.0)
glTexCoord2f(1.0, 1.0)
glVertex3f(-1.0, 1.0, 1.0)
glTexCoord2f(0.0, 1.0)
glVertex3f(-1.0, 1.0, -1.0)
glEnd()
glutSwapBuffers()
self.x += 0.4
self.y += 0.6
self.z += 0.2
def LoadTexture(self):
imgFiles = [str(i)+'.jpeg' for i in range(1,7)]
for i in range(6):
img = Image.open(imgFiles[i])
width, height = img.size
img = img.tobytes('raw', 'RGBX', 0, -1)
glGenTextures(2)
glBindTexture(GL_TEXTURE_2D, i)
glTexImage2D(GL_TEXTURE_2D, 0, 4,
width, height, 0, GL_RGBA,
GL_UNSIGNED_BYTE,img)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_WRAP_S, GL_CLAMP)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_WRAP_T, GL_CLAMP)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D,
GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexEnvf(GL_TEXTURE_ENV,
GL_TEXTURE_ENV_MODE, GL_DECAL)
def InitGL(self, width, height):
self.LoadTexture()
glEnable(GL_TEXTURE_2D)
glClearColor(1.0, 1.0, 1.0, 0.0)
glClearDepth(1.0)
glDepthFunc(GL_LESS)
glShadeModel(GL_SMOOTH)
glEnable(GL_CULL_FACE)
glCullFace(GL_BACK)
glEnable(GL_POINT_SMOOTH)
glEnable(GL_LINE_SMOOTH)
glEnable(GL_POLYGON_SMOOTH)
glMatrixMode(GL_PROJECTION)
glHint(GL_POINT_SMOOTH_HINT,GL_NICEST)
glHint(GL_LINE_SMOOTH_HINT,GL_NICEST)
glHint(GL_POLYGON_SMOOTH_HINT,GL_FASTEST)
glLoadIdentity()
gluPerspective(45.0, float(width)/float(height), 0.1, 100.0)
glMatrixMode(GL_MODELVIEW)
def MainLoop(self):
glutMainLoop()
if __name__ == '__main__':
w = MyPyOpenGLTest()
w.MainLoop()
| gpl-2.0 | -7,546,414,518,466,473,000 | 30.670807 | 68 | 0.513434 | false |
m1093782566/openstack_org_ceilometer | ceilometer/openstack/common/fileutils.py | 1 | 4033 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
import tempfile
from oslo.utils import excutils
from ceilometer.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
def ensure_tree(path):
"""Create a directory (and any ancestor directories required)
:param path: Directory to create
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
if not os.path.isdir(path):
raise
else:
raise
def read_cached_file(filename, force_reload=False):
"""Read from a file if it has been modified.
:param force_reload: Whether to reload the file.
:returns: A tuple with a boolean specifying if the data is fresh
or not.
"""
global _FILE_CACHE
if force_reload:
delete_cached_file(filename)
reloaded = False
mtime = os.path.getmtime(filename)
cache_info = _FILE_CACHE.setdefault(filename, {})
if not cache_info or mtime > cache_info.get('mtime', 0):
LOG.debug("Reloading cached file %s" % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
reloaded = True
return (reloaded, cache_info['data'])
def delete_cached_file(filename):
"""Delete cached file if present.
:param filename: filename to delete
"""
global _FILE_CACHE
if filename in _FILE_CACHE:
del _FILE_CACHE[filename]
def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
:param path: File to delete
:param remove: Optional function to remove passed path
"""
try:
remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
:param path: File to work with
:param remove: Optional function to remove passed path
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
remove(path)
def file_open(*args, **kwargs):
"""Open file
see built-in open() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return open(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
"""Create temporary file or use existing file.
This util is needed for creating temporary file with
specified content, suffix and prefix. If path is not None,
it will be used for writing content. If the path doesn't
exist it'll be created.
:param content: content for temporary file.
:param path: same as parameter 'dir' for mkstemp
:param suffix: same as parameter 'suffix' for mkstemp
:param prefix: same as parameter 'prefix' for mkstemp
For example: it can be used in database tests for creating
configuration files.
"""
if path:
ensure_tree(path)
(fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
try:
os.write(fd, content)
finally:
os.close(fd)
return path
| apache-2.0 | 1,907,550,172,417,568,500 | 26.435374 | 78 | 0.661294 | false |
khughitt/cats | test/formatters/test_fasta.py | 1 | 1635 | """
FASTAFormatter tests.
"""
import cats
import io
import os
import pytest
class TestFASTAFormatter:
@pytest.mark.parametrize('filename', [
'dna.fasta',
'dna_grep.fasta',
'dna.txt',
'dna_grep.txt',
'dna_zgrep.txt',
'rna.txt',
'rna_grep.txt',
'protein.fasta'
])
def test_format(self, filename):
"""Test FASTA formatting"""
testdir = os.path.abspath(os.path.join(os.getcwd(),
__file__, '..', '..'))
# input
infile = os.path.join(testdir, 'input', filename)
# output
output = io.StringIO()
cats.format(infile, outbuffer=output, theme='default')
output.seek(0)
with open(os.path.join(testdir, 'output', filename)) as fp:
expected = fp.read()
assert str(output.read()) == expected
@pytest.mark.parametrize('input_file,output_file', [
('dna.fasta', 'dna.fasta-trans')
])
def test_fasta_translate(self, input_file, output_file):
"""Test FASTA DNA->Protein translation"""
testdir = os.path.abspath(os.path.join(os.getcwd(),
__file__, '..', '..'))
# input
infile = os.path.join(testdir, 'input', input_file)
# output
output = io.StringIO()
cats.format(infile, outbuffer=output, theme='default', translate=True)
output.seek(0)
with open(os.path.join(testdir, 'output', output_file)) as fp:
expected = fp.read()
assert str(output.read()) == expected
| bsd-2-clause | 1,545,673,285,899,150,600 | 28.196429 | 78 | 0.529052 | false |
USDA-ARS-NWRC/AWSF | awsm/interface/smrf_ipysnobal.py | 1 | 12538 | """
Functions for running PySnobal, as well as SMRF and PySnobal
threaded together
20170731 Micah Sandusky
"""
from datetime import datetime
import numpy as np
import pandas as pd
import pytz
import smrf.framework
from topocalc.shade import shade
from smrf.envphys import sunang
from smrf.utils import queue
from awsm.interface import ipysnobal, interface, initialize_model as initmodel, \
pysnobal_io as io_mod
from awsm.interface.ingest_data import StateUpdater
from pysnobal.c_snobal import snobal
def run_ipysnobal(myawsm):
"""
Function to run PySnobal from netcdf of ipw forcing data,
not from SMRF instance.
Args:
myawsm: awsm class
"""
# initialize ipysnobal state
# get dem
dem = myawsm.topo.dem
myawsm._logger.info('Initializing from files')
options, params, tstep_info, init, output_rec = \
ipysnobal.init_from_smrf(myawsm, dem=dem)
data_tstep = tstep_info[0]['time_step']
timeSinceOut = 0.0
start_step = 0 # if restart then it would be higher if this were iSnobal
step_time = start_step * data_tstep
output_rec['current_time'] = step_time * \
np.ones(output_rec['elevation'].shape)
output_rec['time_since_out'] = timeSinceOut * \
np.ones(output_rec['elevation'].shape)
myawsm._logger.info('getting inputs for first timestep')
if myawsm.forcing_data_type == 'netcdf':
force = io_mod.open_files_nc(myawsm)
input1 = initmodel.get_timestep_netcdf(
force, options['time']['date_time'][0])
else:
input_list, ppt_list = io_mod.open_files_ipw(myawsm)
input1 = initmodel.get_timestep_ipw(options['time']['date_time'][0],
input_list, ppt_list, myawsm)
# initialize updater if required
if myawsm.update_depth:
updater = StateUpdater(myawsm)
else:
updater = None
myawsm._logger.info('starting PySnobal time series loop')
j = 1
# run PySnobal
for tstep in options['time']['date_time'][1:]:
# for tstep in options['time']['date_time'][953:958]:
myawsm._logger.info('running PySnobal for timestep: {}'.format(tstep))
if myawsm.forcing_data_type == 'netcdf':
input2 = initmodel.get_timestep_netcdf(force, tstep)
else:
input2 = initmodel.get_timestep_ipw(
tstep, input_list, ppt_list, myawsm)
first_step = j
# update depth if necessary
if updater is not None:
if tstep in updater.update_dates:
start_z = output_rec['z_s'].copy()
output_rec = \
updater.do_update_pysnobal(output_rec, tstep)
first_step = 1
rt = snobal.do_tstep_grid(input1, input2, output_rec, tstep_info,
options['constants'], params, first_step=first_step,
nthreads=myawsm.ipy_threads)
if rt != -1:
raise ValueError(
'ipysnobal error on time step %s, pixel %i' % (tstep, rt))
# break
input1 = input2.copy()
# output at the frequency and the last time step
if ((j)*(data_tstep/3600.0) % options['output']['frequency'] == 0) \
or (j == len(options['time']['date_time']) - 1):
myawsm._logger.info('Outputting {}'.format(tstep))
io_mod.output_timestep(output_rec, tstep, options,
myawsm.pysnobal_output_vars)
output_rec['time_since_out'] = np.zeros(
output_rec['elevation'].shape)
myawsm._logger.info('Finished timestep: {}'.format(tstep))
j += 1
# if input has run_for_nsteps, make sure not to go past it
if myawsm.run_for_nsteps is not None:
if j > myawsm.run_for_nsteps:
break
# close input files
if myawsm.forcing_data_type == 'netcdf':
io_mod.close_files(force)
def run_smrf_ipysnobal(myawsm):
"""
Function to run SMRF and pass outputs in memory to python wrapped
iSnobal.
Args:
myawsm: AWSM instance
"""
# first create config to run smrf
smrf_cfg = interface.create_smrf_config(myawsm)
# start = datetime.now()
# initialize
with smrf.framework.SMRF(smrf_cfg, myawsm._logger) as s:
# if input has run_for_nsteps, make sure not to go past it
if myawsm.run_for_nsteps is not None:
change_in_hours = int(myawsm.run_for_nsteps *
s.config['time']['time_step']/60)
# recalculate end_date before initializing run
s.end_date = s.start_date + pd.to_timedelta(change_in_hours - 1,
unit='h')
myawsm.end_date = s.end_date
s.date_time = s.date_time[:myawsm.run_for_nsteps]
s.time_steps = myawsm.run_for_nsteps
# load topo data
s.loadTopo()
# 3. initialize the distribution
s.create_distribution()
# load weather data and station metadata
s.loadData()
# run threaded or not
if s.threading:
run_smrf_ipysnobal_threaded(myawsm, s)
else:
run_smrf_ipysnobal_single(myawsm, s)
s._logger.debug('DONE!!!!')
def run_smrf_ipysnobal_single(myawsm, s):
"""
Running smrf and PySnobal in non-threaded application.
Args:
myawsm: awsm class
s: smrf class
"""
# -------------------------------------
    # Initialize the distribution
s.initialize_distribution()
# -------------------------------------
# initialize ipysnobal state
options, params, tstep_info, init, output_rec = \
ipysnobal.init_from_smrf(myawsm, s)
# -------------------------------------
# create variable list
force_variables = ['thermal', 'air_temp', 'vapor_pressure', 'wind_speed',
'net_solar', 'soil_temp', 'precip', 'percent_snow',
'snow_density', 'precip_temp']
# Collect the potential output variables
possible_output_variables = {}
for variable, module in s.distribute.items():
possible_output_variables.update(module.output_variables)
variable_list = {}
for force_variable in force_variables:
if force_variable in possible_output_variables.keys():
module = possible_output_variables[force_variable]['module']
variable_list[force_variable] = {
'variable': force_variable,
'module': module
}
else:
raise ValueError('Not distributing necessary '
'variables to run PySnobal!')
# -------------------------------------
# initialize updater if required
if myawsm.update_depth:
updater = StateUpdater(myawsm)
else:
updater = None
# initialize pysnobal run class
my_pysnobal = ipysnobal.PySnobal(s.date_time,
variable_list,
myawsm.pysnobal_output_vars,
options,
params,
tstep_info,
init,
output_rec,
s.topo.nx,
s.topo.ny,
myawsm.soil_temp,
myawsm._logger,
myawsm.tzinfo)
# -------------------------------------
# Distribute the data
for output_count, t in enumerate(s.date_time):
# wait here for the model to catch up if needed
startTime = datetime.now()
s._logger.info('Distributing time step %s' % t)
# 0.1 sun angle for time step
cosz, azimuth, rad_vec = sunang.sunang(
t.astimezone(pytz.utc),
s.topo.basin_lat,
s.topo.basin_long,
)
# 0.2 illumination angle
illum_ang = None
if cosz > 0:
illum_ang = shade(
s.topo.sin_slope,
s.topo.aspect,
azimuth,
cosz)
# 1. Air temperature
s.distribute['air_temp'].distribute(s.data.air_temp.loc[t])
# 2. Vapor pressure
s.distribute['vapor_pressure'].distribute(
s.data.vapor_pressure.loc[t],
s.distribute['air_temp'].air_temp)
# 3. Wind_speed and wind_direction
s.distribute['wind'].distribute(
s.data.wind_speed.loc[t],
s.data.wind_direction.loc[t],
t)
# 4. Precipitation
s.distribute['precipitation'].distribute(
s.data.precip.loc[t],
s.distribute['vapor_pressure'].dew_point,
s.distribute['vapor_pressure'].precip_temp,
s.distribute['air_temp'].air_temp,
t,
s.data.wind_speed.loc[t],
s.data.air_temp.loc[t],
s.distribute['wind'].wind_direction,
s.distribute['wind'].wind_model.dir_round_cell,
s.distribute['wind'].wind_speed,
s.distribute['wind'].wind_model.cellmaxus
)
# 5. Albedo
s.distribute['albedo'].distribute(
t,
illum_ang,
s.distribute['precipitation'].storm_days
)
# 6. cloud factor
s.distribute['cloud_factor'].distribute(s.data.cloud_factor.loc[t])
# 7. solar
s.distribute['solar'].distribute(
t,
s.distribute["cloud_factor"].cloud_factor,
illum_ang,
cosz,
azimuth,
s.distribute['albedo'].albedo_vis,
s.distribute['albedo'].albedo_ir)
# 7. thermal radiation
if s.distribute['thermal'].gridded and \
s.config['gridded']['data_type'] != 'hrrr_grib':
s.distribute['thermal'].distribute_thermal(
s.data.thermal.loc[t],
s.distribute['air_temp'].air_temp)
else:
s.distribute['thermal'].distribute(
t,
s.distribute['air_temp'].air_temp,
s.distribute['vapor_pressure'].vapor_pressure,
s.distribute['vapor_pressure'].dew_point,
s.distribute['cloud_factor'].cloud_factor)
# 8. Soil temperature
s.distribute['soil_temp'].distribute()
# 9. pass info to PySnobal
if output_count == 0:
my_pysnobal.run_single_fist_step(s)
elif output_count > 0:
my_pysnobal.run_single(t, s, updater)
else:
raise ValueError('Problem with times in run ipysnobal single')
telapsed = datetime.now() - startTime
s._logger.debug('{0:.2f} seconds for time step'
.format(telapsed.total_seconds()))
s.forcing_data = 1
def run_smrf_ipysnobal_threaded(myawsm, s):
"""
Function to run SMRF (threaded) and pass outputs in memory to python wrapped
    iSnobal. iPySnobal has replaced the output queue in this implementation.
Args:
myawsm: AWSM instance
s: SMRF instance
"""
# initialize ipysnobal state
options, params, tstep_info, init, output_rec = \
ipysnobal.init_from_smrf(myawsm, s)
s.create_data_queue()
s.set_queue_variables()
s.create_distributed_threads(['isnobal'])
s.smrf_queue['isnobal'] = queue.DateQueueThreading(
s.queue_max_values,
s.time_out,
name='isnobal')
del s.smrf_queue['output']
# initialize updater if required
if myawsm.update_depth:
updater = StateUpdater(myawsm)
else:
updater = None
# isnobal thread
s.threads.append(ipysnobal.QueueIsnobal(
s.smrf_queue,
s.date_time,
s.thread_queue_variables,
myawsm.pysnobal_output_vars,
options,
params,
tstep_info,
init,
output_rec,
s.topo.nx,
s.topo.ny,
myawsm.soil_temp,
myawsm._logger,
myawsm.tzinfo,
updater))
# the cleaner
s.threads.append(queue.QueueCleaner(s.date_time, s.smrf_queue))
# start all the threads
for i in range(len(s.threads)):
s.threads[i].start()
for i in range(len(s.threads)):
s.threads[i].join()
| gpl-3.0 | -4,230,425,102,818,562,600 | 30.822335 | 86 | 0.544664 | false |
XianliangJ/collections | DCTCPTest/plot_queue.py | 1 | 2255 | '''
Plot queue occupancy over time
'''
from helper import *
import plot_defaults
from matplotlib.ticker import MaxNLocator
from pylab import figure
parser = argparse.ArgumentParser()
parser.add_argument('--files', '-f',
help="Queue timeseries output to one plot",
required=True,
action="store",
nargs='+',
dest="files")
parser.add_argument('--legend', '-l',
help="Legend to use if there are multiple plots. File names used as default.",
action="store",
nargs="+",
default=None,
dest="legend")
parser.add_argument('--out', '-o',
help="Output png file for the plot.",
default=None, # Will show the plot
dest="out")
parser.add_argument('--labels',
help="Labels for x-axis if summarising; defaults to file names",
required=False,
default=[],
nargs="+",
dest="labels")
parser.add_argument('--every',
help="If the plot has a lot of data points, plot one of every EVERY (x,y) point (default 1).",
default=1,
type=int)
args = parser.parse_args()
if args.legend is None:
args.legend = []
for file in args.files:
args.legend.append(file)
to_plot=[]
def get_style(i):
if i == 0:
return {'color': 'red'}
else:
return {'color': 'blue', 'ls': '-.'}
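# Note: read_list() and col() come from the star import of helper above; the
# `m` used below is presumably the matplotlib module exposed the same way.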
m.rc('figure', figsize=(16, 6))
fig = figure()
ax = fig.add_subplot(111)
for i, f in enumerate(args.files):
data = read_list(f)
xaxis = map(float, col(0, data))
start_time = xaxis[0]
xaxis = map(lambda x: x - start_time, xaxis)
qlens = map(float, col(1, data))
xaxis = xaxis[::args.every]
qlens = qlens[::args.every]
ax.plot(xaxis, qlens, lw=2, **get_style(i))
ax.xaxis.set_major_locator(MaxNLocator(4))
plt.legend(args.legend)
plt.ylabel("Queue occupancy (bytes)")
plt.grid(True)
plt.xlabel("Time elapsed (in sec)")
if args.out:
print 'saving to', args.out
plt.savefig(args.out)
else:
plt.show()
| gpl-3.0 | 1,796,765,571,080,518,000 | 26.839506 | 114 | 0.534812 | false |
diggcoin/diggcoin | qa/rpc-tests/p2p-acceptblock.py | 1 | 12336 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b. Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("DIGGCOIND", "diggcoind"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
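        # getbestblockhash() returns a hex string; wrapping it as "0x...L" lets
        # Python 2's int(..., 0) parse it into a long usable as a prev-block hash.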
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print "Unrequested block too far-ahead not processed"
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
| mit | -6,203,074,944,724,320,000 | 41.537931 | 107 | 0.634484 | false |
hunter-87/binocular-dense-stereo | sfm_templeRing.py | 1 | 4749 | import cv2
import numpy as np
import scipy.spatial
#!/usr/bin/env python
#coding: utf8
import os
from matplotlib.pyplot import subplot
import matplotlib.pyplot as plt
# figsize(12,8)
T1 = cv2.imread('../dataset_templeRing/templeR0034.png', cv2.IMREAD_GRAYSCALE)
sift = cv2.SIFT(nfeatures=5000)
kpts1, D_i = sift.detectAndCompute(T1, mask=None)
K1 = np.array([[k.pt[0], k.pt[1]] for k in kpts1])
T2 = cv2.imread('../dataset_templeRing/templeR0036.png', cv2.IMREAD_GRAYSCALE)
sift = cv2.SIFT(nfeatures=5000)
kpts2, D_j = sift.detectAndCompute(T2, mask=None)
K2 = np.array([[k.pt[0], k.pt[1]] for k in kpts2])
subplot(1,2,1)
plt.plot(K1[:,0], K1[:,1], 'rx')
plt.imshow(T1, cmap=plt.cm.gray)
plt.title('Temple 34')
subplot(1,2,2)
plt.plot(K2[:,0], K2[:,1], 'rx')
plt.imshow(T2, cmap=plt.cm.gray)
plt.title('Temple 36')
from sklearn.decomposition import PCA
pca = PCA(n_components=10)
pca.fit(D_i)
D_i = pca.transform(D_i)
D_j = pca.transform(D_j)
import scipy.spatial
kdtree_j = scipy.spatial.cKDTree(D_j)
N_i = D_i.shape[0]
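# Lowe-style ratio test: query the two nearest descriptors in image J for every
# descriptor of image I and keep a match only when the best distance is well
# below the second best (ratio < 0.6).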
d, nn = kdtree_j.query(D_i, k=2)
ratio_mask = d[:,0]/d[:,1] < 0.6
m = np.vstack((np.arange(N_i), nn[:,0])).T
m = m[ratio_mask]
# Filtering: If more than one feature in I matches the same feature in J,
# we remove all of these matches
h = {nj:0 for nj in m[:,1]}
for nj in m[:,1]:
h[nj] += 1
m = np.array([(ni, nj) for ni, nj in m if h[nj] == 1])
def rcolor():
return (np.random.rand(),np. random.rand(), np.random.rand())
def show_matches(matches):
n_rows, n_cols = T1.shape
display = np.zeros( (n_rows, 2 * n_cols), dtype=np.uint8 )
display[:,0:n_cols] = T1
display[:,n_cols:] = T2
for pi, pj in matches:
cv2.plot([K1[pi][0], K2[pj][0] + n_cols],
[K1[pi][1], K2[pj][1]],
marker='o', linestyle='-', color=rcolor())
cv2.imshow(display, cmap=np.cm.gray)
show_matches(m)
xi = K1[m[:,0],:]
xj = K2[m[:,1],:]
F, status = cv2.findFundamentalMat(xi, xj, cv2.FM_RANSAC, 0.5, 0.9)
assert(abs(np.linalg.det(F)) < 1.e-7)
is_inlier = np.array(status == 1).reshape(-1)
inlier_i = xi[is_inlier]
inlier_j = xj[is_inlier]
hg = lambda x : np.array([x[0], x[1], 1])
K = np.array([[1520.4, 0., 302.32],
[0, 1525.9, 246.87],
[0, 0, 1]])
E = np.dot(K.T, np.dot(F, K))
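# E = K^T F K converts the fundamental matrix into an essential matrix; the SVD
# below re-projects it onto the essential manifold (two equal singular values,
# third one zero) before decomposing it into rotation/translation candidates.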
U, s, VT = np.linalg.svd(E)
if np.linalg.det(np.dot(U, VT)) < 0:
VT = -VT
E = np.dot(U, np.dot(np.diag([1,1,0]), VT))
V = VT.T
# Let's check Nister (2004) Theorem 3 constraint:
assert(np.linalg.det(U) > 0)
assert(np.linalg.det(V) > 0)
# Nister (2004) Theorem 2 ("Essential Condition")
assert np.abs(np.dot(E, np.dot(E.T, E)) - 0.5 * np.trace(np.dot(E, E.T)) * E).max() < 1.0e-10
def dlt_triangulation(ui, Pi, uj, Pj):
"""Hartley & Zisserman, 12.2"""
ui /= ui[2]
xi, yi = ui[0], ui[1]
uj /= uj[2]
xj, yj = uj[0], uj[1]
a0 = xi * Pi[2,:] - Pi[0,:]
a1 = yi * Pi[2,:] - Pi[1,:]
a2 = xj * Pj[2,:] - Pj[0,:]
a3 = yj * Pj[2,:] - Pj[1,:]
A = np.vstack((a0, a1, a2, a3))
U, s, VT = np.linalg.svd(A)
V = VT.T
X3d = V[:,-1]
return X3d/X3d[3]
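# depth() returns the signed depth of the homogeneous point X in the camera P;
# a positive value means the point lies in front of the camera, which is how
# get_proj_matrices() picks the physically valid (R, t) decomposition.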
def depth(X, P):
T = X[3]
M = P[:,0:3]
p4 = P[:,3]
m3 = M[2,:]
x = np.dot(P, X)
w = x[2]
X = X/w
    return (np.sign(np.linalg.det(M)) * w) / (T*np.linalg.norm(m3))
def get_proj_matrices(E, K, xi, xj):
hg = lambda x : np.array([x[0], x[1], 1])
W = np.array([[0., -1., 0.],
[1., 0., 0.],
[0., 0., 1.]])
Pi = np.dot(K, np.hstack( (np.identity(3), np.zeros((3,1))) ))
U, s, VT = np.linalg.svd(E)
u3 = U[:,2].reshape(3,1)
# Candidates
Pa = np.dot(K, np.hstack((np.dot(U, np.dot(W ,VT)), u3)))
Pb = np.dot(K, np.hstack((np.dot(U, np.dot(W ,VT)), -u3)))
Pc = np.dot(K, np.hstack((np.dot(U, np.dot(W.T ,VT)), u3)))
Pd = np.dot(K, np.hstack((np.dot(U, np.dot(W.T ,VT)), -u3)))
# Find the camera for which the 3D points are *in front*
xxi, xxj = hg(xi[0]), hg(xj[0])
Pj = None
for Pk in [Pa, Pb, Pc, Pd]:
Q = dlt_triangulation(xxi, Pi, xxj, Pk)
if depth(Q, Pi) > 0 and depth(Q, Pk) > 0:
Pj = Pk
break
assert(Pj is not None)
return Pi, Pj
P1, P2 = get_proj_matrices(E, K, inlier_i, inlier_j)
X = []
for xxi, xxj in zip(inlier_i, inlier_j):
X_k = dlt_triangulation(hg(xxi), P1, hg(xxj), P2)
X.append(X_k)
X = np.array(X)
num_pix = X.shape[0]
pix_color = [rcolor() for k in range(num_pix)]
pix = np.dot(P2, X.T).T
pix = np.divide(pix, pix[:,2].reshape(num_pix, -1))
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
subplot(1,2,1)
for k in range(num_pix):
    plt.plot(pix[k,0], pix[k,1], color=pix_color[k], marker='o')
plt.imshow(T1, cmap=plt.cm.gray)
ax = fig.add_subplot(1, 2, 2, projection='3d')
ax.scatter(X[:,0], X[:,1], X[:,2], zdir='z', c=pix_color) | gpl-2.0 | -5,787,413,777,145,128,000 | 25.836158 | 84 | 0.562855 | false |
jeffpiazza/derbynet | extras/scripts/lib/read_barcode.py | 1 | 2226 | #!/usr/bin/env python
import struct
import sys
scanner_device = sys.argv[1]
# Values taken from include/uapi/linux/input-event-codes.h
keys = {
2: '1', 3: '2', 4: '3', 5: '4', 6: '5', 7: '6', 8: '7', 9: '8', 10: '9', 11: '0', \
12: '-', 13: '=', \
16: 'q', 17: 'w', 18: 'e', 19: 'r', 20: 't', 21: 'y', 22: 'u', 23: 'i', 24: 'o', 25: 'p', \
26: '[', 27: ']', \
30: 'a', 31: 's', 32: 'd', 33: 'f', 34: 'g', 35: 'h', 36: 'j', 37: 'k', 38: 'l', 39: ';', \
40: '\'', 43: '\\', \
44: 'z', 45: 'x', 46: 'c', 47: 'v', 48: 'b', 49: 'n', 50: 'm', 51: ',', 52: '.', 53: '/', \
57: ' ' }
shift_keys = {
2: '!', 3: '@', 4: '#', 5: '$', 6: '%', 7: '^', 8: '&', 9: '*', 10: '(', 11: ')', \
12: '_', 13: '+', \
16: 'Q', 17: 'W', 18: 'E', 19: 'R', 20: 'T', 21: 'Y', 22: 'U', 23: 'I', 24: 'O', 25: 'P', \
26: '{', 27: '}', \
30: 'A', 31: 'S', 32: 'D', 33: 'F', 34: 'G', 35: 'H', 36: 'J', 37: 'K', 38: 'L', 39: ';', \
40: '\"', 43: '|', \
44: 'Z', 45: 'X', 46: 'C', 47: 'V', 48: 'B', 49: 'N', 50: 'M', 51: '<', 52: '>', 53: '?', \
57: ' ' }
KEY_ENTER = 28
KEY_LEFTSHIFT = 42
KEY_RIGHTSHIFT = 54
EV_VALUE_KEY_RELEASED = 0
EV_VALUE_KEY_PRESSED = 1
EV_VALUE_KEY_AUTOREPEAT = 2
EV_KEY = 1
# EV_SYN = 0
# EV_MSC = 4
# 4IHHI on 64-bit machines; each of the other INEV_ indices would increase by 2
INEV_STRUCT = '2IHHI'
# Offsets in the input_event struct
#INEV_XX0 = 0
#INEV_XX1 = 1
INEV_TYPE = 2
INEV_CODE = 3
INEV_VALUE = 4
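# Each evdev input_event is (tv_sec, tv_usec, type, code, value); INEV_TYPE,
# INEV_CODE and INEV_VALUE index those fields in the unpacked tuple.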
ss = ""
with open(scanner_device, 'rb') as fp:
shift = False
done = False
while not done:
buffer = fp.read(struct.calcsize(INEV_STRUCT))
ev = struct.unpack(INEV_STRUCT, buffer)
if ev[INEV_TYPE] != EV_KEY:
continue
is_keypress = ev[INEV_VALUE] == EV_VALUE_KEY_PRESSED or \
ev[INEV_VALUE] == EV_VALUE_KEY_AUTOREPEAT
# print ev
if ev[INEV_CODE] == KEY_LEFTSHIFT or ev[INEV_CODE] == KEY_RIGHTSHIFT:
shift = is_keypress
elif is_keypress:
if ev[INEV_CODE] == KEY_ENTER:
done = True
elif shift and ev[INEV_CODE] in shift_keys:
ss += shift_keys[ev[INEV_CODE]]
elif ev[INEV_CODE] in keys:
ss += keys[ev[INEV_CODE]]
print ss
| mit | -1,319,456,822,116,887,600 | 29.081081 | 91 | 0.469901 | false |
jbloom/mapmuts | tests/test_pyPdf.py | 1 | 1037 | """Tests for availability of ``pyPdf``.
Written by Jesse Bloom, 2013.
"""
import sys
import unittest
import mapmuts.weblogo
class TestPyPdfAvailable(unittest.TestCase):
"""Tests for availability of ``pyPdf``."""
def test_Import(self):
"""Is ``pyPdf`` available?
"""
sys.stderr.write("\nTesting if pyPdf is available...")
failmsg = 'FAILED to find pyPdf in current search path. '\
+ 'Creation of sequence logos with overlays will not'\
+ ' be supported. Other aspects of '\
+ 'the mapmuts package can still be used.\n'
        succeedmsg = 'pyPdf is available, sequence logos with overlays can be '\
+ 'created.'
if mapmuts.weblogo.PyPdfAvailable():
sys.stderr.write(succeedmsg)
else:
sys.stderr.write(failmsg)
self.assertTrue(mapmuts.weblogo.PyPdfAvailable(), msg=failmsg)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
unittest.main(testRunner=runner)
| gpl-3.0 | -3,047,990,469,092,769,000 | 30.424242 | 82 | 0.613308 | false |
rwth-ti/gr-ofdm | python/ofdm/qa_midamble_insert.py | 1 | 1229 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import ofdm_swig as ofdm
class qa_midamble_insert (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_midamble_insert, "qa_midamble_insert.xml")
| gpl-3.0 | 670,335,775,076,355,300 | 28.97561 | 70 | 0.692433 | false |
kubeflow/kfserving | docs/samples/v1beta1/triton/torchscript/image_transformer_v2/image_transformer_v2.py | 1 | 1981 | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfserving
from typing import Dict
from PIL import Image
import torchvision.transforms as transforms
import logging
import io
import numpy as np
import base64
logging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def image_transform(instance):
byte_array = base64.b64decode(instance['image_bytes']['b64'])
image = Image.open(io.BytesIO(byte_array))
a = np.asarray(image)
im = Image.fromarray(a)
res = transform(im)
logging.info(res)
return res.tolist()
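# Each request instance is expected to look like
#   {"image_bytes": {"b64": "<base64-encoded image>"}}
# and image_transform() returns a normalized 3x32x32 tensor (matching the
# [1, 3, 32, 32] shape declared for Triton's INPUT__0 below) as a nested list.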
class ImageTransformerV2(kfserving.KFModel):
def __init__(self, name: str, predictor_host: str, protocol: str):
super().__init__(name)
self.predictor_host = predictor_host
self.protocol = protocol
def preprocess(self, inputs: Dict) -> Dict:
return {
'inputs': [
{
'name': 'INPUT__0',
'shape': [1, 3, 32, 32],
'datatype': "FP32",
'data': [image_transform(instance) for instance in inputs['instances']]
}
]
}
def postprocess(self, results: Dict) -> Dict:
return {output["name"]: np.array(output["data"]).reshape(output["shape"]).tolist()
for output in results["outputs"]}
| apache-2.0 | -6,574,397,262,375,359,000 | 31.47541 | 90 | 0.647148 | false |
ipselium/cpyvke | cpyvke/objects/panel.py | 1 | 28277 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © 2016-2018 Cyril Desjouy <ipselium@free.fr>
#
# This file is part of cpyvke
#
# cpyvke is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cpyvke is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with cpyvke. If not, see <http://www.gnu.org/licenses/>.
#
#
# Creation Date : Mon Nov 14 09:08:25 2016
# Last Modified : mar. 03 avril 2018 15:55:45 CEST
"""
-----------
DOCSTRING
@author: Cyril Desjouy
"""
import time
import locale
import curses
import abc
from curses import panel
from math import ceil
from cpyvke.curseswin.widgets import Help
from cpyvke.curseswin.prompt import Prompt
from cpyvke.curseswin.app import check_size
from cpyvke.utils.kd import restart_daemon
from cpyvke.utils.display import format_cell
from cpyvke.utils.comm import send_msg
code = locale.getpreferredencoding()
class BasePanel(abc.ABC):
""" Generic Panel.
"""
def __init__(self, app, sock, logger):
""" Class Constructor """
# Instance arguments
self.app = app
self.sock = sock
self.logger = logger
# Init constants
self.resize = False
self.pkey = -1
# Init Prompt
self.prompt = Prompt(self.app)
self.prompt_time = 0
self.prompt_msg = ''
# Update dimensions
self.screen_height, self.screen_width = self.app.stdscr.getmaxyx() # Local dimensions
# Init subwin
self.gwin = self.app.stdscr.subwin(self.app.panel_height, self.app.screen_width, 0, 0)
self.gwin.keypad(1)
# Init Panel
self.gpan = panel.new_panel(self.gwin)
self.gpan.hide()
@property
@abc.abstractmethod
def title(self):
""" Panel title. Must return a string """
@property
@abc.abstractmethod
def panel_name(self):
""" Panel reference name. Must return a string """
@abc.abstractmethod
def color(self, item):
""" Panel colors. Required :
* for BasePanel : 'txt', 'bdr', 'ttl', 'hh', 'pwf'
* for ListPanel : 'co', 'al', 'di'
"""
@abc.abstractmethod
def fill_main_box(self):
""" Fill the main box """
def display(self):
""" Display the panel. """
try:
self.pkey = -1
while self.app.close_signal == 'continue':
self.tasks()
self.app.shutdown()
except Exception:
self.app.exit_with_error()
@check_size
def tasks(self):
""" List of tasks at each iteration """
self.resize_curses()
# Check switch panel
if self.app.explorer_switch:
self.app.explorer_switch = False
self.app.kernel_win.display()
self.resize_curses(True)
elif self.app.kernel_switch:
self.app.kernel_switch = False
self.app.explorer_win.display()
self.resize_curses(True)
else:
# Check Connection to daemon
self.sock.check_main_socket()
# Keys
self.common_key_bindings()
# Decrease delay right here to avoid some waiting at the getch when not
# in switch mode. If switch, the halfdelay is set to its normal value
# just after, in the refresh() method !
curses.halfdelay(1)
# Skip end of tasks if switching panel !
if not self.app.explorer_switch and not self.app.kernel_switch and self.app.close_signal == "continue":
# Update screen size if another menu break because of resizing
self.resize_curses()
# Update all static panels
self.refresh()
# Get pressed key (even in case of switch)
self.pkey = self.app.stdscr.getch()
def refresh(self):
""" Refresh all objects. """
# Erase all windows
self.gwin.erase()
self.app.stdscr.erase()
# Create border before updating fields
self.gwin.border(0)
# Fill the main box !
self.fill_main_box()
# Update all windows
if self.app.debug:
self.app.dbg_pad(self.pkey)
# Update infos -- Bottom
self.app.status_bar()
self.prompt_msg_display()
self.app.stdscr.refresh()
self.gwin.refresh()
# Reactive timeout for getch
curses.halfdelay(self.app.curse_delay)
def common_key_bindings(self):
""" Common key bindings """
# Custom key bindings
self.custom_key_bindings()
# Socket actions
self.socket_key_bindings()
# Item list
self.list_key_bindings()
# Menu Help
if self.pkey == 63: # -> ?
help_menu = Help(self.app)
help_menu.display()
# Prompt
elif self.pkey == 58: # -> :
self.cmd = self.prompt.with_completion(chr(self.pkey))
self.prompt_cmd()
# Send code
elif self.pkey == 120: # -> x
self.send_code()
# Debug Pad
elif self.pkey == 100: # -> d
self.toggle_debug()
def socket_key_bindings(self):
""" Socket actions key bindings. """
if self.pkey == 82: # -> R
self.daemon_restart_connection()
elif self.pkey == 68: # -> D
self.daemon_disconnect()
elif self.pkey == 67: # -> C
self.daemon_connect()
elif self.pkey == 18: # -> c-r
self.daemon_restart()
def list_key_bindings(self):
""" Not available for BasePanel. See List ListPanel """
pass
def custom_key_bindings(self):
""" Key bindings : To overload """
pass
def prompt_msg_display(self):
""" Erase prompt message after some delay """
if self.prompt_msg and time.time() - self.prompt_time > 3:
self.prompt_msg = ''
else:
self.prompt.display(self.prompt_msg)
def prompt_msg_setup(self, msg):
""" Set up the message to display in the prompt """
self.prompt_msg = msg
self.prompt_time = time.time()
def prompt_cmd(self):
""" Actions for prompt """
if not self.cmd:
pass
elif self.cmd in ["q", "quit"]:
self.app.close_signal = 'close'
elif self.cmd in ["Q", "Quit"]:
self.app.close_signal = 'shutdown'
elif self.cmd in ['k', 'K', 'kernel-manager']:
self.prompt_cmd_kernel_manager()
elif self.cmd in ['v', 'V', 'e', 'E', 'variable-explorer']:
self.prompt_cmd_variable_explorer()
elif self.cmd in ['h', 'help']:
help_menu = Help(self.app)
help_menu.display()
elif self.cmd in ['R', 'daemon-restart']:
self.daemon_restart()
elif self.cmd in ['r', 'daemon-restart-connection']:
self.daemon_restart_connection()
elif self.cmd in ['c', 'daemon-connect']:
self.daemon_connect()
elif self.cmd in ['d', 'daemon-disconnect']:
self.daemon_disconnect()
elif self.cmd in ['toggle-debug']:
self.toggle_debug()
else:
self.prompt_msg_setup('Command not found !')
def prompt_cmd_kernel_manager(self):
""" 'kernel-manager' prompt command"""
if self.panel_name in ['variable-explorer']:
self.app.explorer_switch = True
elif self.panel_name not in ['kernel-manager']:
self.app.kernel_win.display()
else:
self.prompt_msg_setup('Already in kernel manager !')
def prompt_cmd_variable_explorer(self):
""" 'variable-explorer' prompt command """
if self.panel_name in ['kernel-manager']:
self.app.kernel_switch = True
elif self.panel_name not in ['variable-explorer']:
self.app.explorer_win.display()
else:
self.prompt_msg_setup('Already in variable explorer !')
def toggle_debug(self):
""" Display/hide debug informations """
if self.app.debug:
self.app.debug = False
else:
self.app.debug = True
def send_code(self):
""" Send code to current kernel """
code = self.prompt.simple('Send-code ')
code, err = self.check_code(code)
if err:
self.prompt_msg_setup(err)
elif code:
try:
send_msg(self.sock.RequestSock, '<code>' + code)
self.logger.info('Code sent to kernel : {}'.format(code))
self.prompt_msg_setup('Code sent !')
except Exception:
self.logger.error('Code not sent !')
self.prompt_msg_setup('Code not sent !')
@staticmethod
def check_code(code):
""" Check is code is authorized """
if 'input' in code:
return '', 'input command is not allowed'
elif 'reset' in code:
return 'reset -f', 'Resetting namespace...'
else:
return code, None
def daemon_connect(self):
""" Connect to daemon socket """
self.sock.init_sockets()
self.sock.warning_socket(self.app.wng)
def daemon_disconnect(self):
""" Disconnet from daemon socket """
self.sock.close_sockets()
self.sock.warning_socket(self.app.wng)
def daemon_restart_connection(self):
""" Restart connection to the daemon socket """
self.app.wng.display(' Restarting connection ')
self.sock.restart_sockets()
self.sock.warning_socket(self.app.wng)
def daemon_restart(self):
""" Restart kd5 ! """
restart_daemon()
self.app.wng.display(' Restarting Daemon ')
self.sock.init_sockets()
self.sock.warning_socket(self.app.wng)
def resize_curses(self, force=False):
""" Check if terminal is resized and adapt screen """
# Check difference between self.screen_height and self.app.screen_height
resize = curses.is_term_resized(self.screen_height, self.screen_width)
if resize or force:
# save also these value locally to check if
self.screen_height, self.screen_width = self.app.stdscr.getmaxyx()
# Update display
self.app.stdscr.clear()
self.gwin.clear()
self.gwin.resize(self.app.panel_height, self.app.screen_width)
curses.resizeterm(self.app.screen_height, self.app.screen_width)
self.app.stdscr.refresh()
self.gwin.refresh()
class ListPanel(BasePanel):
""" Generic Panel for lists with menu.
"""
def __init__(self, app, sock, logger):
""" Class Constructor """
super(ListPanel, self).__init__(app, sock, logger)
# Some variables
self.filter = None
self.mk_sort = 'name'
self.search = None
self.search_index = 0
self.search_lst = []
self.limit_msg = ''
self.position = 0
self.page = 1
# Init variables :
self.item_dic = {}
self.item_keys = []
@property
@abc.abstractmethod
def empty(self):
""" Text for empty list. Must return a string """
return
def display(self):
""" Display the panel. """
# Init colors
self.gwin.bkgd(self.color('txt'))
self.gwin.attrset(self.color('bdr'))
        self.gpan.top() # Push the panel to the top of the stack.
self.gpan.show() # Display the panel
self.gwin.clear()
# Update size if it has change when panel was hidden
self.resize_curses(True)
self.pkey = -1
while self.pkey not in self.app.kquit and self.app.close_signal == 'continue':
if self.app.kernel_switch or self.app.explorer_switch:
break
self.tasks()
self.gwin.clear()
self.gpan.hide()
def custom_tasks(self):
""" Supplementary tasks [To overload if needed] """
pass
@check_size
def tasks(self):
""" List of tasks at each iteration """
# Listen to resize and adapt Curses
self.resize_curses()
# Custom tasks
self.custom_tasks()
# Check Connection to daemon
self.sock.check_main_socket()
# Get items
self.item_dic = self.get_items()
self.row_num = len(self.item_dic) - 1
# Arange item list
self.arange_lst()
# Key bindings
self.common_key_bindings()
if not self.app.kernel_switch and not self.app.explorer_switch and self.app.close_signal == "continue":
# Navigate in the variable list window
self.navigate_lst()
# Update screen size
self.resize_curses()
# Update all
self.refresh()
# Get key
self.pkey = self.app.stdscr.getch()
def refresh(self):
""" Refresh all objects. """
# Erase all windows
self.gwin.erase()
self.app.stdscr.erase()
# Create border before updating fields
self.gwin.border(0)
# Fill the main box !
self.fill_main_box()
# Update all windows
if self.app.debug:
self.app.dbg_pad(self.pkey, self.search, self.filter, self.mk_sort)
# Update infos -- Bottom
self.app.status_bar()
self.prompt_msg_display()
self.app.stdscr.refresh()
self.gwin.refresh()
# Reactive timeout for getch
curses.halfdelay(self.app.curse_delay)
def list_key_bindings(self):
""" Actions linked to list of item. """
# Menu Search
if self.pkey == 47: # -> /
self.search_item('Search for : ')
# Next item (search)
if self.pkey == 110: # -> n
self.search_item_next()
# Sort variable by name/type
elif self.pkey == 115: # -> s
if self.mk_sort == 'name':
self.mk_sort = 'type'
elif self.mk_sort == 'type':
self.mk_sort = 'name'
self.arange_lst()
# Filter variables
elif self.pkey == 102: # -> f
self.filter = self.prompt.simple('Limit to : ')
if self.filter:
self.mk_sort = 'filter'
self.position = 0
self.page = 1
self.arange_lst()
else:
self.filter = None
# Reinit
elif self.pkey == 117: # -> u
self.mk_sort = 'name'
self.limit_msg = ''
self.position = 0
self.page = 1
self.arange_lst()
# Panel Menu
elif self.pkey in self.app.kenter and self.row_num != -1:
self.init_menu()
def custom_key_bindings(self):
""" Key bindings : To overload """
pass
@abc.abstractmethod
def get_items(self):
""" Return a dicionnary with items : self.item_dic """
return
def fill_main_box(self):
""" Update the item list """
# Title
if self.app.config['font']['pw-font'] == 'True':
self.gwin.addstr(0, int((self.app.screen_width-len(self.title))/2),
'', self.color('pwf'))
self.gwin.addstr(self.title, self.color('ttl'))
self.gwin.addstr('', self.color('pwf'))
else:
self.gwin.addstr(0, int((self.app.screen_width-len(self.title))/2),
'|' + self.title + '|', self.color('ttl'))
# Reset position if position is greater than the new list of var (reset)
self.row_num = len(self.item_keys) - 1
if self.position > self.row_num:
self.position = 0
self.page = 1
# Items
for i in range(self.app.row_max*(self.page-1),
self.app.row_max + self.app.row_max*(self.page-1)):
if self.row_num == -1:
self.gwin.addstr(1, 1, self.empty, self.color('hh'))
elif i <= self.row_num:
self.cell1, self.cell2 = format_cell(self.item_dic, self.item_keys[i], self.app.screen_width)
if i == self.position:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), 2,
self.cell1.encode(code), self.color('hh'))
self.fill_main_box_type_selected(i)
else:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), 2,
self.cell1.encode(code), curses.A_DIM | self.color('txt'))
self.fill_main_box_type(i)
# Bottom info
if self.app.config['font']['pw-font'] == 'True' and len(self.limit_msg) > 0:
self.gwin.addstr(self.app.panel_height-1,
int((self.app.screen_width-len(self.limit_msg))/2),
'', self.color('pwf'))
self.gwin.addstr(self.limit_msg, self.color('ttl'))
self.gwin.addstr('', self.color('pwf'))
elif len(self.limit_msg) > 0:
self.gwin.addstr(self.app.panel_height-1,
int((self.app.screen_width-len(self.limit_msg))/2),
'< ' + self.limit_msg + ' >', curses.A_DIM | self.color('ttl'))
self.app.stdscr.refresh()
self.gwin.refresh()
def fill_main_box_type_selected(self, i):
if "[Died]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('di'))
elif "[Alive]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('al'))
elif "[Connected]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('co'))
else:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('hh'))
def fill_main_box_type(self, i):
if "[Died]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('di'))
elif "[Alive]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('al'))
elif "[Connected]" in self.cell2:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('co'))
else:
self.gwin.addstr(i + 1 - self.app.row_max*(self.page-1), len(self.cell1),
self.cell2, self.color('txt'))
@staticmethod
def filter_var_lst(item_dic, filt):
""" Filter variable list (name|type). """
filtered = []
for key in list(item_dic):
if filt in item_dic[key]['type'] or filt in key:
filtered.append(key)
return sorted(filtered)
@staticmethod
def type_sort(item_dic):
""" Sort variable by type. """
from operator import itemgetter
types = []
for key in list(item_dic):
types.append([key, item_dic[key]['type']])
types.sort(key=itemgetter(1))
return [item[0] for item in types]
def arange_lst(self):
""" Organize/Arange variable list. """
if self.mk_sort == 'name':
self.item_keys = sorted(list(self.item_dic))
elif self.mk_sort == 'type':
self.item_keys = self.type_sort(self.item_dic)
elif self.mk_sort == 'filter' and self.filter:
self.item_keys = self.filter_var_lst(self.item_dic, self.filter)
if not self.item_keys:
self.prompt_msg_setup('{} not found'.format(self.filter))
self.item_keys = sorted(list(self.item_dic))
self.filter = None
self.mk_sort = 'name'
else:
self.limit_msg = ' Filter : {} ({} obj.) '.format(self.filter, len(self.item_keys))
else:
self.item_keys = list(self.item_dic)
        # Update the number of rows
self.row_num = len(self.item_keys) - 1
def search_item(self, txt_msg):
""" Search an object in the variable list """
self.search = self.prompt.simple(txt_msg)
self.search_lst = [i for i, s in enumerate(self.item_keys) if self.search in s]
self.search_index = 0
self.logger.info('Searching for : {} in :\n{}'.format(self.search, self.item_keys))
if self.search_lst and self.search:
if len(self.search_lst) == 1:
self.prompt_msg_setup("{} occurence of '{}' found".format(len(self.search_lst), self.search))
else:
self.prompt_msg_setup("{} occurences of '{}' found".format(len(self.search_lst), self.search))
self.position = self.search_lst[self.search_index]
self.page = ceil((self.position+1)/self.app.row_max)
elif not self.search:
pass
else:
self.prompt_msg_setup(self.search + ' not found !')
self.position = 0
self.page = 1
def search_item_next(self):
""" Next occurence of the searching. """
self.search_lst = [i for i, s in enumerate(self.item_keys) if self.search in s]
if self.search_lst and self.search_index < len(self.search_lst) - 1:
self.search_index += 1
else:
self.search_index = 0
self.position = self.search_lst[self.search_index]
self.page = ceil((self.position+1)/self.app.row_max)
def navigate_lst(self):
""" Navigation though the item list"""
self.pages = ceil((self.row_num + 1)/self.app.row_max)
if self.pkey in self.app.kdown:
self.navigate_down()
if self.pkey in self.app.kup:
self.navigate_up()
if self.pkey in self.app.kleft and self.page > 1:
self.navigate_left()
if self.pkey in self.app.kright and self.page < self.pages:
self.navigate_right()
def navigate_right(self):
""" Navigate Right. """
self.page = self.page + 1
self.position = self.app.row_max*(self.page-1)
def navigate_left(self):
""" Navigate Left. """
self.page = self.page - 1
self.position = self.app.row_max*(self.page-1)
def navigate_up(self):
""" Navigate Up. """
if self.page == 1:
if self.position > 0:
self.position = self.position - 1
else:
if self.position > self.app.row_max*(self.page - 1):
self.position = self.position - 1
else:
self.page = self.page - 1
self.position = self.app.row_max - 1 + self.app.row_max*(self.page - 1)
def navigate_down(self):
""" Navigate Down. """
# First page
if self.page == 1:
if (self.position < self.app.row_max - 1) and (self.position < self.row_num):
self.position = self.position + 1
else:
if self.pages > 1:
self.page = self.page + 1
self.position = self.app.row_max*(self.page - 1)
# Last page
elif self.page == self.pages:
if self.position < self.row_num:
self.position = self.position + 1
# Between
else:
if self.position < self.app.row_max - 1 + self.app.row_max*(self.page - 1):
self.position = self.position + 1
else:
self.page = self.page + 1
self.position = self.app.row_max*(self.page - 1)
def init_menu(self):
""" Init the menu """
self.selected = self.item_keys[self.position]
        # Add specific initialization
self.menu_special_init()
# Create menu list
self.menu_lst = self.create_menu()
# Various variables
self.menu_cursor = 0
self.menu_title = ' ' + self.selected.split('/')[-1] + ' '
# Menu dimensions
self.menu_width = len(max(
[self.menu_lst[i][0] for i in range(len(self.menu_lst))], key=len))
self.menu_width = max(self.menu_width, len(self.menu_title)) + 4
self.menu_height = len(self.menu_lst) + 2
self.title_pos = int((self.menu_width - len(self.menu_title) - 2)/2)
# Init Menu
self.gwin_menu = self.app.stdscr.subwin(self.menu_height,
self.menu_width, 2,
self.app.screen_width-self.menu_width-2)
self.gwin_menu.border(0)
self.gwin_menu.bkgd(self.color('txt'))
self.gwin_menu.attrset(self.color('bdr')) # Change border color
self.gwin_menu.keypad(1)
# Send menu to a panel
self.gpan_menu = panel.new_panel(self.gwin_menu)
# Hide the panel. This does not delete the object, it just makes it invisible.
self.gpan_menu.hide()
panel.update_panels()
# Submenu
self.display_menu()
def menu_special_init(self):
""" Additionnal initialization for menu """
pass
def create_menu(self):
""" Create the item list for the kernel menu : To overload """
return [('No Option', 'None')]
def display_menu(self):
""" Display the menu """
        self.gpan_menu.top() # Push the panel to the top of the stack.
self.gpan_menu.show() # Display the panel (which might have been hidden)
self.gwin_menu.clear()
menukey = -1
while menukey not in self.app.kquit:
self.gwin_menu.border(0)
# Title
if self.app.config['font']['pw-font'] == 'True':
self.gwin_menu.addstr(0, self.title_pos, '', self.color('pwf'))
self.gwin_menu.addstr(self.menu_title, self.color('ttl'))
self.gwin_menu.addstr('', self.color('pwf'))
else:
self.gwin_menu.addstr(0, self.title_pos,
'|' + self.menu_title + '|', self.color('ttl'))
self.gwin_menu.refresh()
# Create entries
for index, item in enumerate(self.menu_lst):
if index == self.menu_cursor:
mode = self.color('hh')
else:
mode = self.color('txt') | curses.A_DIM
self.gwin_menu.addstr(1+index, 1, item[0], mode)
# Wait for keyboard event
menukey = self.gwin_menu.getch()
if menukey in self.app.kenter:
eval(self.menu_lst[self.menu_cursor][1])
break
elif menukey in self.app.kup:
self.navigate_menu(-1, len(self.menu_lst))
elif menukey in self.app.kdown:
self.navigate_menu(1, len(self.menu_lst))
if menukey == curses.KEY_RESIZE:
self.resize = True
break
self.gwin_menu.clear()
self.gpan_menu.hide()
def navigate_menu(self, n, size):
""" Navigate through the menu """
self.menu_cursor += n
if self.menu_cursor < 0:
self.menu_cursor = 0
elif self.menu_cursor >= size:
self.menu_cursor = size - 1
| gpl-3.0 | -6,112,869,070,022,161,000 | 30.439377 | 115 | 0.539096 | false |
pdl30/pynoncode | pynoncode/fasta_parsers.py | 1 | 3648 | #!/usr/bin/python
########################################################################
# 28 Apr 2014
# Patrick Lombard, Centre for Stem Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import argparse
import subprocess
import sys, re, os
from collections import defaultdict
from itertools import izip
def parse_paired_fastq(fq1, fq2, outdir):
dict2 = defaultdict(int)
count_dict = defaultdict(int)
f1=open(fq1)
f2=open(fq2)
for line1, line2 in izip(f1, f2):
line1 = line1.rstrip()
id1 = line1.split("#")
line2 = line2.rstrip()
id2 = line2.split("#")
try:
id11 = next(f1)
read1 = id11.rstrip()
id22 = next(f2)
read2 = id22.rstrip()
reads = "{}\t{}".format(read1, read2)
dict2[reads] += 1
crap = next(f1)
crap2 = next(f1)
crap = next(f2)
crap2 = next(f2)
except StopIteration:
break
seen = {}
name1 = "original_fasta_1.fa"
name2 = "original_fasta_2.fa"
count = 1
output1 = open(outdir + "/" + name1, "wb")
output2 = open(outdir + "/" + name2, "wb")
for key in dict2.keys():
reads = key.split("\t")
output1.write(">ID:{}\n{}\n".format(count, reads[0])),
output2.write(">ID:{}\n{}\n".format(count, reads[1])),
count_dict[count] = dict2[key]
count += 1
output3 = open(outdir + "/" + "count_dict.txt", "w")
for key in count_dict.keys():
output3.write("{}\t{}\n".format(key, count_dict[key])),
def parse_single_fastq(fq1, outdir):
dict2 = defaultdict(int)
count_dict = defaultdict(int)
f1=open(fq1)
for line1 in f1:
line1 = line1.rstrip()
id1 = line1.split("#")
try:
id11 = next(f1)
read1 = id11.rstrip()
dict2[read1] += 1
crap = next(f1)
crap2 = next(f1)
except StopIteration:
break
seen = {}
name1 = "original_fasta.fa"
count = 1
output1 = open(outdir + "/" + name1, "wb")
for key in dict2.keys():
reads = key.split("\t")
output1.write(">ID:{}\n{}\n".format(count, reads[0])),
count_dict[count] = dict2[key]
count += 1
output3 = open(outdir + "/" + "count_dict.txt", "w")
for key in count_dict.keys():
output3.write("{}\t{}\n".format(key, count_dict[key])),
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith(">"):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
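# A minimal usage sketch for read_fasta: the helper below is illustrative only
# and assumes `path` points at a FASTA file; it returns sequence lengths keyed
# by header line.
def example_read_fasta_lengths(path):
    """ Illustrative sketch: sequence lengths keyed by FASTA header line. """
    lengths = {}
    with open(path) as handle:
        for name, seq in read_fasta(handle):
            lengths[name] = len(seq)
    return lengths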
def stripper(fasta):
result = {}
with open(fasta) as f:
for name, seq in read_fasta(f):
bases = list(seq)
end1 = bases[-3:]
end1 = ''.join(end1)
if end1 == "CCA":
tmpseq = bases[:-3]
seq = ''.join(tmpseq)
end2 = bases[-4:]
end2 = ''.join(end2)
if end2 == "CCAC":
tmpseq = bases[:-4]
seq = ''.join(tmpseq)
end3 = bases[-5:]
end3 = ''.join(end3)
if end3 == "CCACC":
tmpseq = bases[:-5]
seq = ''.join(tmpseq)
end4 = bases[-6:]
end4 = ''.join(end4)
if end4 == "CCACCA":
tmpseq = bases[:-6]
seq = ''.join(tmpseq)
result[name] = seq
return result
def strip_ends(paired):
if paired == True:
output1 = open("clipped_1.fa", "w")
output2 = open("clipped_2.fa", "w")
data1 = stripper("unclipped_multi_unmapped_1.fa")
data2 = stripper("unclipped_multi_unmapped_2.fa")
for key in sorted(data1.keys()):
output1.write("{}\n{}\n".format(key, data1[key])),
for key in sorted(data2.keys()):
output2.write("{}\n{}\n".format(key, data2[key])),
else:
data1 = stripper("unclipped_multi_unmapped.fa")
output1 = open("clipped_fasta.fa", "w")
for key in sorted(data1.keys()):
output1.write("{}\n{}\n".format(key, data1[key])), | gpl-2.0 | 8,414,638,851,961,033,000 | 25.064286 | 72 | 0.584704 | false |
eklinkhammer/gym-ctf | gym_ctf/state/flag.py | 1 | 3158 | import numpy as np
import random
import math
class Flag():
""" A flag is target that agents use to score in capture the flag.
Once captured, it is marked as taken and stores the scoring team.
"""
def __init__(self, pos, scoring_radius):
assert scoring_radius >= 0
self.position = pos
self.scoring_radius = scoring_radius
self.taken = False
self.scoring_team = None
self.scoring_count = 0
def take(self, team_id):
self.taken = True
self.scoring_team = team_id
def reset(self):
self.taken = False
self.scoring_team = None
self.scoring_count = 0
    @staticmethod
    def random_pos(min_x, min_y, max_x, max_y):
""" Generates a random tuple representing a 2D point within the box
defined by the ranges.
Args:
min_x (double): Minimum x value (lower-left corner)
min_y (double): Minimum y value (lower-left corner)
max_x (double): Maximum x value (upper-right corner)
max_y (double): Maximum y value (upper-right corner)
Returns:
(double, double). 2D point.
"""
if max_y is None: max_y = max_x
rand_x = random.randrange(min_x, max_x,1)
rand_y = random.randrange(min_y, max_y,1)
position = (rand_x, rand_y)
return position
    @staticmethod
    def random_flag(min_x, min_y, max_x, max_y, scoring_radius):
""" Generates a random flag at a position within the bounding box
provided using the given scoring radius. Scoring radius is not
random because it depends (for best results) on container size.
Args:
min_x (double): Minimum x value (lower-left corner)
min_y (double): Minimum y value (lower-left corner)
max_x (double): Maximum x value (upper-right corner)
max_y (double): Maximum y value (upper-right corner)
scoring_radius (double): The radius within which agents can ctf
Returns:
Flag. A flag object at a random 2D point.
"""
return Flag(Flag.random_pos(min_x, min_y, max_x, max_y), scoring_radius)
def obs(self):
""" Returns the observation of a flag in format expected by gym env
Returns:
numpy array of length 3. Contains position of flag and scoring
team. Team is 0 if no team scores
"""
if self.taken:
team = self.scoring_team
else:
team = 0
return np.array([self.position[0], self.position[1], team])
def within_scoring_distance(self, position_other):
""" Determine if other position is within the scoring radius of the flag
Args:
position_other (2-tuple of doubles): 2D point
Returns:
boolean. True iff position_other is within scoring radius.
"""
distance = math.sqrt(math.pow(self.position[0] - position_other[0], 2) +
math.pow(self.position[1] - position_other[1], 2))
return distance <= self.scoring_radius
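# A minimal usage sketch, assuming a 100x100 arena and an agent standing at
# its centre; all coordinates and the team id below are illustrative values.
if __name__ == "__main__":
    demo_flag = Flag.random_flag(0, 0, 100, 100, 5.0)
    agent_position = (50.0, 50.0)
    if demo_flag.within_scoring_distance(agent_position):
        demo_flag.take(1)
    print(demo_flag.obs())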
| mit | 6,347,739,872,781,049,000 | 33.703297 | 80 | 0.578214 | false |
gajim/python-nbxmpp | nbxmpp/modules/register/util.py | 1 | 4233 | # Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.protocol import Iq
from nbxmpp.const import REGISTER_FIELDS
from nbxmpp.structs import RegisterData
from nbxmpp.errors import StanzaError
from nbxmpp.errors import MalformedStanzaError
from nbxmpp.modules.dataforms import create_field
from nbxmpp.modules.dataforms import extend_form
from nbxmpp.modules.dataforms import SimpleDataForm
from nbxmpp.modules.bits_of_binary import parse_bob_data
def _make_password_change_request(domain, username, password):
iq = Iq('set', Namespace.REGISTER, to=domain)
query = iq.getQuery()
query.setTagData('username', username)
query.setTagData('password', password)
return iq
def _make_password_change_with_form(domain, form):
iq = Iq('set', Namespace.REGISTER, to=domain)
iq.setQueryPayload(form)
return iq
def _make_register_form(jid, form):
iq = Iq('set', Namespace.REGISTER, to=jid)
if form.is_fake_form():
query = iq.getTag('query')
for field in form.iter_fields():
if field.var == 'fakeform':
continue
query.addChild(field.var, payload=[field.value])
return iq
iq.setQueryPayload(form)
return iq
def _make_unregister_request(jid):
iq = Iq('set', to=jid)
query = iq.setQuery()
query.setNamespace(Namespace.REGISTER)
query.addChild('remove')
return iq
def _parse_oob_url(query):
oob = query.getTag('x', namespace=Namespace.X_OOB)
if oob is not None:
return oob.getTagData('url') or None
return None
def _parse_form(stanza):
query = stanza.getTag('query', namespace=Namespace.REGISTER)
form = query.getTag('x', namespace=Namespace.DATA)
if form is None:
return None
form = extend_form(node=form)
field = form.vars.get('FORM_TYPE')
if field is None:
return None
# Invalid urn:xmpp:captcha used by ejabberd
# See https://github.com/processone/ejabberd/issues/3045
if field.value in ('jabber:iq:register', 'urn:xmpp:captcha'):
return form
return None
def _parse_fields_form(query):
fields = []
for field in query.getChildren():
field_name = field.getName()
if field_name not in REGISTER_FIELDS:
continue
required = field_name in ('username', 'password')
typ = 'text-single' if field_name != 'password' else 'text-private'
fields.append(create_field(typ=typ,
var=field_name,
required=required))
if not fields:
return None
fields.append(create_field(typ='hidden', var='fakeform'))
return SimpleDataForm(type_='form',
instructions=query.getTagData('instructions'),
fields=fields)
def _parse_register_data(response):
query = response.getTag('query', namespace=Namespace.REGISTER)
if query is None:
raise StanzaError(response)
instructions = query.getTagData('instructions') or None
data = RegisterData(instructions=instructions,
form=_parse_form(response),
fields_form=_parse_fields_form(query),
oob_url=_parse_oob_url(query),
bob_data=parse_bob_data(query))
if (data.form is None and
data.fields_form is None and
data.oob_url is None):
raise MalformedStanzaError('invalid register response', response)
return data
| gpl-3.0 | 3,761,805,763,904,235,000 | 31.305344 | 75 | 0.661153 | false |
nvbn/coviolations_web | nodes/tests/test_create_images.py | 1 | 1789 | import sure
from mock import MagicMock
from django.core.management import call_command
from django.test import TestCase
from ..management.commands import create_images
class CreateImagesCase(TestCase):
"""Create images case"""
def setUp(self):
self._mock_connect_to_node()
self._mock_logger()
self._mock_iterate_images()
def _mock_connect_to_node(self):
"""Mock connect_to_node"""
self._orig_connect_to_node = create_images.connect_to_node
create_images.connect_to_node = MagicMock()
def _mock_logger(self):
"""Mock logger"""
self._orig_logger = create_images.logger
create_images.logger = MagicMock()
def _mock_iterate_images(self):
"""Mock iterate images"""
self._orig_iterate_images = create_images.Command._iterate_images
create_images.Command._iterate_images = MagicMock(return_value=[])
def tearDown(self):
create_images.connect_to_node = self._orig_connect_to_node
create_images.logger = self._orig_logger
create_images.Command._iterate_images = self._orig_iterate_images
def test_create_raw_image(self):
"""Test create raw image"""
node = MagicMock()
create_images.connect_to_node.return_value.__enter__.\
return_value = node
call_command('create_images')
node.save_image.assert_called_once_with('raw')
def test_create_other_images(self):
"""Test create other images"""
node = MagicMock()
create_images.connect_to_node.return_value.__enter__.\
return_value = node
create_images.Command._iterate_images.return_value = ['cat', 'dog']
call_command('create_images')
node.save_image.call_count.should.be.equal(3)
| mit | 1,887,358,779,549,905,700 | 34.078431 | 75 | 0.643376 | false |
thomasmoelhave/TerraNNI | data-generator.py | 1 | 2762 | #!/usr/bin/env python
# encoding: utf-8
import sys
import os
import math
import random
from optparse import OptionParser
# Simple helper to write the (x, y, z, t) tuples stored in array a to file fn (or to stdout if fn is empty)
def writeArray(fn, a):
if fn:
f = open(fn, 'w')
for i in range(0,len(a)):
f.write("%f %f %f %d\n" % (a[i][0],a[i][1],a[i][2],a[i][3]))
f.close()
else:
for i in range(0,len(a)):
print "%f %f %f %d\n" % (a[i][0],a[i][1],a[i][2],a[i][3])
def linearTF(t,m = 1):
return t * m
def symetricSmooth(tf,to):
xo = yo = 0.5
def retFunc(x,y,t):
return to + tf(t) * math.cos( math.pow(x-xo,2) + math.pow(y-yo,2) )
return retFunc
def wall(xmin, xmax, tmin, tmax):
h = 20
b = 1
width = 5
def retFunc(x,y,t):
tp = 1 - (1.0 * (tmax - t - tmin) / (tmax - tmin))
xm = tp * (xmax - xmin) + xmin
if ( x >= xm and x < xm + width ):
return h
return b
return retFunc
def randData():
def retFunc(x,y,t):
return myrand(1,100,0.01)
return retFunc
def myrand(mn, mx, g):
m = 1
while (g * m) != int(g*m):
m = m*10
return (1.0 * random.randrange(m*mn, m*mx, m*g)) / m
def fill(f,n,p,ta,fn):
if fn:
fl = open(fn, 'w')
#r = []
for j in range(0,len(ta)):
t1 = ta[j]
print 'time: ', t1, n
for i in range(0,n):
x1 = myrand(p['xmin'],p['xmax'],p['gran'])
y1 = myrand(p['ymin'],p['ymax'],p['gran'])
if fn:
fl.write("%f %f %f %d\n" % (x1,y1,f(x1,y1,t1),t1))
else:
print "%f %f %f %d\n" % (x1,y1,f(x1,y1,t1),t1)
#r.append( (x1,y1,f(x1,y1,t1),t1) )
if fn:
fl.close()
#return r
def main():
parser = OptionParser()
parser.add_option("-f", "--file", action="store", type="string", dest="filename")
parser.add_option("-n", type="int", dest="n", default=1000)
(options, args) = parser.parse_args()
params = {'xmin': -100, 'ymin': -100, 'xmax': 100, 'ymax': 100, 'gran': 0.001 }
print "n: ", options.n
# Only use ONE Of these functions for any given run
#writeArray( options.filename, fill( symetricSmooth(linearTF,100.0), options.n, params, (0,10) ) )
#writeArray( options.filename, fill( wall(params['xmin'], params['xmax'],0,100), options.n, params, range(0,110,10) )
#writeArray( options.filename, fill( randData, 200000000, params, range(0,21,1) ))
#fill( symetricSmooth(linearTF,100.0), options.n, params, (0,10), options.filename )
ta= range(0,110,1)
ta.remove(10)
ta.remove(30)
ta.remove(50)
ta.remove(70)
ta.remove(90)
#fill( wall(params['xmin'], params['xmax'],0,100), options.n, params, ta, options.filename )
#fill( randData(), 200000000, params, range(0,21,1), options.filename)
#fill( randData(), 50000000, params, range(0,11,1), options.filename) # BIG DATA
fill( randData(), 10000000, params, range(0,11,1), options.filename)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,000,724,322,534,402,000 | 25.815534 | 118 | 0.60391 | false |
lym/allura-git | Allura/allura/lib/utils.py | 1 | 20481 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
import time
import string
import hashlib
import binascii
import logging.handlers
import codecs
from ming.odm import session
import os.path
import datetime
import random
import mimetypes
import re
import magic
from itertools import groupby
import collections
import tg
import pylons
import json
import webob.multidict
from formencode import Invalid
from tg.decorators import before_validate
from pylons import response
from pylons import tmpl_context as c
from pylons.controllers.util import etag_cache
from paste.deploy.converters import asbool, asint
from paste.httpheaders import CACHE_CONTROL, EXPIRES
from webhelpers.html import literal
from webob import exc
from pygments.formatters import HtmlFormatter
from setproctitle import getproctitle
import html5lib.sanitizer
from ew import jinja2_ew as ew
from ming.utils import LazyProperty
from ming.odm.odmsession import ODMCursor
MARKDOWN_EXTENSIONS = ['.markdown', '.mdown', '.mkdn', '.mkd', '.md']
def permanent_redirect(url):
try:
tg.redirect(url)
except exc.HTTPFound, err:
raise exc.HTTPMovedPermanently(location=err.location)
def guess_mime_type(filename):
'''Guess MIME type based on filename.
Applies heuristics, tweaks, and defaults in centralized manner.
'''
# Consider changing to strict=False
content_type = mimetypes.guess_type(filename, strict=True)
if content_type[0]:
content_type = content_type[0]
else:
content_type = 'application/octet-stream'
return content_type
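# A minimal sketch of the fallback behaviour above; the file names are
# illustrative and the exact mapping for known extensions can vary slightly by
# platform.
def _example_guess_mime_type():
    '''Illustrative sketch only.'''
    known = guess_mime_type('report.pdf')         # usually 'application/pdf'
    unknown = guess_mime_type('blob.unknownext')  # falls back to 'application/octet-stream'
    return known, unknown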
class ConfigProxy(object):
'''Wrapper for loading config values at module-scope so we don't
have problems when a module is imported before tg.config is initialized
'''
def __init__(self, **kw):
self._kw = kw
def __getattr__(self, k):
return self.get(k)
def get(self, key, default=None):
return tg.config.get(self._kw.get(key, key), default)
def get_bool(self, key):
return asbool(self.get(key))
class lazy_logger(object):
    '''Lazy instantiation of a logger, to ensure that it does not get
created before logging is configured (which would make it disabled)'''
def __init__(self, name):
self._name = name
@LazyProperty
def _logger(self):
return logging.getLogger(self._name)
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError, name
return getattr(self._logger, name)
class TimedRotatingHandler(logging.handlers.BaseRotatingHandler):
def __init__(self, strftime_pattern):
self.pattern = strftime_pattern
self.last_filename = self.current_filename()
logging.handlers.BaseRotatingHandler.__init__(
self, self.last_filename, 'a')
def current_filename(self):
return os.path.abspath(datetime.datetime.utcnow().strftime(self.pattern))
def shouldRollover(self, record):
        'Inherited from BaseRotatingHandler'
return self.current_filename() != self.last_filename
def doRollover(self):
self.stream.close()
self.baseFilename = self.current_filename()
if self.encoding:
self.stream = codecs.open(self.baseFilename, 'w', self.encoding)
else:
self.stream = open(self.baseFilename, 'w')
class StatsHandler(TimedRotatingHandler):
fields = (
'action', 'action_type', 'tool_type', 'tool_mount', 'project', 'neighborhood',
'username', 'url', 'ip_address')
def __init__(self,
strftime_pattern,
module='allura',
page=1,
**kwargs):
self.page = page
self.module = module
TimedRotatingHandler.__init__(self, strftime_pattern)
def emit(self, record):
if not hasattr(record, 'action'):
return
kwpairs = dict(
module=self.module,
page=self.page)
for name in self.fields:
kwpairs[name] = getattr(record, name, None)
kwpairs.update(getattr(record, 'kwpairs', {}))
record.kwpairs = ','.join(
'%s=%s' % (k, v) for k, v in sorted(kwpairs.iteritems())
if v is not None)
record.exc_info = None # Never put tracebacks in the rtstats log
TimedRotatingHandler.emit(self, record)
class CustomWatchedFileHandler(logging.handlers.WatchedFileHandler):
"""Custom log handler for Allura"""
def format(self, record):
"""Prepends current process name to ``record.name`` if running in the
context of a taskd process that is currently processing a task.
"""
title = getproctitle()
if title.startswith('taskd:'):
record.name = "{0}:{1}".format(title, record.name)
return super(CustomWatchedFileHandler, self).format(record)
def chunked_find(cls, query=None, pagesize=1024, sort_key='_id', sort_dir=1):
'''
Execute a mongo query against the specified class, yield some results at
a time (avoids mongo cursor timeouts if the total result set is very large).
Pass an indexed sort_key for efficient queries. Default _id should work
in most cases.
'''
if query is None:
query = {}
page = 0
max_id = None
while True:
if sort_key:
if max_id:
if sort_key not in query:
query[sort_key] = {}
query[sort_key]['$gt'] = max_id
q = cls.query.find(query).limit(pagesize).sort(sort_key, sort_dir)
else:
# skipping requires scanning, even for an indexed query
q = cls.query.find(query).limit(pagesize).skip(pagesize * page)
results = (q.all())
if not results:
break
if sort_key:
max_id = results[-1][sort_key]
yield results
page += 1
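# A minimal usage sketch for chunked_find, assuming `model_cls` is any
# Ming-mapped class with a .query manager; the helper name and page size are
# illustrative only.
def _example_chunked_find_count(model_cls):
    '''Illustrative sketch: count documents by walking chunked_find pages.'''
    total = 0
    for page in chunked_find(model_cls, query={}, pagesize=256):
        total += len(page)
    return total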
def lsub_utf8(s, n):
'''Useful for returning n bytes of a UTF-8 string, rather than characters'''
while len(s) > n:
k = n
while (ord(s[k]) & 0xc0) == 0x80:
k -= 1
return s[:k]
return s
def chunked_list(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i + n]
def chunked_iter(iterable, max_size):
'''return iterable 'chunks' from the iterable of max size max_size'''
eiter = enumerate(iterable)
keyfunc = lambda (i, x): i // max_size
for _, chunk in groupby(eiter, keyfunc):
yield (x for i, x in chunk)
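# A minimal sketch contrasting the two helpers above: chunked_list slices a
# list eagerly, while chunked_iter lazily groups any iterable and yields each
# chunk as a generator. The inputs are illustrative only.
def _example_chunking():
    '''Illustrative sketch only.'''
    eager = [chunk for chunk in chunked_list(range(7), 3)]        # [[0, 1, 2], [3, 4, 5], [6]]
    lazy = [list(chunk) for chunk in chunked_iter(range(7), 3)]   # [[0, 1, 2], [3, 4, 5], [6]]
    return eager, lazy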
class AntiSpam(object):
'''Helper class for bot-protecting forms'''
honey_field_template = string.Template('''<p class="$honey_class">
<label for="$fld_id">You seem to have CSS turned off.
Please don't fill out this field.</label><br>
<input id="$fld_id" name="$fld_name" type="text"><br></p>''')
def __init__(self, request=None, num_honey=2):
self.num_honey = num_honey
if request is None or request.method == 'GET':
self.request = pylons.request
self.timestamp = int(time.time())
self.spinner = self.make_spinner()
self.timestamp_text = str(self.timestamp)
self.spinner_text = self._wrap(self.spinner)
else:
self.request = request
self.timestamp_text = request.params['timestamp']
self.spinner_text = request.params['spinner']
self.timestamp = int(self.timestamp_text)
self.spinner = self._unwrap(self.spinner_text)
self.spinner_ord = map(ord, self.spinner)
self.random_padding = [random.randint(0, 255) for x in self.spinner]
self.honey_class = self.enc(self.spinner_text, css_safe=True)
# The counter is to ensure that multiple forms in the same page
# don't end up with the same id. Instead of doing:
#
# honey0, honey1
# which just relies on 0..num_honey we include a counter
# which is incremented every time extra_fields is called:
#
# honey00, honey 01, honey10, honey11
self.counter = 0
@staticmethod
def _wrap(s):
'''Encode a string to make it HTML id-safe (starts with alpha, includes
only digits, hyphens, underscores, colons, and periods). Luckily, base64
encoding doesn't use hyphens, underscores, colons, nor periods, so we'll
use these characters to replace its plus, slash, equals, and newline.
'''
tx_tbl = string.maketrans('+/', '-_')
s = binascii.b2a_base64(s)
s = s.rstrip('=\n')
s = s.translate(tx_tbl)
s = 'X' + s
return s
@staticmethod
def _unwrap(s):
tx_tbl = string.maketrans('-_', '+/')
s = s[1:]
s = str(s).translate(tx_tbl)
i = len(s) % 4
if i > 0:
s += '=' * (4 - i)
s = binascii.a2b_base64(s + '\n')
return s
def enc(self, plain, css_safe=False):
'''Stupid fieldname encryption. Not production-grade, but
hopefully "good enough" to stop spammers. Basically just an
XOR of the spinner with the unobfuscated field name
'''
# Plain starts with its length, includes the ordinals for its
# characters, and is padded with random data
plain = ([len(plain)]
+ map(ord, plain)
+ self.random_padding[:len(self.spinner_ord) - len(plain) - 1])
enc = ''.join(chr(p ^ s) for p, s in zip(plain, self.spinner_ord))
enc = self._wrap(enc)
if css_safe:
enc = ''.join(ch for ch in enc if ch.isalpha())
return enc
def dec(self, enc):
enc = self._unwrap(enc)
enc = list(map(ord, enc))
plain = [e ^ s for e, s in zip(enc, self.spinner_ord)]
plain = plain[1:1 + plain[0]]
plain = ''.join(map(chr, plain))
return plain
def extra_fields(self):
yield ew.HiddenField(name='timestamp', value=self.timestamp_text).display()
yield ew.HiddenField(name='spinner', value=self.spinner_text).display()
for fldno in range(self.num_honey):
fld_name = self.enc('honey%d' % (fldno))
fld_id = self.enc('honey%d%d' % (self.counter, fldno))
yield literal(self.honey_field_template.substitute(
honey_class=self.honey_class,
fld_id=fld_id,
fld_name=fld_name))
self.counter += 1
def make_spinner(self, timestamp=None):
if timestamp is None:
timestamp = self.timestamp
try:
client_ip = ip_address(self.request)
except (TypeError, AttributeError), err:
client_ip = '127.0.0.1'
plain = '%d:%s:%s' % (
timestamp, client_ip, pylons.config.get('spinner_secret', 'abcdef'))
return hashlib.sha1(plain).digest()
@classmethod
def validate_request(cls, request=None, now=None, params=None):
if request is None:
request = pylons.request
if params is None:
params = request.params
new_params = dict(params)
if not request.method == 'GET':
new_params.pop('timestamp', None)
new_params.pop('spinner', None)
obj = cls(request)
if now is None:
now = time.time()
if obj.timestamp > now + 5:
raise ValueError, 'Post from the future'
if now - obj.timestamp > 24 * 60 * 60:
raise ValueError, 'Post from the distant past'
if obj.spinner != obj.make_spinner(obj.timestamp):
raise ValueError, 'Bad spinner value'
for k in new_params.keys():
new_params[obj.dec(k)] = new_params.pop(k)
for fldno in range(obj.num_honey):
value = new_params.pop('honey%s' % fldno)
if value:
raise ValueError, 'Value in honeypot field: %s' % value
return new_params
@classmethod
def validate(cls, error_msg):
'''Controller decorator to raise Invalid errors if bot protection is engaged'''
def antispam_hook(remainder, params):
'''Converts various errors in validate_request to a single Invalid message'''
try:
new_params = cls.validate_request(params=params)
params.update(new_params)
except (ValueError, TypeError, binascii.Error):
raise Invalid(error_msg, params, None)
return before_validate(antispam_hook)
class TruthyCallable(object):
'''
Wraps a callable to make it truthy in a boolean context.
Assumes the callable returns a truthy value and can be called with no args.
'''
def __init__(self, callable):
self.callable = callable
def __call__(self, *args, **kw):
return self.callable(*args, **kw)
def __nonzero__(self):
return self.callable()
class TransformedDict(collections.MutableMapping):
"""
A dictionary which applies an arbitrary
key-altering function before accessing the keys.
From: http://stackoverflow.com/questions/3387691/python-how-to-perfectly-override-a-dict
"""
def __init__(self, *args, **kwargs):
self.store = dict()
self.update(dict(*args, **kwargs)) # use the free update to set keys
def __getitem__(self, key):
return self.store[self.__keytransform__(key)]
def __setitem__(self, key, value):
self.store[self.__keytransform__(key)] = value
def __delitem__(self, key):
del self.store[self.__keytransform__(key)]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __keytransform__(self, key):
return key
class CaseInsensitiveDict(TransformedDict):
def __keytransform__(self, key):
return key.lower()
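# A minimal sketch of CaseInsensitiveDict: keys are lower-cased on the way in
# and on lookup, so access is case-insensitive. The header name below is an
# illustrative value.
def _example_case_insensitive_dict():
    '''Illustrative sketch only.'''
    headers = CaseInsensitiveDict({'Content-Type': 'text/plain'})
    return headers['content-type']  # -> 'text/plain'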
def postmortem_hook(etype, value, tb): # pragma no cover
import sys
import pdb
import traceback
try:
from IPython.ipapi import make_session
make_session()
from IPython.Debugger import Pdb
sys.stderr.write('Entering post-mortem IPDB shell\n')
p = Pdb(color_scheme='Linux')
p.reset()
p.setup(None, tb)
p.print_stack_trace()
sys.stderr.write('%s: %s\n' % (etype, value))
p.cmdloop()
p.forget()
# p.interaction(None, tb)
except ImportError:
sys.stderr.write('Entering post-mortem PDB shell\n')
traceback.print_exception(etype, value, tb)
pdb.post_mortem(tb)
class LineAnchorCodeHtmlFormatter(HtmlFormatter):
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append('line-height: 125%')
style = '; '.join(style)
num = self.linenostart
yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
for tup in inner:
yield (tup[0], '<div id="l%s" class="code_block">%s</div>' % (num, tup[1]))
num += 1
yield 0, '</pre>'
def generate_code_stats(blob):
stats = {'line_count': 0,
'code_size': 0,
'data_line_count': 0}
code = blob.text
lines = code.split('\n')
stats['code_size'] = blob.size
stats['line_count'] = len(lines)
spaces = re.compile(r'^\s*$')
stats['data_line_count'] = sum([1 for l in lines if not spaces.match(l)])
return stats
def is_text_file(file):
msg = magic.from_buffer(file[:1024])
if ("text" in msg) or ("empty" in msg):
return True
return False
def take_while_true(source):
x = source()
while x:
yield x
x = source()
def serve_file(fp, filename, content_type, last_modified=None,
cache_expires=None, size=None, embed=True, etag=None):
'''Sets the response headers and serves as a wsgi iter'''
if not etag and filename and last_modified:
etag = u'{0}?{1}'.format(filename, last_modified).encode('utf-8')
if etag:
etag_cache(etag)
pylons.response.headers['Content-Type'] = ''
pylons.response.content_type = content_type.encode('utf-8')
pylons.response.cache_expires = cache_expires or asint(
tg.config.get('files_expires_header_secs', 60 * 60))
pylons.response.last_modified = last_modified
if size:
pylons.response.content_length = size
if 'Pragma' in pylons.response.headers:
del pylons.response.headers['Pragma']
if 'Cache-Control' in pylons.response.headers:
del pylons.response.headers['Cache-Control']
if not embed:
pylons.response.headers.add(
'Content-Disposition',
'attachment;filename="%s"' % filename.encode('utf-8'))
# http://code.google.com/p/modwsgi/wiki/FileWrapperExtension
block_size = 4096
if 'wsgi.file_wrapper' in tg.request.environ:
return tg.request.environ['wsgi.file_wrapper'](fp, block_size)
else:
return iter(lambda: fp.read(block_size), '')
class ForgeHTMLSanitizer(html5lib.sanitizer.HTMLSanitizer):
valid_iframe_srcs = ('https://www.youtube.com/embed/', 'https://www.gittip.com/')
def sanitize_token(self, token):
if 'iframe' in self.allowed_elements:
self.allowed_elements.remove('iframe')
if token.get('name') == 'iframe':
attrs = dict(token.get('data'))
if attrs.get('src', '').startswith(self.valid_iframe_srcs):
self.allowed_elements.append('iframe')
return super(ForgeHTMLSanitizer, self).sanitize_token(token)
def ip_address(request):
ip = request.remote_addr
if tg.config.get('ip_address_header'):
ip = request.headers.get(tg.config['ip_address_header']) or ip
return ip
class EmptyCursor(ODMCursor):
"""Ming cursor with no results"""
def __init__(self, *args, **kw):
pass
@property
def extensions(self):
return []
def count(self):
return 0
def _next_impl(self):
raise StopIteration
def next(self):
raise StopIteration
def options(self, **kw):
return self
def limit(self, limit):
return self
def skip(self, skip):
return self
def hint(self, index_or_name):
return self
def sort(self, *args, **kw):
return self
class DateJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
return json.JSONEncoder.default(self, obj)
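# A minimal sketch of DateJSONEncoder: datetimes are serialised with the
# UTC-style format used above. The payload is illustrative only.
def _example_date_json_encoder():
    '''Illustrative sketch only.'''
    payload = {'created': datetime.datetime(2015, 1, 2, 3, 4, 5)}
    return json.dumps(payload, cls=DateJSONEncoder)  # '{"created": "2015-01-02T03:04:05Z"}'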
def clean_phone_number(number):
pattern = re.compile('\W+')
number = pattern.sub('', number)
return number
def phone_number_hash(number):
number = clean_phone_number(number)
return hashlib.sha1(number).hexdigest()
@contextmanager
def skip_mod_date(model_cls):
""" Avoids updating 'mod_date'
Useful for saving cache on a model and things like that.
.. note:: This only works when the changes made to the model are flushed.
:Example:
from allura import model as M
key = self.can_merge_cache_key()
with utils.skip_mod_date(M.MergeRequest):
self.can_merge_cache[key] = val
session(self).flush(self)
:param model_cls: The model *class* being updated.
"""
skip_mod_date = getattr(session(model_cls)._get(), 'skip_mod_date', False)
session(model_cls)._get().skip_mod_date = True
try:
yield
finally:
session(model_cls)._get().skip_mod_date = skip_mod_date | apache-2.0 | 4,468,536,068,037,648,400 | 30.953198 | 92 | 0.609541 | false |
jbalm/ActuarialCashFlowModel | liability/liability_data/Liabilities_data.py | 1 | 3997 | ## Progam packages
from ..Model_Point import Model_Point
## Python packages
import datetime as dt
from xlrd import open_workbook
import xlrd
import numpy as np
import xlwings as xw
class Liabilities_data(object):
"""
Objective:
==========
This class is meant to build up the policyholders database
Attributes:
===========
1. model_points:
Type: array
        Function: collection of the model points, each characterized by its id.
    Methods:
    ========
    1. update:
        Function: updates the model point data from the input Excel workbook.
    """
def __init__(self):
self.model_points = []
def update(self,path):
"""
Method: update
Function: updates data from an excel file named "data\Liability_Data.xls".
Parameter:
1. path:
Type: string
                Function: a single directory or a file name (By default, path = 'data\Liability_Data.xls' and the excel file must be placed in the same folder as the main executed file)
"""
wb2 = open_workbook(path)
sheet = wb2.sheet_by_name("MP_test")
number_of_rows = sheet.nrows
mdp = Model_Point()
mdp.id = str(xw.sheets['MP_test'].range('B4').value)
mdp.average_age = int(xw.sheets['MP_test'].range('B5').value)
mdp.sexe = str(xw.sheets['MP_test'].range('B6').value)
# ========================================================================================
        # Subscription Date
# ========================================================================================
        assert sheet.cell(6,1).ctype == 3, 'Subscription Date must be datetime type'
ms_date_number = sheet.cell(6,1).value
year, month, day, hour, minute, second = xlrd.xldate_as_tuple(ms_date_number,wb2.datemode)
mdp.subscription_date = dt.datetime(year, month, day)
# ========================================================================================
# Valuation Date
# ========================================================================================
assert sheet.cell(7,1).ctype == 3, 'Valuation Date must be datetime type'
ms_date_number = sheet.cell(7,1).value
year, month, day, hour, minute, second = xlrd.xldate_as_tuple(ms_date_number,wb2.datemode)
mdp.valuation_date = dt.datetime(year, month, day)
mdp.get_seniority()
# =======================================================================================
mdp.premium = xw.sheets['MP_test'].range('B9').value
mdp.actual_math_provision = xw.sheets['MP_test'].range('B10').value
mdp.mathematical_provision.append(mdp.actual_math_provision)
# ===============================================================
# get TMG
mdp.TMG_type = xw.sheets['MP_test'].range('B11').value
mdp.TMG = mdp.TMG_type * np.ones(100)
# ===============================================================
mdp.rate_sensibility = xw.sheets['MP_test'].range('B12').value
mdp.margin_rate = xw.sheets['MP_test'].range('B13').value
mdp.number_contract = xw.sheets['MP_test'].range('B14').value
# ===============================================================
# get lapse rate
mdp.lapse_type = xw.sheets['MP_test'].range('B15').value
mdp.lapse_rate = mdp.lapse_type * np.ones(100)
# ===============================================================
mortality_rate = []
for row in range(3, number_of_rows):
mort_rate = (sheet.cell(row, 4).value)
mortality_rate.append(mort_rate)
mdp.mortality_rate = np.concatenate((mortality_rate, [mortality_rate[-1] for t in range(100)]), axis = 0)
self.model_points.append(mdp)
def affiche(self):
for mdl_point in self.model_points:
print(mdl_point)
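# A minimal usage sketch, assuming the workbook path mentioned in the
# docstrings above; update() reads the 'MP_test' sheet through xlwings, which
# assumes the workbook is open in Excel. The helper name is illustrative only.
def _example_load_liabilities(path=r'data\Liability_Data.xls'):
    """ Illustrative sketch: build the database and print each model point. """
    liabilities = Liabilities_data()
    liabilities.update(path)
    liabilities.affiche()
    return liabilities.model_points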
| gpl-3.0 | -3,551,808,631,612,540,000 | 41.084211 | 193 | 0.470603 | false |
fwenzel/millimeter | apps/shortener/tests.py | 1 | 4985 | import random
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import client, TestCase
from lib import base62
from .models import AutoSlug, Link
class ShortenerTestCase(TestCase):
"""Base TestCase for all Shortener tests"""
def setUp(self):
# user and login
self.username = 'john'
self.pw = 'johnpw'
self.user = User.objects.create_user(self.username,
'lennon@thebeatles.com', self.pw)
self.user.is_staff = True
self.user.save()
self.c = client.Client()
def login(self):
self.c.login(username=self.username, password=self.pw)
class AutoSlugTest(ShortenerTestCase):
def test_increasing(self):
"""Are autoslugs increasing?"""
slug1 = AutoSlug.next_autoslug()
slug2 = AutoSlug.next_autoslug()
self.assert_(base62.from62(slug1) < base62.from62(slug2),
'slugs must keep increasing')
class LinkTest(ShortenerTestCase):
def test_autoslug(self):
"""If slug is undefined, will autoslug be assigned?"""
link = Link(url='http://example.com')
link.save()
self.assert_(
link.slug, 'autoslug must be assigned when slug is undefined')
def test_autoslug_no_reassignment(self):
"""
Create a link, delete it, create a new one. Make sure the IDs differ.
"""
link1 = Link(url='http://example.com')
link1.save()
slug1 = link1.slug
link1.delete()
link2 = Link(url='http://example.net')
link2.save()
slug2 = link2.slug
self.assertNotEqual(slug1, slug2, 'slugs must not be reassigned')
class ViewTest(ShortenerTestCase):
def test_forward_valid(self):
"""test if forwarding works"""
link = Link(url='http://example.com')
link.save()
response = self.c.get(reverse('shortener.views.forward', args=[link.slug]))
self.assertEqual(response.status_code, 301,
'valid URLs lead to 301 redirect')
def test_forward_invalid(self):
"""accessing an unknown URL slug"""
response = self.c.get(reverse('shortener.views.forward', args=['abcdef']))
self.assertEqual(response.status_code, 404,
'invalid URLs lead to 404')
def test_create_link_unauthorized(self):
"""creating a link via the front page as an anonymous user"""
myurl = 'http://example.com'
self.c.post(reverse('index'), {'url': myurl})
try:
link = Link.objects.get(url=myurl)
except Link.DoesNotExist:
link = None
self.assertFalse(link,
'creating a link via the front page should not work '
'for unauthorized users')
def test_create_link_authorized(self):
"""creating a link via the front page as authorized user"""
myurl = 'http://example.com'
self.login()
self.c.post(reverse('index'), {'url': myurl})
try:
link = Link.objects.get(url__startswith=myurl)
except Link.DoesNotExist:
link = None
self.assert_(link, 'creating a link via the front page works')
def test_create_same_link_with_slug(self):
"""
creating a link with a slug and without won't map the second request
to the user-defined slug
"""
myurl = 'http://example.com/'
myslug = 'awesome'
self.login()
self.c.post(reverse('index'), {'url': myurl, 'slug': myslug})
self.c.post(reverse('index'), {'url': myurl})
linkcount = Link.objects.filter(url__exact=myurl).count()
self.assertEquals(linkcount, 2,
'request for the same url should not be mapped to '
'the same item if user-defined slug was set')
def test_create_same_link_without_slug(self):
"""
creating the same link twice will be mapped to the same item
"""
myurl = 'http://example.com/'
self.login()
self.c.post(reverse('index'), {'url': myurl})
self.c.post(reverse('index'), {'url': myurl})
linkcount = Link.objects.filter(url__exact=myurl).count()
self.assertEquals(linkcount, 1,
'request for the same url witout slug will be '
'mapped to the same item')
class StatsTest(ShortenerTestCase):
def test_count_visits(self):
"""check visit count"""
link = Link(url='http://example.com')
link.save()
visits = random.randint(1, 100)
for i in range(visits):
self.c.get(reverse('shortener.views.forward', args=[link.slug]))
link = Link.objects.get(pk=link.pk)
self.assertEqual(visits, link.visited,
'number of visits needs to be counted correctly')
| bsd-3-clause | -1,856,158,805,643,254,500 | 33.143836 | 83 | 0.584353 | false |
insolite/alarme | tests/test_core/test_action.py | 1 | 1420 | from unittest.mock import MagicMock
from asynctest import CoroutineMock
from alarme import Action
from tests.common import BaseTest
class ActionTest(BaseTest):
def setUp(self):
super().setUp()
self.app = MagicMock()
self.id = MagicMock()
self.action = Action(self.app, self.id)
def test_execute__end(self):
expected_result = MagicMock()
self.action.run = CoroutineMock(return_value=expected_result)
self.action.cleanup = CoroutineMock()
result = self.loop.run_until_complete(self.action.execute())
self.action.run.assert_called_once_with()
self.action.cleanup.assert_called_once_with()
self.assertEqual(result, expected_result)
def test_execute__crash(self):
class Error(Exception):
pass
self.action.run = CoroutineMock(side_effect=Error)
self.action.cleanup = CoroutineMock()
self.assertRaises(Error, self.loop.run_until_complete, self.action.execute())
self.action.run.assert_called_once_with()
self.action.cleanup.assert_called_once_with()
def test_run(self):
self.loop.run_until_complete(self.action.run())
def test_stop(self):
self.action.running = True
self.action.stop()
self.assertFalse(self.action.running)
def test_cleanup(self):
self.loop.run_until_complete(self.action.cleanup())
| mit | -6,345,220,306,105,214,000 | 27.4 | 85 | 0.659859 | false |
jcsp/manila | manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py | 1 | 129099 | # Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2015 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import hashlib
import time
import ddt
import mock
from oslo_log import log
import six
from manila import exception
from manila.share.drivers.netapp.dataontap.client import client_base
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fake_api \
as netapp_api
from manila.tests.share.drivers.netapp.dataontap.client import fakes as fake
@ddt.ddt
class NetAppClientCmodeTestCase(test.TestCase):
def setUp(self):
super(NetAppClientCmodeTestCase, self).setUp()
# Mock loggers as themselves to allow logger arg validation
mock_logger = log.getLogger('mock_logger')
self.mock_object(client_cmode.LOG,
'error',
mock.Mock(side_effect=mock_logger.error))
self.mock_object(client_cmode.LOG,
'warning',
mock.Mock(side_effect=mock_logger.warning))
self.mock_object(client_cmode.LOG,
'debug',
mock.Mock(side_effect=mock_logger.debug))
self.mock_object(client_base.NetAppBaseClient,
'get_ontapi_version',
mock.Mock(return_value=(1, 20)))
# Inject fake netapp_lib module classes.
netapp_api.mock_netapp_lib([client_base, client_cmode])
self.client = client_cmode.NetAppCmodeClient(**fake.CONNECTION_INFO)
self.client.connection = mock.MagicMock()
self.vserver_client = client_cmode.NetAppCmodeClient(
**fake.CONNECTION_INFO)
self.vserver_client.set_vserver(fake.VSERVER_NAME)
self.vserver_client.connection = mock.MagicMock()
def _mock_api_error(self, code='fake'):
return mock.Mock(side_effect=netapp_api.NaApiError(code=code))
def test_init_features_ontapi_1_21(self):
self.mock_object(client_base.NetAppBaseClient,
'get_ontapi_version',
mock.Mock(return_value=(1, 21)))
self.client._init_features()
self.assertFalse(self.client.features.BROADCAST_DOMAINS)
self.assertFalse(self.client.features.IPSPACES)
self.assertFalse(self.client.features.SUBNETS)
@ddt.data((1, 30), (1, 40), (2, 0))
def test_init_features_ontapi_1_30(self, ontapi_version):
self.mock_object(client_base.NetAppBaseClient,
'get_ontapi_version',
mock.Mock(return_value=ontapi_version))
self.client._init_features()
self.assertTrue(self.client.features.BROADCAST_DOMAINS)
self.assertTrue(self.client.features.IPSPACES)
self.assertTrue(self.client.features.SUBNETS)
def test_invoke_vserver_api(self):
self.client._invoke_vserver_api('fake-api', 'fake_vserver')
self.client.connection.set_vserver.assert_has_calls(
[mock.call('fake_vserver')])
self.client.connection.invoke_successfully.assert_has_calls(
[mock.call('fake-api', True)])
def test_has_records(self):
self.assertTrue(self.client._has_records(
netapp_api.NaElement(fake.VSERVER_GET_ITER_RESPONSE)))
def test_has_records_not_found(self):
self.assertFalse(self.client._has_records(
netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)))
def test_set_vserver(self):
self.client.set_vserver(fake.VSERVER_NAME)
self.client.connection.set_vserver.assert_has_calls(
[mock.call('fake_vserver')])
def test_vserver_exists(self):
api_response = netapp_api.NaElement(fake.VSERVER_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
vserver_get_args = {
'query': {'vserver-info': {'vserver-name': fake.VSERVER_NAME}},
'desired-attributes': {'vserver-info': {'vserver-name': None}}
}
result = self.client.vserver_exists(fake.VSERVER_NAME)
self.client.send_request.assert_has_calls([
mock.call('vserver-get-iter', vserver_get_args)])
self.assertTrue(result)
def test_vserver_exists_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.vserver_exists(fake.VSERVER_NAME)
self.assertFalse(result)
def test_create_vserver(self):
self.mock_object(self.client, 'send_request')
vserver_create_args = {
'vserver-name': fake.VSERVER_NAME,
'root-volume-security-style': 'unix',
'root-volume-aggregate': fake.ROOT_VOLUME_AGGREGATE_NAME,
'root-volume': fake.ROOT_VOLUME_NAME,
'name-server-switch': {'nsswitch': 'file'}
}
vserver_modify_args = {
'aggr-list': [{'aggr-name': aggr_name} for aggr_name
in fake.SHARE_AGGREGATE_NAMES],
'vserver-name': fake.VSERVER_NAME
}
self.client.create_vserver(fake.VSERVER_NAME,
fake.ROOT_VOLUME_AGGREGATE_NAME,
fake.ROOT_VOLUME_NAME,
fake.SHARE_AGGREGATE_NAMES)
self.client.send_request.assert_has_calls([
mock.call('vserver-create', vserver_create_args),
mock.call('vserver-modify', vserver_modify_args)])
def test_get_vserver_root_volume_name(self):
api_response = netapp_api.NaElement(
fake.VSERVER_GET_ROOT_VOLUME_NAME_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
vserver_get_args = {
'query': {'vserver-info': {'vserver-name': fake.VSERVER_NAME}},
'desired-attributes': {'vserver-info': {'root-volume': None}}
}
result = self.client.get_vserver_root_volume_name(fake.VSERVER_NAME)
self.client.send_request.assert_has_calls([
mock.call('vserver-get-iter', vserver_get_args)])
self.assertEqual(fake.ROOT_VOLUME_NAME, result)
def test_get_vserver_root_volume_name_not_found(self):
api_response = netapp_api.NaElement(
fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
self.assertRaises(exception.NetAppException,
self.client.get_vserver_root_volume_name,
fake.VSERVER_NAME)
def test_list_vservers(self):
api_response = netapp_api.NaElement(
fake.VSERVER_DATA_LIST_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.list_vservers()
vserver_get_iter_args = {
'query': {
'vserver-info': {
'vserver-type': 'data'
}
},
'desired-attributes': {
'vserver-info': {
'vserver-name': None
}
}
}
self.client.send_request.assert_has_calls([
mock.call('vserver-get-iter', vserver_get_iter_args)])
self.assertListEqual([fake.VSERVER_NAME], result)
def test_list_vservers_node_type(self):
api_response = netapp_api.NaElement(
fake.VSERVER_DATA_LIST_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.list_vservers(vserver_type='node')
vserver_get_iter_args = {
'query': {
'vserver-info': {
'vserver-type': 'node'
}
},
'desired-attributes': {
'vserver-info': {
'vserver-name': None
}
}
}
self.client.send_request.assert_has_calls([
mock.call('vserver-get-iter', vserver_get_iter_args)])
self.assertListEqual([fake.VSERVER_NAME], result)
def test_list_vservers_not_found(self):
api_response = netapp_api.NaElement(
fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.list_vservers(vserver_type='data')
self.assertListEqual([], result)
def test_get_vserver_volume_count(self):
api_response = netapp_api.NaElement(fake.VOLUME_COUNT_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_vserver_volume_count()
self.assertEqual(2, result)
def test_delete_vserver_no_volumes(self):
self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=True))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
self.mock_object(self.client,
'get_vserver_volume_count',
mock.Mock(return_value=0))
self.mock_object(self.client, '_terminate_vserver_services')
self.mock_object(self.client, 'send_request')
self.client.delete_vserver(
fake.VSERVER_NAME,
self.vserver_client,
security_services=[fake.CIFS_SECURITY_SERVICE])
self.client._terminate_vserver_services.assert_called_with(
fake.VSERVER_NAME, self.vserver_client,
[fake.CIFS_SECURITY_SERVICE])
vserver_destroy_args = {'vserver-name': fake.VSERVER_NAME}
self.client.send_request.assert_has_calls([
mock.call('vserver-destroy', vserver_destroy_args)])
def test_delete_vserver_one_volume(self):
self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=True))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
self.mock_object(self.vserver_client,
'get_vserver_volume_count',
mock.Mock(return_value=1))
self.mock_object(self.client, 'send_request')
self.mock_object(self.vserver_client, 'offline_volume')
self.mock_object(self.vserver_client, 'delete_volume')
self.client.delete_vserver(fake.VSERVER_NAME,
self.vserver_client)
self.vserver_client.offline_volume.assert_called_with(
fake.ROOT_VOLUME_NAME)
self.vserver_client.delete_volume.assert_called_with(
fake.ROOT_VOLUME_NAME)
vserver_destroy_args = {'vserver-name': fake.VSERVER_NAME}
self.client.send_request.assert_has_calls([
mock.call('vserver-destroy', vserver_destroy_args)])
def test_delete_vserver_one_volume_already_offline(self):
self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=True))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
self.mock_object(self.vserver_client,
'get_vserver_volume_count',
mock.Mock(return_value=1))
self.mock_object(self.client, 'send_request')
self.mock_object(self.vserver_client,
'offline_volume',
self._mock_api_error(code=netapp_api.EVOLUMEOFFLINE))
self.mock_object(self.vserver_client, 'delete_volume')
self.client.delete_vserver(fake.VSERVER_NAME,
self.vserver_client)
self.vserver_client.offline_volume.assert_called_with(
fake.ROOT_VOLUME_NAME)
self.vserver_client.delete_volume.assert_called_with(
fake.ROOT_VOLUME_NAME)
vserver_destroy_args = {'vserver-name': fake.VSERVER_NAME}
self.client.send_request.assert_has_calls([
mock.call('vserver-destroy', vserver_destroy_args)])
self.assertEqual(1, client_cmode.LOG.error.call_count)
def test_delete_vserver_one_volume_api_error(self):
self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=True))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
self.mock_object(self.vserver_client,
'get_vserver_volume_count',
mock.Mock(return_value=1))
self.mock_object(self.client, 'send_request')
self.mock_object(self.vserver_client,
'offline_volume',
self._mock_api_error())
self.mock_object(self.vserver_client, 'delete_volume')
self.assertRaises(netapp_api.NaApiError,
self.client.delete_vserver,
fake.VSERVER_NAME,
self.vserver_client)
def test_delete_vserver_multiple_volumes(self):
self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=True))
self.mock_object(self.client,
'get_vserver_root_volume_name',
mock.Mock(return_value=fake.ROOT_VOLUME_NAME))
self.mock_object(self.vserver_client,
'get_vserver_volume_count',
mock.Mock(return_value=2))
self.assertRaises(exception.NetAppException,
self.client.delete_vserver,
fake.VSERVER_NAME,
self.vserver_client)
def test_delete_vserver_not_found(self):
self.mock_object(self.client,
'vserver_exists',
mock.Mock(return_value=False))
self.client.delete_vserver(fake.VSERVER_NAME,
self.vserver_client)
self.assertEqual(1, client_cmode.LOG.error.call_count)
def test_terminate_vserver_services(self):
self.mock_object(self.vserver_client, 'send_request')
self.client._terminate_vserver_services(fake.VSERVER_NAME,
self.vserver_client,
[fake.CIFS_SECURITY_SERVICE])
cifs_server_delete_args = {
'admin-password': fake.CIFS_SECURITY_SERVICE['password'],
'admin-username': fake.CIFS_SECURITY_SERVICE['user'],
}
self.vserver_client.send_request.assert_has_calls([
mock.call('cifs-server-delete', cifs_server_delete_args)])
def test_terminate_vserver_services_cifs_not_found(self):
self.mock_object(self.vserver_client,
'send_request',
self._mock_api_error(
code=netapp_api.EOBJECTNOTFOUND))
self.client._terminate_vserver_services(fake.VSERVER_NAME,
self.vserver_client,
[fake.CIFS_SECURITY_SERVICE])
cifs_server_delete_args = {
'admin-password': fake.CIFS_SECURITY_SERVICE['password'],
'admin-username': fake.CIFS_SECURITY_SERVICE['user'],
}
self.vserver_client.send_request.assert_has_calls([
mock.call('cifs-server-delete', cifs_server_delete_args)])
self.assertEqual(1, client_cmode.LOG.error.call_count)
def test_terminate_vserver_services_api_error(self):
side_effects = [netapp_api.NaApiError(code='fake'), None]
self.mock_object(self.vserver_client,
'send_request',
mock.Mock(side_effect=side_effects))
self.client._terminate_vserver_services(fake.VSERVER_NAME,
self.vserver_client,
[fake.CIFS_SECURITY_SERVICE])
cifs_server_delete_args = {
'admin-password': fake.CIFS_SECURITY_SERVICE['password'],
'admin-username': fake.CIFS_SECURITY_SERVICE['user'],
}
self.vserver_client.send_request.assert_has_calls([
mock.call('cifs-server-delete', cifs_server_delete_args),
mock.call('cifs-server-delete')])
self.assertEqual(0, client_cmode.LOG.error.call_count)
def test_list_cluster_nodes(self):
api_response = netapp_api.NaElement(
fake.SYSTEM_NODE_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.list_cluster_nodes()
self.assertListEqual([fake.NODE_NAME], result)
def test_list_cluster_nodes_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.list_cluster_nodes()
self.assertListEqual([], result)
def test_list_node_data_ports(self):
self.mock_object(self.client,
'get_node_data_ports',
mock.Mock(return_value=fake.SPEED_SORTED_PORTS))
result = self.client.list_node_data_ports(fake.NODE_NAME)
self.assertSequenceEqual(fake.SPEED_SORTED_PORT_NAMES, result)
def test_get_node_data_ports(self):
api_response = netapp_api.NaElement(fake.NET_PORT_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_node_data_ports(fake.NODE_NAME)
net_port_get_iter_args = {
'query': {
'net-port-info': {
'node': fake.NODE_NAME,
'link-status': 'up',
'port-type': 'physical|if_group',
'role': 'data',
},
},
'desired-attributes': {
'net-port-info': {
'port': None,
'node': None,
'operational-speed': None,
'ifgrp-port': None,
},
},
}
self.assertSequenceEqual(fake.SPEED_SORTED_PORTS, result)
self.client.send_request.assert_has_calls([
mock.call('net-port-get-iter', net_port_get_iter_args)])
def test_get_node_data_ports_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_node_data_ports(fake.NODE_NAME)
self.assertSequenceEqual([], result)
def test_sort_data_ports_by_speed(self):
result = self.client._sort_data_ports_by_speed(
fake.UNSORTED_PORTS_ALL_SPEEDS)
self.assertSequenceEqual(fake.SORTED_PORTS_ALL_SPEEDS, result)
def test_list_aggregates(self):
api_response = netapp_api.NaElement(fake.AGGR_GET_NAMES_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.list_aggregates()
self.assertSequenceEqual(fake.SHARE_AGGREGATE_NAMES, result)
def test_list_aggregates_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
self.assertRaises(exception.NetAppException,
self.client.list_aggregates)
def test_list_vserver_aggregates(self):
self.mock_object(self.vserver_client,
'get_vserver_aggregate_capacities',
mock.Mock(return_value=fake.VSERVER_AGGREGATES))
result = self.vserver_client.list_vserver_aggregates()
self.assertListEqual(list(fake.VSERVER_AGGREGATES.keys()), result)
def test_list_vserver_aggregates_none_found(self):
self.mock_object(self.vserver_client,
'get_vserver_aggregate_capacities',
mock.Mock(return_value={}))
result = self.vserver_client.list_vserver_aggregates()
self.assertListEqual([], result)
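    # Exercise create_network_interface under every combination of broadcast
    # domain support and VLAN usage.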
@ddt.data((True, True), (True, False), (False, True), (False, False))
@ddt.unpack
def test_create_network_interface(self, broadcast_domains_supported,
use_vlans):
self.client.features.add_feature('BROADCAST_DOMAINS',
broadcast_domains_supported)
self.mock_object(self.client, '_ensure_broadcast_domain_for_port')
self.mock_object(self.client, '_create_vlan')
self.mock_object(self.client, 'send_request')
lif_create_args = {
'address': fake.IP_ADDRESS,
'administrative-status': 'up',
'data-protocols': [
{'data-protocol': 'nfs'},
{'data-protocol': 'cifs'}
],
'home-node': fake.NODE_NAME,
'home-port': fake.VLAN_PORT if use_vlans else fake.PORT,
'netmask': fake.NETMASK,
'interface-name': fake.LIF_NAME,
'role': 'data',
'vserver': fake.VSERVER_NAME,
}
self.client.create_network_interface(fake.IP_ADDRESS, fake.NETMASK,
fake.VLAN if use_vlans else None,
fake.NODE_NAME, fake.PORT,
fake.VSERVER_NAME,
fake.NET_ALLOCATION_ID,
fake.LIF_NAME_TEMPLATE)
if use_vlans:
self.client._create_vlan.assert_called_with(
fake.NODE_NAME, fake.PORT, fake.VLAN)
else:
self.assertFalse(self.client._create_vlan.called)
if broadcast_domains_supported:
self.client._ensure_broadcast_domain_for_port.assert_called_with(
fake.NODE_NAME, fake.VLAN_PORT if use_vlans else fake.PORT)
else:
self.assertFalse(
self.client._ensure_broadcast_domain_for_port.called)
self.client.send_request.assert_has_calls([
mock.call('net-interface-create', lif_create_args)])
def test_create_vlan(self):
self.mock_object(self.client, 'send_request')
vlan_create_args = {
'vlan-info': {
'parent-interface': fake.PORT,
'node': fake.NODE_NAME,
'vlanid': fake.VLAN
}
}
self.client._create_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN)
self.client.send_request.assert_has_calls([
mock.call('net-vlan-create', vlan_create_args)])
def test_create_vlan_already_present(self):
self.mock_object(self.client,
'send_request',
self._mock_api_error(code=netapp_api.EDUPLICATEENTRY))
vlan_create_args = {
'vlan-info': {
'parent-interface': fake.PORT,
'node': fake.NODE_NAME,
'vlanid': fake.VLAN
}
}
self.client._create_vlan(fake.NODE_NAME, fake.PORT, fake.VLAN)
self.client.send_request.assert_has_calls([
mock.call('net-vlan-create', vlan_create_args)])
self.assertEqual(1, client_cmode.LOG.debug.call_count)
def test_create_vlan_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.assertRaises(exception.NetAppException,
self.client._create_vlan,
fake.NODE_NAME,
fake.PORT,
fake.VLAN)
def test_ensure_broadcast_domain_for_port_has_domain(self):
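        # The port already belongs to a broadcast domain, so beyond the
        # initial lookup nothing should be checked, created, or joined.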
self.mock_object(self.client,
'_get_broadcast_domain_for_port',
mock.Mock(return_value=fake.BROADCAST_DOMAIN))
self.mock_object(self.client, '_broadcast_domain_exists')
self.mock_object(self.client, '_create_broadcast_domain')
self.mock_object(self.client, '_add_port_to_broadcast_domain')
self.client._ensure_broadcast_domain_for_port(fake.NODE_NAME,
fake.PORT)
self.client._get_broadcast_domain_for_port.assert_has_calls([
mock.call(fake.NODE_NAME, fake.PORT)])
self.assertFalse(self.client._broadcast_domain_exists.called)
self.assertFalse(self.client._create_broadcast_domain.called)
self.assertFalse(self.client._add_port_to_broadcast_domain.called)
def test_ensure_broadcast_domain_for_port_domain_not_found(self):
self.mock_object(self.client,
'_get_broadcast_domain_for_port',
mock.Mock(return_value=None))
self.mock_object(self.client,
'_broadcast_domain_exists',
mock.Mock(return_value=False))
self.mock_object(self.client, '_create_broadcast_domain')
self.mock_object(self.client, '_add_port_to_broadcast_domain')
self.client._ensure_broadcast_domain_for_port(
fake.NODE_NAME, fake.PORT, domain=fake.BROADCAST_DOMAIN,
ipspace=fake.IPSPACE)
self.client._get_broadcast_domain_for_port.assert_has_calls([
mock.call(fake.NODE_NAME, fake.PORT)])
self.client._broadcast_domain_exists.assert_has_calls([
mock.call(fake.BROADCAST_DOMAIN, fake.IPSPACE)])
self.client._create_broadcast_domain.assert_has_calls([
mock.call(fake.BROADCAST_DOMAIN, fake.IPSPACE)])
self.client._add_port_to_broadcast_domain.assert_has_calls([
mock.call(fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN,
fake.IPSPACE)])
def test_ensure_broadcast_domain_for_port_domain_found(self):
self.mock_object(self.client,
'_get_broadcast_domain_for_port',
mock.Mock(return_value=None))
self.mock_object(self.client,
'_broadcast_domain_exists',
mock.Mock(return_value=True))
self.mock_object(self.client, '_create_broadcast_domain')
self.mock_object(self.client, '_add_port_to_broadcast_domain')
self.client._ensure_broadcast_domain_for_port(
fake.NODE_NAME, fake.PORT, domain=fake.BROADCAST_DOMAIN,
ipspace=fake.IPSPACE)
self.client._get_broadcast_domain_for_port.assert_has_calls([
mock.call(fake.NODE_NAME, fake.PORT)])
self.client._broadcast_domain_exists.assert_has_calls([
mock.call(fake.BROADCAST_DOMAIN, fake.IPSPACE)])
self.assertFalse(self.client._create_broadcast_domain.called)
self.client._add_port_to_broadcast_domain.assert_has_calls([
mock.call(fake.NODE_NAME, fake.PORT, fake.BROADCAST_DOMAIN,
fake.IPSPACE)])
def test_get_broadcast_domain_for_port(self):
api_response = netapp_api.NaElement(
fake.NET_PORT_GET_ITER_BROADCAST_DOMAIN_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
net_port_get_iter_args = {
'query': {
'net-port-info': {
'node': fake.NODE_NAME,
'port': fake.PORT,
},
},
'desired-attributes': {
'net-port-info': {
'broadcast-domain': None,
},
},
}
result = self.client._get_broadcast_domain_for_port(fake.NODE_NAME,
fake.PORT)
self.client.send_request.assert_has_calls([
mock.call('net-port-get-iter', net_port_get_iter_args)])
self.assertEqual(fake.BROADCAST_DOMAIN, result)
def test_get_broadcast_domain_for_port_port_not_found(self):
api_response = netapp_api.NaElement(
fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
self.assertRaises(exception.NetAppException,
self.client._get_broadcast_domain_for_port,
fake.NODE_NAME,
fake.PORT)
def test_get_broadcast_domain_for_port_domain_not_found(self):
api_response = netapp_api.NaElement(
fake.NET_PORT_GET_ITER_BROADCAST_DOMAIN_MISSING_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client._get_broadcast_domain_for_port(fake.NODE_NAME,
fake.PORT)
self.assertIsNone(result)
def test_broadcast_domain_exists(self):
api_response = netapp_api.NaElement(
fake.NET_PORT_BROADCAST_DOMAIN_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client._broadcast_domain_exists(fake.BROADCAST_DOMAIN,
fake.IPSPACE)
net_port_broadcast_domain_get_iter_args = {
'query': {
'net-port-broadcast-domain-info': {
'ipspace': fake.IPSPACE,
'broadcast-domain': fake.BROADCAST_DOMAIN,
},
},
'desired-attributes': {
'net-port-broadcast-domain-info': None,
},
}
self.client.send_request.assert_has_calls([
mock.call('net-port-broadcast-domain-get-iter',
net_port_broadcast_domain_get_iter_args)])
self.assertTrue(result)
def test_broadcast_domain_exists_not_found(self):
api_response = netapp_api.NaElement(
fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client._broadcast_domain_exists(fake.BROADCAST_DOMAIN,
fake.IPSPACE)
self.assertFalse(result)
def test_create_broadcast_domain(self):
self.mock_object(self.client, 'send_request')
result = self.client._create_broadcast_domain(fake.BROADCAST_DOMAIN,
fake.IPSPACE,
mtu=fake.MTU)
net_port_broadcast_domain_create_args = {
'ipspace': fake.IPSPACE,
'broadcast-domain': fake.BROADCAST_DOMAIN,
'mtu': fake.MTU,
}
self.assertIsNone(result)
self.client.send_request.assert_has_calls([
mock.call('net-port-broadcast-domain-create',
net_port_broadcast_domain_create_args)])
def test_add_port_to_broadcast_domain(self):
self.mock_object(self.client, 'send_request')
add_port_to_broadcast_domain_args = {
'ipspace': fake.IPSPACE,
'broadcast-domain': fake.BROADCAST_DOMAIN,
'ports': {
'net-qualified-port-name': ':'.join([fake.NODE_NAME,
fake.VLAN_PORT])
}
}
result = self.client._add_port_to_broadcast_domain(
fake.NODE_NAME, fake.VLAN_PORT, fake.BROADCAST_DOMAIN,
fake.IPSPACE)
self.assertIsNone(result)
self.client.send_request.assert_has_calls([
mock.call('net-port-broadcast-domain-add-ports',
add_port_to_broadcast_domain_args)])
def test_add_port_to_broadcast_domain_already_present(self):
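        # A port that is already assigned to the broadcast domain is treated
        # as success rather than an error.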
self.mock_object(self.client, 'send_request', self._mock_api_error(
code=netapp_api.
E_VIFMGR_PORT_ALREADY_ASSIGNED_TO_BROADCAST_DOMAIN))
result = self.client._add_port_to_broadcast_domain(
fake.NODE_NAME, fake.VLAN_PORT, fake.BROADCAST_DOMAIN,
fake.IPSPACE)
self.assertIsNone(result)
def test_add_port_to_broadcast_domain_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.assertRaises(exception.NetAppException,
self.client._add_port_to_broadcast_domain,
fake.NODE_NAME,
fake.VLAN_PORT,
fake.BROADCAST_DOMAIN,
fake.IPSPACE)
def test_network_interface_exists(self):
api_response = netapp_api.NaElement(
fake.NET_INTERFACE_GET_ONE_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
net_interface_get_args = {
'query': {
'net-interface-info': {
'address': fake.IP_ADDRESS,
'home-node': fake.NODE_NAME,
'home-port': fake.VLAN_PORT,
'netmask': fake.NETMASK,
'vserver': fake.VSERVER_NAME}
},
'desired-attributes': {
'net-interface-info': {
'interface-name': None,
}
}
}
result = self.client.network_interface_exists(
fake.VSERVER_NAME, fake.NODE_NAME, fake.PORT, fake.IP_ADDRESS,
fake.NETMASK, fake.VLAN)
self.client.send_request.assert_has_calls([
mock.call('net-interface-get-iter', net_interface_get_args)])
self.assertTrue(result)
def test_network_interface_exists_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
net_interface_get_args = {
'query': {
'net-interface-info': {
'address': fake.IP_ADDRESS,
'home-node': fake.NODE_NAME,
'home-port': fake.PORT,
'netmask': fake.NETMASK,
'vserver': fake.VSERVER_NAME}
},
'desired-attributes': {
'net-interface-info': {
'interface-name': None,
}
}
}
result = self.client.network_interface_exists(
fake.VSERVER_NAME, fake.NODE_NAME, fake.PORT, fake.IP_ADDRESS,
fake.NETMASK, None)
self.client.send_request.assert_has_calls([
mock.call('net-interface-get-iter', net_interface_get_args)])
self.assertFalse(result)
def test_list_network_interfaces(self):
api_response = netapp_api.NaElement(
fake.NET_INTERFACE_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
net_interface_get_args = {
'desired-attributes': {
'net-interface-info': {
'interface-name': None,
}
}
}
result = self.client.list_network_interfaces()
self.client.send_request.assert_has_calls([
mock.call('net-interface-get-iter', net_interface_get_args)])
self.assertSequenceEqual(fake.LIF_NAMES, result)
def test_list_network_interfaces_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.list_network_interfaces()
self.assertListEqual([], result)
def test_get_network_interfaces(self):
api_response = netapp_api.NaElement(
fake.NET_INTERFACE_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_network_interfaces()
self.client.send_request.assert_has_calls([
mock.call('net-interface-get-iter', None)])
self.assertSequenceEqual(fake.LIFS, result)
def test_get_network_interfaces_filtered_by_protocol(self):
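        # Caller-supplied protocol names are normalized to lower case in the
        # query ('NFS' becomes 'nfs').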
api_response = netapp_api.NaElement(
fake.NET_INTERFACE_GET_ITER_RESPONSE_NFS)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_network_interfaces(protocols=['NFS'])
net_interface_get_args = {
'query': {
'net-interface-info': {
'data-protocols': {
'data-protocol': 'nfs',
}
}
}
}
self.client.send_request.assert_has_calls([
mock.call('net-interface-get-iter', net_interface_get_args)])
self.assertListEqual(fake.NFS_LIFS, result)
def test_get_network_interfaces_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_network_interfaces()
self.client.send_request.assert_has_calls([
mock.call('net-interface-get-iter', None)])
self.assertListEqual([], result)
def test_delete_network_interface(self):
self.mock_object(self.client, 'send_request')
self.client.delete_network_interface(fake.LIF_NAME)
net_interface_delete_args = {
'vserver': None,
'interface-name': fake.LIF_NAME
}
self.client.send_request.assert_has_calls([
mock.call('net-interface-delete', net_interface_delete_args)])
def test_get_node_for_aggregate(self):
api_response = netapp_api.NaElement(
fake.AGGR_GET_NODE_RESPONSE).get_child_by_name(
'attributes-list').get_children()
self.mock_object(self.client,
'_get_aggregates',
mock.Mock(return_value=api_response))
result = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME)
desired_attributes = {
'aggr-attributes': {
'aggregate-name': None,
'aggr-ownership-attributes': {
'home-name': None,
},
},
}
self.client._get_aggregates.assert_has_calls([
mock.call(
aggregate_names=[fake.SHARE_AGGREGATE_NAME],
desired_attributes=desired_attributes)])
self.assertEqual(fake.NODE_NAME, result)
def test_get_node_for_aggregate_none_requested(self):
result = self.client.get_node_for_aggregate(None)
self.assertIsNone(result)
def test_get_node_for_aggregate_api_not_found(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error(
netapp_api.EAPINOTFOUND)))
result = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME)
self.assertIsNone(result)
def test_get_node_for_aggregate_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.assertRaises(netapp_api.NaApiError,
self.client.get_node_for_aggregate,
fake.SHARE_AGGREGATE_NAME)
def test_get_node_for_aggregate_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_node_for_aggregate(fake.SHARE_AGGREGATE_NAME)
self.assertIsNone(result)
def test_get_cluster_aggregate_capacities(self):
api_response = netapp_api.NaElement(
fake.AGGR_GET_SPACE_RESPONSE).get_child_by_name(
'attributes-list').get_children()
self.mock_object(self.client,
'_get_aggregates',
mock.Mock(return_value=api_response))
result = self.client.get_cluster_aggregate_capacities(
fake.SHARE_AGGREGATE_NAMES)
desired_attributes = {
'aggr-attributes': {
'aggregate-name': None,
'aggr-space-attributes': {
'size-available': None,
'size-total': None,
'size-used': None,
}
}
}
self.client._get_aggregates.assert_has_calls([
mock.call(
aggregate_names=fake.SHARE_AGGREGATE_NAMES,
desired_attributes=desired_attributes)])
expected = {
fake.SHARE_AGGREGATE_NAMES[0]: {
'available': 45670400,
'total': 943718400,
'used': 898048000,
},
fake.SHARE_AGGREGATE_NAMES[1]: {
'available': 4267659264,
'total': 7549747200,
'used': 3282087936,
},
}
self.assertDictEqual(expected, result)
def test_get_cluster_aggregate_capacities_not_found(self):
api_response = netapp_api.NaElement('none').get_children()
self.mock_object(self.client,
'_get_aggregates',
mock.Mock(return_value=api_response))
result = self.client.get_cluster_aggregate_capacities(
fake.SHARE_AGGREGATE_NAMES)
self.assertEqual({}, result)
def test_get_cluster_aggregate_capacities_none_requested(self):
result = self.client.get_cluster_aggregate_capacities([])
self.assertEqual({}, result)
def test_get_vserver_aggregate_capacities(self):
api_response = netapp_api.NaElement(fake.VSERVER_GET_RESPONSE)
self.mock_object(self.vserver_client,
'send_request',
mock.Mock(return_value=api_response))
result = self.vserver_client.get_vserver_aggregate_capacities()
vserver_args = {
'desired-attributes': {
'vserver-info': {
'vserver-name': None,
'vserver-aggr-info-list': {
'vserver-aggr-info': {
'aggr-name': None,
'aggr-availsize': None
}
}
}
}
}
self.vserver_client.send_request.assert_has_calls([
mock.call('vserver-get', vserver_args)])
self.assertDictEqual(fake.VSERVER_AGGREGATES, result)
def test_get_vserver_aggregate_capacities_partial_request(self):
api_response = netapp_api.NaElement(fake.VSERVER_GET_RESPONSE)
self.mock_object(self.vserver_client,
'send_request',
mock.Mock(return_value=api_response))
result = self.vserver_client.get_vserver_aggregate_capacities(
fake.SHARE_AGGREGATE_NAMES[0])
expected = {fake.SHARE_AGGREGATE_NAMES[0]:
fake.VSERVER_AGGREGATES[fake.SHARE_AGGREGATE_NAMES[0]]}
self.assertDictEqual(expected, result)
def test_get_vserver_aggregate_capacities_aggregate_not_found(self):
api_response = netapp_api.NaElement(
fake.VSERVER_GET_RESPONSE_NO_AGGREGATES)
self.mock_object(self.vserver_client,
'send_request',
mock.Mock(return_value=api_response))
result = self.vserver_client.get_vserver_aggregate_capacities()
self.assertDictEqual({}, result)
self.assertEqual(1, client_cmode.LOG.warning.call_count)
def test_get_vserver_aggregate_capacities_vserver_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.vserver_client,
'send_request',
mock.Mock(return_value=api_response))
self.assertRaises(exception.NetAppException,
self.vserver_client.get_vserver_aggregate_capacities)
def test_get_vserver_aggregate_capacities_none_requested(self):
result = self.client.get_vserver_aggregate_capacities([])
self.assertEqual({}, result)
def test_get_aggregates(self):
api_response = netapp_api.NaElement(fake.AGGR_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client._get_aggregates()
self.client.send_request.assert_has_calls([
mock.call('aggr-get-iter', {})])
self.assertListEqual(
[aggr.to_string() for aggr in api_response.get_child_by_name(
'attributes-list').get_children()],
[aggr.to_string() for aggr in result])
def test_get_aggregates_with_filters(self):
api_response = netapp_api.NaElement(fake.AGGR_GET_SPACE_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
desired_attributes = {
'aggr-attributes': {
'aggregate-name': None,
'aggr-space-attributes': {
'size-total': None,
'size-available': None,
}
}
}
result = self.client._get_aggregates(
aggregate_names=fake.SHARE_AGGREGATE_NAMES,
desired_attributes=desired_attributes)
aggr_get_iter_args = {
'query': {
'aggr-attributes': {
'aggregate-name': '|'.join(fake.SHARE_AGGREGATE_NAMES),
}
},
'desired-attributes': desired_attributes
}
self.client.send_request.assert_has_calls([
mock.call('aggr-get-iter', aggr_get_iter_args)])
self.assertListEqual(
[aggr.to_string() for aggr in api_response.get_child_by_name(
'attributes-list').get_children()],
[aggr.to_string() for aggr in result])
def test_get_aggregates_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client._get_aggregates()
self.client.send_request.assert_has_calls([
mock.call('aggr-get-iter', {})])
self.assertListEqual([], result)
def test_setup_security_services_ldap(self):
self.mock_object(self.client, 'send_request')
self.mock_object(self.vserver_client, 'configure_ldap')
self.client.setup_security_services([fake.LDAP_SECURITY_SERVICE],
self.vserver_client,
fake.VSERVER_NAME)
vserver_modify_args = {
'name-mapping-switch': [
{'nmswitch': 'ldap'},
{'nmswitch': 'file'},
],
'name-server-switch': [
{'nsswitch': 'ldap'},
{'nsswitch': 'file'},
],
'vserver-name': fake.VSERVER_NAME
}
self.client.send_request.assert_has_calls([
mock.call('vserver-modify', vserver_modify_args)])
self.vserver_client.configure_ldap.assert_has_calls([
mock.call(fake.LDAP_SECURITY_SERVICE)])
def test_setup_security_services_active_directory(self):
self.mock_object(self.client, 'send_request')
self.mock_object(self.vserver_client, 'configure_active_directory')
self.client.setup_security_services([fake.CIFS_SECURITY_SERVICE],
self.vserver_client,
fake.VSERVER_NAME)
vserver_modify_args = {
'name-mapping-switch': [
{'nmswitch': 'ldap'},
{'nmswitch': 'file'},
],
'name-server-switch': [
{'nsswitch': 'ldap'},
{'nsswitch': 'file'},
],
'vserver-name': fake.VSERVER_NAME
}
self.client.send_request.assert_has_calls([
mock.call('vserver-modify', vserver_modify_args)])
self.vserver_client.configure_active_directory.assert_has_calls([
mock.call(fake.CIFS_SECURITY_SERVICE, fake.VSERVER_NAME)])
def test_setup_security_services_kerberos(self):
self.mock_object(self.client, 'send_request')
self.mock_object(self.client, 'create_kerberos_realm')
self.mock_object(self.vserver_client, 'configure_kerberos')
self.client.setup_security_services([fake.KERBEROS_SECURITY_SERVICE],
self.vserver_client,
fake.VSERVER_NAME)
vserver_modify_args = {
'name-mapping-switch': [
{'nmswitch': 'ldap'},
{'nmswitch': 'file'},
],
'name-server-switch': [
{'nsswitch': 'ldap'},
{'nsswitch': 'file'},
],
'vserver-name': fake.VSERVER_NAME
}
self.client.send_request.assert_has_calls([
mock.call('vserver-modify', vserver_modify_args)])
self.client.create_kerberos_realm.assert_has_calls([
mock.call(fake.KERBEROS_SECURITY_SERVICE)])
self.vserver_client.configure_kerberos.assert_has_calls([
mock.call(fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME)])
def test_setup_security_services_invalid(self):
self.mock_object(self.client, 'send_request')
self.assertRaises(exception.NetAppException,
self.client.setup_security_services,
[fake.INVALID_SECURITY_SERVICE],
self.vserver_client,
fake.VSERVER_NAME)
vserver_modify_args = {
'name-mapping-switch': [
{'nmswitch': 'ldap'},
{'nmswitch': 'file'},
],
'name-server-switch': [
{'nsswitch': 'ldap'},
{'nsswitch': 'file'},
],
'vserver-name': fake.VSERVER_NAME
}
self.client.send_request.assert_has_calls([
mock.call('vserver-modify', vserver_modify_args)])
def test_enable_nfs(self):
self.mock_object(self.client, 'send_request')
self.client.enable_nfs()
nfs_service_modify_args = {'is-nfsv40-enabled': 'true'}
export_rule_create_args = {
'client-match': '0.0.0.0/0',
'policy-name': 'default',
'ro-rule': {
'security-flavor': 'any'
},
'rw-rule': {
'security-flavor': 'never'
}
}
self.client.send_request.assert_has_calls([
mock.call('nfs-enable'),
mock.call('nfs-service-modify', nfs_service_modify_args),
mock.call('export-rule-create', export_rule_create_args)])
def test_configure_ldap(self):
self.mock_object(self.client, 'send_request')
self.client.configure_ldap(fake.LDAP_SECURITY_SERVICE)
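        # The client derives the LDAP client-config name from an MD5 hash of
        # the security service ID, so recompute it for the expected API args.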
config_name = hashlib.md5(
six.b(fake.LDAP_SECURITY_SERVICE['id'])).hexdigest()
ldap_client_create_args = {
'ldap-client-config': config_name,
'servers': {'ip-address': fake.LDAP_SECURITY_SERVICE['server']},
'tcp-port': '389',
'schema': 'RFC-2307',
'bind-password': fake.LDAP_SECURITY_SERVICE['password']
}
ldap_config_create_args = {
'client-config': config_name,
'client-enabled': 'true'
}
self.client.send_request.assert_has_calls([
mock.call('ldap-client-create', ldap_client_create_args),
mock.call('ldap-config-create', ldap_config_create_args)])
def test_configure_active_directory(self):
self.mock_object(self.client, 'send_request')
self.mock_object(self.client, 'configure_dns')
self.client.configure_active_directory(fake.CIFS_SECURITY_SERVICE,
fake.VSERVER_NAME)
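        # The expected CIFS server name is the vserver name truncated to 15
        # characters (the NetBIOS name limit) and upper-cased.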
cifs_server = (
fake.VSERVER_NAME[0:7] + '..' + fake.VSERVER_NAME[-6:]).upper()
cifs_server_create_args = {
'admin-username': fake.CIFS_SECURITY_SERVICE['user'],
'admin-password': fake.CIFS_SECURITY_SERVICE['password'],
'force-account-overwrite': 'true',
'cifs-server': cifs_server,
'domain': fake.CIFS_SECURITY_SERVICE['domain'],
}
self.client.configure_dns.assert_called_with(
fake.CIFS_SECURITY_SERVICE)
self.client.send_request.assert_has_calls([
mock.call('cifs-server-create', cifs_server_create_args)])
def test_configure_active_directory_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.mock_object(self.client, 'configure_dns')
self.assertRaises(exception.NetAppException,
self.client.configure_active_directory,
fake.CIFS_SECURITY_SERVICE,
fake.VSERVER_NAME)
def test_create_kerberos_realm(self):
self.mock_object(self.client, 'send_request')
self.client.create_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE)
kerberos_realm_create_args = {
'admin-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'],
'admin-server-port': '749',
'clock-skew': '5',
'comment': '',
'config-name': fake.KERBEROS_SECURITY_SERVICE['id'],
'kdc-ip': fake.KERBEROS_SECURITY_SERVICE['server'],
'kdc-port': '88',
'kdc-vendor': 'other',
'password-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'],
'password-server-port': '464',
'realm': fake.KERBEROS_SECURITY_SERVICE['domain'].upper()
}
self.client.send_request.assert_has_calls([
mock.call('kerberos-realm-create', kerberos_realm_create_args)])
def test_create_kerberos_realm_already_present(self):
self.mock_object(self.client,
'send_request',
self._mock_api_error(code=netapp_api.EDUPLICATEENTRY))
self.client.create_kerberos_realm(fake.KERBEROS_SECURITY_SERVICE)
kerberos_realm_create_args = {
'admin-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'],
'admin-server-port': '749',
'clock-skew': '5',
'comment': '',
'config-name': fake.KERBEROS_SECURITY_SERVICE['id'],
'kdc-ip': fake.KERBEROS_SECURITY_SERVICE['server'],
'kdc-port': '88',
'kdc-vendor': 'other',
'password-server-ip': fake.KERBEROS_SECURITY_SERVICE['server'],
'password-server-port': '464',
'realm': fake.KERBEROS_SECURITY_SERVICE['domain'].upper()
}
self.client.send_request.assert_has_calls([
mock.call('kerberos-realm-create', kerberos_realm_create_args)])
self.assertEqual(1, client_cmode.LOG.debug.call_count)
def test_create_kerberos_realm_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.assertRaises(exception.NetAppException,
self.client.create_kerberos_realm,
fake.KERBEROS_SECURITY_SERVICE)
def test_configure_kerberos(self):
self.mock_object(self.client, 'send_request')
self.mock_object(self.client, 'configure_dns')
self.mock_object(self.client,
'list_network_interfaces',
mock.Mock(return_value=['lif1', 'lif2']))
self.client.configure_kerberos(
fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME)
spn = self.client._get_kerberos_service_principal_name(
fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME)
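        # Kerberos is enabled per data LIF, so expect one
        # kerberos-config-modify call for each interface.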
kerberos_config_modify_args1 = {
'admin-password': fake.KERBEROS_SECURITY_SERVICE['password'],
'admin-user-name': fake.KERBEROS_SECURITY_SERVICE['user'],
'interface-name': 'lif1',
'is-kerberos-enabled': 'true',
'service-principal-name': spn
}
kerberos_config_modify_args2 = {
'admin-password': fake.KERBEROS_SECURITY_SERVICE['password'],
'admin-user-name': fake.KERBEROS_SECURITY_SERVICE['user'],
'interface-name': 'lif2',
'is-kerberos-enabled': 'true',
'service-principal-name': spn
}
self.client.configure_dns.assert_called_with(
fake.KERBEROS_SECURITY_SERVICE)
self.client.send_request.assert_has_calls([
mock.call('kerberos-config-modify',
kerberos_config_modify_args1),
mock.call('kerberos-config-modify',
kerberos_config_modify_args2)])
def test_configure_kerberos_no_network_interfaces(self):
self.mock_object(self.client, 'send_request')
self.mock_object(self.client, 'configure_dns')
self.mock_object(self.client,
'list_network_interfaces',
mock.Mock(return_value=[]))
self.assertRaises(exception.NetAppException,
self.client.configure_kerberos,
fake.KERBEROS_SECURITY_SERVICE,
fake.VSERVER_NAME)
self.client.configure_dns.assert_called_with(
fake.KERBEROS_SECURITY_SERVICE)
def test_get_kerberos_service_principal_name(self):
spn = self.client._get_kerberos_service_principal_name(
fake.KERBEROS_SECURITY_SERVICE, fake.VSERVER_NAME
)
self.assertEqual(fake.KERBEROS_SERVICE_PRINCIPAL_NAME, spn)
def test_configure_dns_for_active_directory(self):
self.mock_object(self.client, 'send_request')
self.client.configure_dns(fake.CIFS_SECURITY_SERVICE)
net_dns_create_args = {
'domains': {'string': fake.CIFS_SECURITY_SERVICE['domain']},
'name-servers': {
'ip-address': fake.CIFS_SECURITY_SERVICE['dns_ip']
},
'dns-state': 'enabled'
}
self.client.send_request.assert_has_calls([
mock.call('net-dns-create', net_dns_create_args)])
def test_configure_dns_for_kerberos(self):
self.mock_object(self.client, 'send_request')
self.client.configure_dns(fake.KERBEROS_SECURITY_SERVICE)
net_dns_create_args = {
'domains': {'string': fake.KERBEROS_SECURITY_SERVICE['domain']},
'name-servers': {
'ip-address': fake.KERBEROS_SECURITY_SERVICE['dns_ip']
},
'dns-state': 'enabled'
}
self.client.send_request.assert_has_calls([
mock.call('net-dns-create', net_dns_create_args)])
def test_configure_dns_already_present(self):
self.mock_object(self.client,
'send_request',
self._mock_api_error(code=netapp_api.EDUPLICATEENTRY))
self.client.configure_dns(fake.KERBEROS_SECURITY_SERVICE)
net_dns_create_args = {
'domains': {'string': fake.KERBEROS_SECURITY_SERVICE['domain']},
'name-servers': {
'ip-address': fake.KERBEROS_SECURITY_SERVICE['dns_ip']
},
'dns-state': 'enabled'
}
self.client.send_request.assert_has_calls([
mock.call('net-dns-create', net_dns_create_args)])
self.assertEqual(1, client_cmode.LOG.error.call_count)
def test_configure_dns_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.assertRaises(exception.NetAppException,
self.client.configure_dns,
fake.KERBEROS_SECURITY_SERVICE)
def test_create_volume(self):
self.mock_object(self.client, 'send_request')
self.client.create_volume(
fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, 100)
volume_create_args = {
'containing-aggr-name': fake.SHARE_AGGREGATE_NAME,
'size': '100g',
'volume': fake.SHARE_NAME,
'junction-path': '/%s' % fake.SHARE_NAME,
}
self.client.send_request.assert_called_once_with('volume-create',
volume_create_args)
def test_create_volume_with_extra_specs(self):
self.mock_object(self.client, 'set_volume_max_files')
self.mock_object(self.client, 'enable_dedup')
self.mock_object(self.client, 'enable_compression')
self.mock_object(self.client, 'send_request')
self.client.create_volume(
fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME, 100,
thin_provisioned=True, language='en-US',
snapshot_policy='default', dedup_enabled=True,
            compression_enabled=True, max_files=fake.MAX_FILES)
volume_create_args = {
'containing-aggr-name': fake.SHARE_AGGREGATE_NAME,
'size': '100g',
'volume': fake.SHARE_NAME,
'junction-path': '/%s' % fake.SHARE_NAME,
'space-reserve': 'none',
'language-code': 'en-US',
'snapshot-policy': 'default',
}
self.client.send_request.assert_called_with('volume-create',
volume_create_args)
self.client.set_volume_max_files.assert_called_once_with(
fake.SHARE_NAME, fake.MAX_FILES)
self.client.enable_dedup.assert_called_once_with(fake.SHARE_NAME)
self.client.enable_compression.assert_called_once_with(fake.SHARE_NAME)
def test_enable_dedup(self):
self.mock_object(self.client, 'send_request')
self.client.enable_dedup(fake.SHARE_NAME)
sis_enable_args = {'path': '/vol/%s' % fake.SHARE_NAME}
self.client.send_request.assert_called_once_with('sis-enable',
sis_enable_args)
def test_disable_dedup(self):
self.mock_object(self.client, 'send_request')
self.client.disable_dedup(fake.SHARE_NAME)
sis_disable_args = {'path': '/vol/%s' % fake.SHARE_NAME}
self.client.send_request.assert_called_once_with('sis-disable',
sis_disable_args)
def test_enable_compression(self):
self.mock_object(self.client, 'send_request')
self.client.enable_compression(fake.SHARE_NAME)
sis_set_config_args = {
'path': '/vol/%s' % fake.SHARE_NAME,
'enable-compression': 'true'
}
self.client.send_request.assert_called_once_with('sis-set-config',
sis_set_config_args)
def test_disable_compression(self):
self.mock_object(self.client, 'send_request')
self.client.disable_compression(fake.SHARE_NAME)
sis_set_config_args = {
'path': '/vol/%s' % fake.SHARE_NAME,
'enable-compression': 'false'
}
self.client.send_request.assert_called_once_with('sis-set-config',
sis_set_config_args)
def test_get_volume_efficiency_status(self):
api_response = netapp_api.NaElement(fake.SIS_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_volume_efficiency_status(fake.SHARE_NAME)
sis_get_iter_args = {
'query': {
'sis-status-info': {
'path': '/vol/%s' % fake.SHARE_NAME,
},
},
'desired-attributes': {
'sis-status-info': {
'state': None,
'is-compression-enabled': None,
},
},
}
self.client.send_request.assert_has_calls([
mock.call('sis-get-iter', sis_get_iter_args)])
expected = {'dedupe': True, 'compression': True}
self.assertDictEqual(expected, result)
def test_get_volume_efficiency_status_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_volume_efficiency_status(fake.SHARE_NAME)
expected = {'dedupe': False, 'compression': False}
self.assertDictEqual(expected, result)
def test_set_volume_max_files(self):
self.mock_object(self.client, 'send_request')
self.client.set_volume_max_files(fake.SHARE_NAME, fake.MAX_FILES)
volume_modify_iter_api_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': fake.SHARE_NAME,
},
},
},
'attributes': {
'volume-attributes': {
'volume-inode-attributes': {
'files-total': fake.MAX_FILES,
},
},
},
}
self.client.send_request.assert_called_once_with(
'volume-modify-iter', volume_modify_iter_api_args)
def test_set_volume_name(self):
self.mock_object(self.client, 'send_request')
self.client.set_volume_name(fake.SHARE_NAME, 'new_name')
volume_rename_api_args = {
'volume': fake.SHARE_NAME,
'new-volume-name': 'new_name',
}
self.client.send_request.assert_called_once_with(
'volume-rename', volume_rename_api_args)
def test_manage_volume_no_optional_args(self):
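        # With no optional arguments, manage_volume should reset the volume to
        # thick provisioning and disable both dedupe and compression.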
self.mock_object(self.client, 'send_request')
mock_update_volume_efficiency_attributes = self.mock_object(
self.client, 'update_volume_efficiency_attributes')
self.client.manage_volume(fake.SHARE_AGGREGATE_NAME, fake.SHARE_NAME)
volume_modify_iter_api_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'containing-aggregate-name': fake.SHARE_AGGREGATE_NAME,
'name': fake.SHARE_NAME,
},
},
},
'attributes': {
'volume-attributes': {
'volume-inode-attributes': {},
'volume-language-attributes': {},
'volume-snapshot-attributes': {},
'volume-space-attributes': {
'space-guarantee': 'volume',
},
},
},
}
self.client.send_request.assert_called_once_with(
'volume-modify-iter', volume_modify_iter_api_args)
mock_update_volume_efficiency_attributes.assert_called_once_with(
fake.SHARE_NAME, False, False)
def test_manage_volume_all_optional_args(self):
self.mock_object(self.client, 'send_request')
mock_update_volume_efficiency_attributes = self.mock_object(
self.client, 'update_volume_efficiency_attributes')
self.client.manage_volume(fake.SHARE_AGGREGATE_NAME,
fake.SHARE_NAME,
thin_provisioned=True,
snapshot_policy=fake.SNAPSHOT_POLICY_NAME,
language=fake.LANGUAGE,
dedup_enabled=True,
compression_enabled=False,
max_files=fake.MAX_FILES)
volume_modify_iter_api_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'containing-aggregate-name': fake.SHARE_AGGREGATE_NAME,
'name': fake.SHARE_NAME,
},
},
},
'attributes': {
'volume-attributes': {
'volume-inode-attributes': {
'files-total': fake.MAX_FILES,
},
'volume-language-attributes': {
'language': fake.LANGUAGE,
},
'volume-snapshot-attributes': {
'snapshot-policy': fake.SNAPSHOT_POLICY_NAME,
},
'volume-space-attributes': {
'space-guarantee': 'none',
},
},
},
}
self.client.send_request.assert_called_once_with(
'volume-modify-iter', volume_modify_iter_api_args)
mock_update_volume_efficiency_attributes.assert_called_once_with(
fake.SHARE_NAME, True, False)
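    # Each case pairs (dedupe, compression) flags for the volume's existing
    # efficiency state with the desired state.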
@ddt.data(
{'existing': (True, True), 'desired': (True, True)},
{'existing': (True, True), 'desired': (False, False)},
{'existing': (True, True), 'desired': (True, False)},
{'existing': (True, False), 'desired': (True, False)},
{'existing': (True, False), 'desired': (False, False)},
{'existing': (True, False), 'desired': (True, True)},
{'existing': (False, False), 'desired': (False, False)},
{'existing': (False, False), 'desired': (True, False)},
{'existing': (False, False), 'desired': (True, True)},
)
@ddt.unpack
def test_update_volume_efficiency_attributes(self, existing, desired):
existing_dedupe = existing[0]
existing_compression = existing[1]
desired_dedupe = desired[0]
desired_compression = desired[1]
self.mock_object(
self.client,
'get_volume_efficiency_status',
mock.Mock(return_value={'dedupe': existing_dedupe,
'compression': existing_compression}))
mock_enable_compression = self.mock_object(self.client,
'enable_compression')
mock_disable_compression = self.mock_object(self.client,
'disable_compression')
mock_enable_dedup = self.mock_object(self.client, 'enable_dedup')
mock_disable_dedup = self.mock_object(self.client, 'disable_dedup')
self.client.update_volume_efficiency_attributes(
fake.SHARE_NAME, desired_dedupe, desired_compression)
if existing_dedupe == desired_dedupe:
self.assertFalse(mock_enable_dedup.called)
self.assertFalse(mock_disable_dedup.called)
elif existing_dedupe and not desired_dedupe:
self.assertFalse(mock_enable_dedup.called)
self.assertTrue(mock_disable_dedup.called)
elif not existing_dedupe and desired_dedupe:
self.assertTrue(mock_enable_dedup.called)
self.assertFalse(mock_disable_dedup.called)
if existing_compression == desired_compression:
self.assertFalse(mock_enable_compression.called)
self.assertFalse(mock_disable_compression.called)
elif existing_compression and not desired_compression:
self.assertFalse(mock_enable_compression.called)
self.assertTrue(mock_disable_compression.called)
elif not existing_compression and desired_compression:
self.assertTrue(mock_enable_compression.called)
self.assertFalse(mock_disable_compression.called)
def test_set_volume_size(self):
api_response = netapp_api.NaElement(fake.VOLUME_MODIFY_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
self.client.set_volume_size(fake.SHARE_NAME, 10)
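        # The requested size in GB is converted to bytes (10 * 1024^3) for the
        # volume-modify-iter arguments.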
volume_modify_iter_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': fake.SHARE_NAME
}
}
},
'attributes': {
'volume-attributes': {
'volume-space-attributes': {
'size': 10737418240,
},
},
},
}
self.client.send_request.assert_has_calls([
mock.call('volume-modify-iter', volume_modify_iter_args)])
def test_set_volume_size_api_error(self):
api_response = netapp_api.NaElement(
fake.VOLUME_MODIFY_ITER_ERROR_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
self.assertRaises(netapp_api.NaApiError,
self.client.set_volume_size,
fake.SHARE_NAME,
10)
def test_volume_exists(self):
api_response = netapp_api.NaElement(fake.VOLUME_GET_NAME_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.volume_exists(fake.SHARE_NAME)
volume_get_iter_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': fake.SHARE_NAME
}
}
},
'desired-attributes': {
'volume-attributes': {
'volume-id-attributes': {
'name': None
}
}
}
}
self.client.send_request.assert_has_calls([
mock.call('volume-get-iter', volume_get_iter_args)])
self.assertTrue(result)
def test_volume_exists_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
self.assertFalse(self.client.volume_exists(fake.SHARE_NAME))
def test_get_aggregate_for_volume(self):
api_response = netapp_api.NaElement(
fake.GET_AGGREGATE_FOR_VOLUME_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_aggregate_for_volume(fake.SHARE_NAME)
volume_get_iter_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': fake.SHARE_NAME
}
}
},
'desired-attributes': {
'volume-attributes': {
'volume-id-attributes': {
'containing-aggregate-name': None,
'name': None
}
}
}
}
self.client.send_request.assert_has_calls([
mock.call('volume-get-iter', volume_get_iter_args)])
self.assertEqual(fake.SHARE_AGGREGATE_NAME, result)
def test_get_aggregate_for_volume_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
self.assertRaises(exception.NetAppException,
self.client.get_aggregate_for_volume,
fake.SHARE_NAME)
def test_volume_has_luns(self):
api_response = netapp_api.NaElement(fake.LUN_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.volume_has_luns(fake.SHARE_NAME)
lun_get_iter_args = {
'query': {
'lun-info': {
'volume': fake.SHARE_NAME,
},
},
'desired-attributes': {
'lun-info': {
'path': None,
},
},
}
self.client.send_request.assert_has_calls([
mock.call('lun-get-iter', lun_get_iter_args)])
self.assertTrue(result)
def test_volume_has_luns_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.volume_has_luns(fake.SHARE_NAME)
self.assertFalse(result)
def test_volume_has_junctioned_volumes(self):
api_response = netapp_api.NaElement(
fake.VOLUME_GET_ITER_JUNCTIONED_VOLUMES_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
fake_junction_path = '/%s' % fake.SHARE_NAME
self.mock_object(self.client,
'get_volume_junction_path',
mock.Mock(return_value=fake_junction_path))
result = self.client.volume_has_junctioned_volumes(fake.SHARE_NAME)
volume_get_iter_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'junction-path': fake_junction_path + '/*',
},
},
},
'desired-attributes': {
'volume-attributes': {
'volume-id-attributes': {
'name': None,
},
},
},
}
self.client.send_request.assert_has_calls([
mock.call('volume-get-iter', volume_get_iter_args)])
self.assertTrue(result)
def test_volume_has_junctioned_volumes_no_junction_path(self):
self.mock_object(self.client,
'get_volume_junction_path',
mock.Mock(return_value=''))
result = self.client.volume_has_junctioned_volumes(fake.SHARE_NAME)
self.assertFalse(result)
def test_volume_has_junctioned_volumes_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
fake_junction_path = '/%s' % fake.SHARE_NAME
self.mock_object(self.client,
'get_volume_junction_path',
mock.Mock(return_value=fake_junction_path))
result = self.client.volume_has_junctioned_volumes(fake.SHARE_NAME)
self.assertFalse(result)
def test_get_volume_at_junction_path(self):
api_response = netapp_api.NaElement(
fake.VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
fake_junction_path = '/%s' % fake.SHARE_NAME
result = self.client.get_volume_at_junction_path(fake_junction_path)
volume_get_iter_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'junction-path': fake_junction_path,
},
},
},
'desired-attributes': {
'volume-attributes': {
'volume-id-attributes': {
'containing-aggregate-name': None,
'junction-path': None,
'name': None,
'type': None,
'style': None,
},
'volume-space-attributes': {
'size': None,
}
},
},
}
expected = {
'aggregate': fake.SHARE_AGGREGATE_NAME,
'junction-path': fake_junction_path,
'name': fake.SHARE_NAME,
'type': 'rw',
'style': 'flex',
'size': fake.SHARE_SIZE,
}
self.client.send_request.assert_has_calls([
mock.call('volume-get-iter', volume_get_iter_args)])
self.assertDictEqual(expected, result)
def test_get_volume_at_junction_path_not_specified(self):
result = self.client.get_volume_at_junction_path(None)
self.assertIsNone(result)
def test_get_volume_at_junction_path_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
fake_junction_path = '/%s' % fake.SHARE_NAME
result = self.client.get_volume_at_junction_path(fake_junction_path)
self.assertIsNone(result)
def test_get_volume_to_manage(self):
api_response = netapp_api.NaElement(
fake.VOLUME_GET_ITER_VOLUME_TO_MANAGE_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_volume_to_manage(fake.SHARE_AGGREGATE_NAME,
fake.SHARE_NAME)
volume_get_iter_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'containing-aggregate-name': fake.SHARE_AGGREGATE_NAME,
'name': fake.SHARE_NAME,
},
},
},
'desired-attributes': {
'volume-attributes': {
'volume-id-attributes': {
'containing-aggregate-name': None,
'junction-path': None,
'name': None,
'type': None,
'style': None,
},
'volume-space-attributes': {
'size': None,
}
},
},
}
expected = {
'aggregate': fake.SHARE_AGGREGATE_NAME,
'junction-path': '/%s' % fake.SHARE_NAME,
'name': fake.SHARE_NAME,
'type': 'rw',
'style': 'flex',
'size': fake.SHARE_SIZE,
}
self.client.send_request.assert_has_calls([
mock.call('volume-get-iter', volume_get_iter_args)])
self.assertDictEqual(expected, result)
def test_get_volume_to_manage_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_volume_to_manage(fake.SHARE_AGGREGATE_NAME,
fake.SHARE_NAME)
self.assertIsNone(result)
def test_create_volume_clone(self):
self.mock_object(self.client, 'send_request')
self.client.create_volume_clone(fake.SHARE_NAME,
fake.PARENT_SHARE_NAME,
fake.PARENT_SNAPSHOT_NAME)
volume_clone_create_args = {
'volume': fake.SHARE_NAME,
'parent-volume': fake.PARENT_SHARE_NAME,
'parent-snapshot': fake.PARENT_SNAPSHOT_NAME,
'junction-path': '/%s' % fake.SHARE_NAME
}
self.client.send_request.assert_has_calls([
mock.call('volume-clone-create', volume_clone_create_args)])
def test_split_volume_clone(self):
self.mock_object(self.client, 'send_request')
self.client.split_volume_clone(fake.SHARE_NAME)
volume_clone_split_args = {'volume': fake.SHARE_NAME}
self.client.send_request.assert_has_calls([
mock.call('volume-clone-split-start', volume_clone_split_args)])
def test_get_volume_junction_path(self):
api_response = netapp_api.NaElement(
fake.VOLUME_GET_VOLUME_PATH_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_volume_junction_path(fake.SHARE_NAME)
volume_get_volume_path_args = {
'volume': fake.SHARE_NAME,
'is-style-cifs': 'false'
}
self.client.send_request.assert_has_calls([
mock.call('volume-get-volume-path', volume_get_volume_path_args)])
self.assertEqual(fake.VOLUME_JUNCTION_PATH, result)
def test_get_volume_junction_path_cifs(self):
api_response = netapp_api.NaElement(
fake.VOLUME_GET_VOLUME_PATH_CIFS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_volume_junction_path(fake.SHARE_NAME,
is_style_cifs=True)
volume_get_volume_path_args = {
'volume': fake.SHARE_NAME,
'is-style-cifs': 'true'
}
self.client.send_request.assert_has_calls([
mock.call('volume-get-volume-path', volume_get_volume_path_args)])
self.assertEqual(fake.VOLUME_JUNCTION_PATH_CIFS, result)
def test_mount_volume_default_junction_path(self):
self.mock_object(self.client, 'send_request')
self.client.mount_volume(fake.SHARE_NAME)
volume_mount_args = {
'volume-name': fake.SHARE_NAME,
'junction-path': '/%s' % fake.SHARE_NAME,
}
self.client.send_request.assert_has_calls([
mock.call('volume-mount', volume_mount_args)])
def test_mount_volume(self):
self.mock_object(self.client, 'send_request')
fake_path = '/fake_path'
self.client.mount_volume(fake.SHARE_NAME, junction_path=fake_path)
volume_mount_args = {
'volume-name': fake.SHARE_NAME,
'junction-path': fake_path,
}
self.client.send_request.assert_has_calls([
mock.call('volume-mount', volume_mount_args)])
def test_offline_volume(self):
self.mock_object(self.client, 'send_request')
self.client.offline_volume(fake.SHARE_NAME)
volume_offline_args = {'name': fake.SHARE_NAME}
self.client.send_request.assert_has_calls([
mock.call('volume-offline', volume_offline_args)])
def test_offline_volume_already_offline(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error(
netapp_api.EVOLUMEOFFLINE)))
self.client.offline_volume(fake.SHARE_NAME)
volume_offline_args = {'name': fake.SHARE_NAME}
self.client.send_request.assert_has_calls([
mock.call('volume-offline', volume_offline_args)])
def test_offline_volume_api_error(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error()))
self.assertRaises(netapp_api.NaApiError,
self.client.offline_volume,
fake.SHARE_NAME)
def test__unmount_volume(self):
self.mock_object(self.client, 'send_request')
self.client._unmount_volume(fake.SHARE_NAME)
volume_unmount_args = {
'volume-name': fake.SHARE_NAME,
'force': 'false'
}
self.client.send_request.assert_has_calls([
mock.call('volume-unmount', volume_unmount_args)])
def test__unmount_volume_force(self):
self.mock_object(self.client, 'send_request')
self.client._unmount_volume(fake.SHARE_NAME, force=True)
volume_unmount_args = {'volume-name': fake.SHARE_NAME, 'force': 'true'}
self.client.send_request.assert_has_calls([
mock.call('volume-unmount', volume_unmount_args)])
def test__unmount_volume_already_unmounted(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error(
netapp_api.EVOL_NOT_MOUNTED)))
self.client._unmount_volume(fake.SHARE_NAME, force=True)
volume_unmount_args = {'volume-name': fake.SHARE_NAME, 'force': 'true'}
self.client.send_request.assert_has_calls([
mock.call('volume-unmount', volume_unmount_args)])
def test__unmount_volume_api_error(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error()))
self.assertRaises(netapp_api.NaApiError,
self.client._unmount_volume,
fake.SHARE_NAME,
force=True)
def test_unmount_volume(self):
self.mock_object(self.client, '_unmount_volume')
self.client.unmount_volume(fake.SHARE_NAME)
self.client._unmount_volume.assert_called_once_with(fake.SHARE_NAME,
force=False)
self.assertEqual(1, client_cmode.LOG.debug.call_count)
self.assertEqual(0, client_cmode.LOG.warning.call_count)
def test_unmount_volume_api_error(self):
self.mock_object(self.client,
'_unmount_volume',
self._mock_api_error())
self.assertRaises(netapp_api.NaApiError,
self.client.unmount_volume,
fake.SHARE_NAME)
self.assertEqual(1, self.client._unmount_volume.call_count)
self.assertEqual(0, client_cmode.LOG.debug.call_count)
self.assertEqual(0, client_cmode.LOG.warning.call_count)
def test_unmount_volume_with_retries(self):
side_effect = [netapp_api.NaApiError(code=netapp_api.EAPIERROR,
message='...job ID...')] * 5
side_effect.append(None)
self.mock_object(self.client,
'_unmount_volume',
mock.Mock(side_effect=side_effect))
self.mock_object(time, 'sleep')
self.client.unmount_volume(fake.SHARE_NAME)
self.assertEqual(6, self.client._unmount_volume.call_count)
self.assertEqual(1, client_cmode.LOG.debug.call_count)
self.assertEqual(5, client_cmode.LOG.warning.call_count)
def test_unmount_volume_with_max_retries(self):
side_effect = [netapp_api.NaApiError(code=netapp_api.EAPIERROR,
message='...job ID...')] * 30
self.mock_object(self.client,
'_unmount_volume',
mock.Mock(side_effect=side_effect))
self.mock_object(time, 'sleep')
self.assertRaises(exception.NetAppException,
self.client.unmount_volume,
fake.SHARE_NAME)
self.assertEqual(10, self.client._unmount_volume.call_count)
self.assertEqual(0, client_cmode.LOG.debug.call_count)
self.assertEqual(10, client_cmode.LOG.warning.call_count)
def test_delete_volume(self):
self.mock_object(self.client, 'send_request')
self.client.delete_volume(fake.SHARE_NAME)
volume_destroy_args = {'name': fake.SHARE_NAME}
self.client.send_request.assert_has_calls([
mock.call('volume-destroy', volume_destroy_args)])
def test_create_snapshot(self):
self.mock_object(self.client, 'send_request')
self.client.create_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME)
snapshot_create_args = {
'volume': fake.SHARE_NAME,
'snapshot': fake.SNAPSHOT_NAME
}
self.client.send_request.assert_has_calls([
mock.call('snapshot-create', snapshot_create_args)])
@ddt.data({
'mock_return': fake.SNAPSHOT_GET_ITER_NOT_BUSY_RESPONSE,
'expected': {
'name': fake.SNAPSHOT_NAME,
'volume': fake.SHARE_NAME,
'busy': False,
'owners': set(),
}
}, {
'mock_return': fake.SNAPSHOT_GET_ITER_BUSY_RESPONSE,
'expected': {
'name': fake.SNAPSHOT_NAME,
'volume': fake.SHARE_NAME,
'busy': True,
'owners': {'volume clone'},
}
})
@ddt.unpack
def test_get_snapshot(self, mock_return, expected):
api_response = netapp_api.NaElement(mock_return)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME)
snapshot_get_iter_args = {
'query': {
'snapshot-info': {
'name': fake.SNAPSHOT_NAME,
'volume': fake.SHARE_NAME,
},
},
'desired-attributes': {
'snapshot-info': {
'name': None,
'volume': None,
'busy': None,
'snapshot-owners-list': {
'snapshot-owner': None,
}
},
},
}
self.client.send_request.assert_has_calls([
mock.call('snapshot-get-iter', snapshot_get_iter_args)])
self.assertDictEqual(expected, result)
@ddt.data({
'api_response_xml': fake.NO_RECORDS_RESPONSE,
'raised_exception': exception.SnapshotNotFound,
}, {
'api_response_xml': fake.SNAPSHOT_GET_ITER_NOT_UNIQUE_RESPONSE,
'raised_exception': exception.NetAppException,
}, {
'api_response_xml': fake.SNAPSHOT_GET_ITER_UNAVAILABLE_RESPONSE,
'raised_exception': exception.SnapshotUnavailable,
}, {
'api_response_xml': fake.SNAPSHOT_GET_ITER_OTHER_ERROR_RESPONSE,
'raised_exception': exception.NetAppException,
})
@ddt.unpack
def test_get_snapshot_error(self, api_response_xml, raised_exception):
api_response = netapp_api.NaElement(api_response_xml)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
self.assertRaises(raised_exception,
self.client.get_snapshot,
fake.SHARE_NAME,
fake.SNAPSHOT_NAME)
def test_delete_snapshot(self):
self.mock_object(self.client, 'send_request')
self.client.delete_snapshot(fake.SHARE_NAME, fake.SNAPSHOT_NAME)
snapshot_delete_args = {
'volume': fake.SHARE_NAME,
'snapshot': fake.SNAPSHOT_NAME
}
self.client.send_request.assert_has_calls([
mock.call('snapshot-delete', snapshot_delete_args)])
def test_create_cg_snapshot(self):
mock_start_cg_snapshot = self.mock_object(
self.client, '_start_cg_snapshot',
mock.Mock(return_value=fake.CG_SNAPSHOT_ID))
mock_commit_cg_snapshot = self.mock_object(
self.client, '_commit_cg_snapshot')
self.client.create_cg_snapshot([fake.SHARE_NAME, fake.SHARE_NAME_2],
fake.SNAPSHOT_NAME)
mock_start_cg_snapshot.assert_called_once_with(
[fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME)
mock_commit_cg_snapshot.assert_called_once_with(fake.CG_SNAPSHOT_ID)
def test_create_cg_snapshot_no_id(self):
mock_start_cg_snapshot = self.mock_object(
self.client, '_start_cg_snapshot', mock.Mock(return_value=None))
mock_commit_cg_snapshot = self.mock_object(
self.client, '_commit_cg_snapshot')
self.assertRaises(exception.NetAppException,
self.client.create_cg_snapshot,
[fake.SHARE_NAME, fake.SHARE_NAME_2],
fake.SNAPSHOT_NAME)
mock_start_cg_snapshot.assert_called_once_with(
[fake.SHARE_NAME, fake.SHARE_NAME_2], fake.SNAPSHOT_NAME)
self.assertFalse(mock_commit_cg_snapshot.called)
def test_start_cg_snapshot(self):
self.mock_object(self.client, 'send_request')
self.client._start_cg_snapshot([fake.SHARE_NAME, fake.SHARE_NAME_2],
fake.SNAPSHOT_NAME)
cg_start_args = {
'snapshot': fake.SNAPSHOT_NAME,
'timeout': 'relaxed',
'volumes': [
{'volume-name': fake.SHARE_NAME},
{'volume-name': fake.SHARE_NAME_2},
],
}
self.client.send_request.assert_has_calls([
mock.call('cg-start', cg_start_args)])
def test_commit_cg_snapshot(self):
self.mock_object(self.client, 'send_request')
self.client._commit_cg_snapshot(fake.CG_SNAPSHOT_ID)
cg_commit_args = {'cg-id': fake.CG_SNAPSHOT_ID}
self.client.send_request.assert_has_calls([
mock.call('cg-commit', cg_commit_args)])
def test_create_cifs_share(self):
self.mock_object(self.client, 'send_request')
self.client.create_cifs_share(fake.SHARE_NAME)
cifs_share_create_args = {
'path': '/%s' % fake.SHARE_NAME,
'share-name': fake.SHARE_NAME
}
self.client.send_request.assert_has_calls([
mock.call('cifs-share-create', cifs_share_create_args)])
def test_add_cifs_share_access(self):
self.mock_object(self.client, 'send_request')
self.client.add_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME)
cifs_share_access_control_create_args = {
'permission': 'full_control',
'share': fake.SHARE_NAME,
'user-or-group': fake.USER_NAME
}
self.client.send_request.assert_has_calls([
mock.call(
'cifs-share-access-control-create',
cifs_share_access_control_create_args)])
def test_remove_cifs_share_access(self):
self.mock_object(self.client, 'send_request')
self.client.remove_cifs_share_access(fake.SHARE_NAME, fake.USER_NAME)
cifs_share_access_control_delete_args = {
'user-or-group': fake.USER_NAME,
'share': fake.SHARE_NAME
}
self.client.send_request.assert_has_calls([
mock.call(
'cifs-share-access-control-delete',
cifs_share_access_control_delete_args)])
def test_remove_cifs_share(self):
self.mock_object(self.client, 'send_request')
self.client.remove_cifs_share(fake.SHARE_NAME)
cifs_share_delete_args = {'share-name': fake.SHARE_NAME}
self.client.send_request.assert_has_calls([
mock.call('cifs-share-delete', cifs_share_delete_args)])
def test_add_nfs_export_rule(self):
mock_get_nfs_export_rule_indices = self.mock_object(
self.client, '_get_nfs_export_rule_indices',
mock.Mock(return_value=[]))
mock_add_nfs_export_rule = self.mock_object(
self.client, '_add_nfs_export_rule')
mock_update_nfs_export_rule = self.mock_object(
self.client, '_update_nfs_export_rule')
self.client.add_nfs_export_rule(fake.EXPORT_POLICY_NAME,
fake.IP_ADDRESS,
False)
mock_get_nfs_export_rule_indices.assert_called_once_with(
fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS)
mock_add_nfs_export_rule.assert_called_once_with(
fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False)
self.assertFalse(mock_update_nfs_export_rule.called)
def test_add_nfs_export_rule_single_existing(self):
mock_get_nfs_export_rule_indices = self.mock_object(
self.client, '_get_nfs_export_rule_indices',
mock.Mock(return_value=['1']))
mock_add_nfs_export_rule = self.mock_object(
self.client, '_add_nfs_export_rule')
mock_update_nfs_export_rule = self.mock_object(
self.client, '_update_nfs_export_rule')
mock_remove_nfs_export_rules = self.mock_object(
self.client, '_remove_nfs_export_rules')
self.client.add_nfs_export_rule(fake.EXPORT_POLICY_NAME,
fake.IP_ADDRESS,
False)
mock_get_nfs_export_rule_indices.assert_called_once_with(
fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS)
self.assertFalse(mock_add_nfs_export_rule.called)
mock_update_nfs_export_rule.assert_called_once_with(
fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, '1')
mock_remove_nfs_export_rules.assert_called_once_with(
fake.EXPORT_POLICY_NAME, [])
def test_add_nfs_export_rule_multiple_existing(self):
mock_get_nfs_export_rule_indices = self.mock_object(
self.client, '_get_nfs_export_rule_indices',
mock.Mock(return_value=['2', '4', '6']))
mock_add_nfs_export_rule = self.mock_object(
self.client, '_add_nfs_export_rule')
mock_update_nfs_export_rule = self.mock_object(
self.client, '_update_nfs_export_rule')
mock_remove_nfs_export_rules = self.mock_object(
self.client, '_remove_nfs_export_rules')
self.client.add_nfs_export_rule(fake.EXPORT_POLICY_NAME,
fake.IP_ADDRESS,
False)
mock_get_nfs_export_rule_indices.assert_called_once_with(
fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS)
self.assertFalse(mock_add_nfs_export_rule.called)
mock_update_nfs_export_rule.assert_called_once_with(
fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS, False, '2')
mock_remove_nfs_export_rules.assert_called_once_with(
fake.EXPORT_POLICY_NAME, ['4', '6'])
@ddt.data({'readonly': False, 'rw_security_flavor': 'sys'},
{'readonly': True, 'rw_security_flavor': 'never'})
@ddt.unpack
def test__add_nfs_export_rule(self, readonly, rw_security_flavor):
self.mock_object(self.client, 'send_request')
self.client._add_nfs_export_rule(fake.EXPORT_POLICY_NAME,
fake.IP_ADDRESS,
readonly)
export_rule_create_args = {
'policy-name': fake.EXPORT_POLICY_NAME,
'client-match': fake.IP_ADDRESS,
'ro-rule': {
'security-flavor': 'sys',
},
'rw-rule': {
'security-flavor': rw_security_flavor,
},
'super-user-security': {
'security-flavor': 'sys',
},
}
self.client.send_request.assert_has_calls(
[mock.call('export-rule-create', export_rule_create_args)])
@ddt.data({'readonly': False, 'rw_security_flavor': 'sys', 'index': '2'},
{'readonly': True, 'rw_security_flavor': 'never', 'index': '4'})
@ddt.unpack
def test_update_nfs_export_rule(self, readonly, rw_security_flavor, index):
self.mock_object(self.client, 'send_request')
self.client._update_nfs_export_rule(fake.EXPORT_POLICY_NAME,
fake.IP_ADDRESS,
readonly,
index)
export_rule_modify_args = {
'policy-name': fake.EXPORT_POLICY_NAME,
'rule-index': index,
'client-match': fake.IP_ADDRESS,
'ro-rule': {
'security-flavor': 'sys',
},
'rw-rule': {
'security-flavor': rw_security_flavor,
},
'super-user-security': {
'security-flavor': 'sys',
},
}
self.client.send_request.assert_has_calls(
[mock.call('export-rule-modify', export_rule_modify_args)])
def test_get_nfs_export_rule_indices(self):
api_response = netapp_api.NaElement(fake.EXPORT_RULE_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client._get_nfs_export_rule_indices(
fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS)
export_rule_get_iter_args = {
'query': {
'export-rule-info': {
'policy-name': fake.EXPORT_POLICY_NAME,
'client-match': fake.IP_ADDRESS,
},
},
'desired-attributes': {
'export-rule-info': {
'vserver-name': None,
'policy-name': None,
'client-match': None,
'rule-index': None,
},
},
}
self.assertListEqual(['1', '3'], result)
self.client.send_request.assert_has_calls([
mock.call('export-rule-get-iter', export_rule_get_iter_args)])
def test_remove_nfs_export_rule(self):
fake_indices = ['1', '3', '4']
mock_get_nfs_export_rule_indices = self.mock_object(
self.client, '_get_nfs_export_rule_indices',
mock.Mock(return_value=fake_indices))
mock_remove_nfs_export_rules = self.mock_object(
self.client, '_remove_nfs_export_rules')
self.client.remove_nfs_export_rule(fake.EXPORT_POLICY_NAME,
fake.IP_ADDRESS)
mock_get_nfs_export_rule_indices.assert_called_once_with(
fake.EXPORT_POLICY_NAME, fake.IP_ADDRESS)
mock_remove_nfs_export_rules.assert_called_once_with(
fake.EXPORT_POLICY_NAME, fake_indices)
def test_remove_nfs_export_rules(self):
fake_indices = ['1', '3']
self.mock_object(self.client, 'send_request')
self.client._remove_nfs_export_rules(fake.EXPORT_POLICY_NAME,
fake_indices)
self.client.send_request.assert_has_calls([
mock.call(
'export-rule-destroy',
{'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': '1'}),
mock.call(
'export-rule-destroy',
{'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': '3'})])
def test_remove_nfs_export_rules_not_found(self):
self.mock_object(self.client,
'send_request',
self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND))
self.client._remove_nfs_export_rules(fake.EXPORT_POLICY_NAME, ['1'])
self.client.send_request.assert_has_calls([
mock.call(
'export-rule-destroy',
{'policy-name': fake.EXPORT_POLICY_NAME, 'rule-index': '1'})])
def test_remove_nfs_export_rules_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.assertRaises(netapp_api.NaApiError,
self.client._remove_nfs_export_rules,
fake.EXPORT_POLICY_NAME,
['1'])
def test_clear_nfs_export_policy_for_volume(self):
mock_set_nfs_export_policy_for_volume = self.mock_object(
self.client, 'set_nfs_export_policy_for_volume')
self.client.clear_nfs_export_policy_for_volume(fake.SHARE_NAME)
mock_set_nfs_export_policy_for_volume.assert_called_once_with(
fake.SHARE_NAME, 'default')
def test_set_nfs_export_policy_for_volume(self):
self.mock_object(self.client, 'send_request')
self.client.set_nfs_export_policy_for_volume(fake.SHARE_NAME,
fake.EXPORT_POLICY_NAME)
volume_modify_iter_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': fake.SHARE_NAME,
},
},
},
'attributes': {
'volume-attributes': {
'volume-export-attributes': {
'policy': fake.EXPORT_POLICY_NAME,
},
},
},
}
self.client.send_request.assert_has_calls([
mock.call('volume-modify-iter', volume_modify_iter_args)])
def test_get_nfs_export_policy_for_volume(self):
api_response = netapp_api.NaElement(
fake.VOLUME_GET_EXPORT_POLICY_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_nfs_export_policy_for_volume(fake.SHARE_NAME)
volume_get_iter_args = {
'query': {
'volume-attributes': {
'volume-id-attributes': {
'name': fake.SHARE_NAME,
},
},
},
'desired-attributes': {
'volume-attributes': {
'volume-export-attributes': {
'policy': None,
},
},
},
}
self.assertEqual(fake.EXPORT_POLICY_NAME, result)
self.client.send_request.assert_has_calls([
mock.call('volume-get-iter', volume_get_iter_args)])
def test_get_nfs_export_policy_for_volume_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
self.assertRaises(exception.NetAppException,
self.client.get_nfs_export_policy_for_volume,
fake.SHARE_NAME)
def test_create_nfs_export_policy(self):
self.mock_object(self.client, 'send_request')
self.client.create_nfs_export_policy(fake.EXPORT_POLICY_NAME)
export_policy_create_args = {'policy-name': fake.EXPORT_POLICY_NAME}
self.client.send_request.assert_has_calls([
mock.call('export-policy-create', export_policy_create_args)])
def test_create_nfs_export_policy_already_present(self):
self.mock_object(self.client,
'send_request',
self._mock_api_error(code=netapp_api.EDUPLICATEENTRY))
self.client.create_nfs_export_policy(fake.EXPORT_POLICY_NAME)
export_policy_create_args = {'policy-name': fake.EXPORT_POLICY_NAME}
self.client.send_request.assert_has_calls([
mock.call('export-policy-create', export_policy_create_args)])
def test_create_nfs_export_policy_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.assertRaises(netapp_api.NaApiError,
self.client.create_nfs_export_policy,
fake.EXPORT_POLICY_NAME)
def test_soft_delete_nfs_export_policy(self):
self.mock_object(self.client, 'delete_nfs_export_policy')
self.mock_object(self.client, 'rename_nfs_export_policy')
self.client.soft_delete_nfs_export_policy(fake.EXPORT_POLICY_NAME)
self.client.delete_nfs_export_policy.assert_has_calls([
mock.call(fake.EXPORT_POLICY_NAME)])
self.assertFalse(self.client.rename_nfs_export_policy.called)
def test_soft_delete_nfs_export_policy_api_error(self):
self.mock_object(self.client,
'delete_nfs_export_policy',
self._mock_api_error())
self.mock_object(self.client, 'rename_nfs_export_policy')
self.client.soft_delete_nfs_export_policy(fake.EXPORT_POLICY_NAME)
self.client.delete_nfs_export_policy.assert_has_calls([
mock.call(fake.EXPORT_POLICY_NAME)])
self.assertTrue(self.client.rename_nfs_export_policy.called)
def test_delete_nfs_export_policy(self):
self.mock_object(self.client, 'send_request')
self.client.delete_nfs_export_policy(fake.EXPORT_POLICY_NAME)
export_policy_destroy_args = {'policy-name': fake.EXPORT_POLICY_NAME}
self.client.send_request.assert_has_calls([
mock.call('export-policy-destroy', export_policy_destroy_args)])
def test_delete_nfs_export_policy_not_found(self):
self.mock_object(self.client,
'send_request',
self._mock_api_error(code=netapp_api.EOBJECTNOTFOUND))
self.client.delete_nfs_export_policy(fake.EXPORT_POLICY_NAME)
export_policy_destroy_args = {'policy-name': fake.EXPORT_POLICY_NAME}
self.client.send_request.assert_has_calls([
mock.call('export-policy-destroy', export_policy_destroy_args)])
def test_delete_nfs_export_policy_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.assertRaises(netapp_api.NaApiError,
self.client.delete_nfs_export_policy,
fake.EXPORT_POLICY_NAME)
def test_rename_nfs_export_policy(self):
self.mock_object(self.client, 'send_request')
self.client.rename_nfs_export_policy(fake.EXPORT_POLICY_NAME,
'new_policy_name')
export_policy_rename_args = {
'policy-name': fake.EXPORT_POLICY_NAME,
'new-policy-name': 'new_policy_name'
}
self.client.send_request.assert_has_calls([
mock.call('export-policy-rename', export_policy_rename_args)])
def test_prune_deleted_nfs_export_policies(self):
# Mock client lest we not be able to see calls on its copy.
self.mock_object(copy,
'deepcopy',
mock.Mock(return_value=self.client))
self.mock_object(self.client,
'_get_deleted_nfs_export_policies',
mock.Mock(return_value=fake.DELETED_EXPORT_POLICIES))
self.mock_object(self.client, 'delete_nfs_export_policy')
self.client.prune_deleted_nfs_export_policies()
self.assertTrue(self.client.delete_nfs_export_policy.called)
self.client.delete_nfs_export_policy.assert_has_calls(
[mock.call(policy) for policy in
fake.DELETED_EXPORT_POLICIES[fake.VSERVER_NAME]])
def test_prune_deleted_nfs_export_policies_api_error(self):
self.mock_object(copy,
'deepcopy',
mock.Mock(return_value=self.client))
self.mock_object(self.client,
'_get_deleted_nfs_export_policies',
mock.Mock(return_value=fake.DELETED_EXPORT_POLICIES))
self.mock_object(self.client,
'delete_nfs_export_policy',
self._mock_api_error())
self.client.prune_deleted_nfs_export_policies()
self.assertTrue(self.client.delete_nfs_export_policy.called)
self.client.delete_nfs_export_policy.assert_has_calls(
[mock.call(policy) for policy in
fake.DELETED_EXPORT_POLICIES[fake.VSERVER_NAME]])
def test_get_deleted_nfs_export_policies(self):
api_response = netapp_api.NaElement(
fake.DELETED_EXPORT_POLICY_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client._get_deleted_nfs_export_policies()
export_policy_get_iter_args = {
'query': {
'export-policy-info': {
'policy-name': 'deleted_manila_*',
},
},
'desired-attributes': {
'export-policy-info': {
'policy-name': None,
'vserver': None,
},
},
}
self.assertSequenceEqual(fake.DELETED_EXPORT_POLICIES, result)
self.client.send_request.assert_has_calls([
mock.call('export-policy-get-iter', export_policy_get_iter_args)])
def test_get_ems_log_destination_vserver(self):
self.mock_object(self.client,
'get_ontapi_version',
mock.Mock(return_value=(1, 21)))
mock_list_vservers = self.mock_object(
self.client,
'list_vservers',
mock.Mock(return_value=[fake.ADMIN_VSERVER_NAME]))
result = self.client._get_ems_log_destination_vserver()
mock_list_vservers.assert_called_once_with(vserver_type='admin')
self.assertEqual(fake.ADMIN_VSERVER_NAME, result)
def test_get_ems_log_destination_vserver_future(self):
self.mock_object(self.client,
'get_ontapi_version',
mock.Mock(return_value=(2, 0)))
mock_list_vservers = self.mock_object(
self.client,
'list_vservers',
mock.Mock(return_value=[fake.ADMIN_VSERVER_NAME]))
result = self.client._get_ems_log_destination_vserver()
mock_list_vservers.assert_called_once_with(vserver_type='admin')
self.assertEqual(fake.ADMIN_VSERVER_NAME, result)
def test_get_ems_log_destination_vserver_legacy(self):
self.mock_object(self.client,
'get_ontapi_version',
mock.Mock(return_value=(1, 15)))
mock_list_vservers = self.mock_object(
self.client,
'list_vservers',
mock.Mock(return_value=[fake.NODE_VSERVER_NAME]))
result = self.client._get_ems_log_destination_vserver()
mock_list_vservers.assert_called_once_with(vserver_type='node')
self.assertEqual(fake.NODE_VSERVER_NAME, result)
def test_get_ems_log_destination_no_cluster_creds(self):
self.mock_object(self.client,
'get_ontapi_version',
mock.Mock(return_value=(1, 21)))
mock_list_vservers = self.mock_object(
self.client,
'list_vservers',
mock.Mock(side_effect=[[], [fake.VSERVER_NAME]]))
result = self.client._get_ems_log_destination_vserver()
mock_list_vservers.assert_has_calls([
mock.call(vserver_type='admin'),
mock.call(vserver_type='data')])
self.assertEqual(fake.VSERVER_NAME, result)
def test_get_ems_log_destination_vserver_not_found(self):
self.mock_object(self.client,
'get_ontapi_version',
mock.Mock(return_value=(1, 21)))
mock_list_vservers = self.mock_object(
self.client,
'list_vservers',
mock.Mock(return_value=[]))
self.assertRaises(exception.NotFound,
self.client._get_ems_log_destination_vserver)
mock_list_vservers.assert_has_calls([
mock.call(vserver_type='admin'),
mock.call(vserver_type='data'),
mock.call(vserver_type='node')])
def test_send_ems_log_message(self):
# Mock client lest we not be able to see calls on its copy.
self.mock_object(copy,
'deepcopy',
mock.Mock(return_value=self.client))
self.mock_object(self.client,
'_get_ems_log_destination_vserver',
mock.Mock(return_value=fake.ADMIN_VSERVER_NAME))
self.mock_object(self.client, 'send_request')
self.client.send_ems_log_message(fake.EMS_MESSAGE)
self.client.send_request.assert_has_calls([
mock.call('ems-autosupport-log', fake.EMS_MESSAGE)])
self.assertEqual(1, client_cmode.LOG.debug.call_count)
def test_send_ems_log_message_api_error(self):
# Mock client lest we not be able to see calls on its copy.
self.mock_object(copy,
'deepcopy',
mock.Mock(return_value=self.client))
self.mock_object(self.client,
'_get_ems_log_destination_vserver',
mock.Mock(return_value=fake.ADMIN_VSERVER_NAME))
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.client.send_ems_log_message(fake.EMS_MESSAGE)
self.client.send_request.assert_has_calls([
mock.call('ems-autosupport-log', fake.EMS_MESSAGE)])
self.assertEqual(1, client_cmode.LOG.warning.call_count)
def test_get_aggregate_raid_types(self):
api_response = netapp_api.NaElement(fake.AGGR_GET_RAID_TYPE_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_aggregate_raid_types(
fake.SHARE_AGGREGATE_NAMES)
aggr_get_iter_args = {
'query': {
'aggr-attributes': {
'aggregate-name': '|'.join(fake.SHARE_AGGREGATE_NAMES),
}
},
'desired-attributes': {
'aggr-attributes': {
'aggregate-name': None,
'aggr-raid-attributes': {
'raid-type': None,
}
}
}
}
expected = {
fake.SHARE_AGGREGATE_NAMES[0]:
fake.SHARE_AGGREGATE_RAID_TYPES[0],
fake.SHARE_AGGREGATE_NAMES[1]:
fake.SHARE_AGGREGATE_RAID_TYPES[1]
}
self.client.send_request.assert_has_calls([
mock.call('aggr-get-iter', aggr_get_iter_args)])
self.assertDictEqual(expected, result)
def test_get_aggregate_raid_types_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_aggregate_raid_types(
fake.SHARE_AGGREGATE_NAMES)
self.assertDictEqual({}, result)
def test_get_aggregate_disk_types(self):
api_response = netapp_api.NaElement(
fake.STORAGE_DISK_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_aggregate_disk_types(
fake.SHARE_AGGREGATE_NAMES)
expected = {
fake.SHARE_AGGREGATE_NAMES[0]:
fake.SHARE_AGGREGATE_DISK_TYPE,
fake.SHARE_AGGREGATE_NAMES[1]:
fake.SHARE_AGGREGATE_DISK_TYPE
}
self.assertEqual(len(fake.SHARE_AGGREGATE_NAMES),
self.client.send_request.call_count)
self.assertDictEqual(expected, result)
def test_get_aggregate_disk_types_not_found(self):
api_response = netapp_api.NaElement(fake.NO_RECORDS_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.get_aggregate_disk_types(
fake.SHARE_AGGREGATE_NAMES)
self.assertEqual(len(fake.SHARE_AGGREGATE_NAMES),
self.client.send_request.call_count)
self.assertDictEqual({}, result)
def test_check_for_cluster_credentials(self):
api_response = netapp_api.NaElement(fake.SYSTEM_NODE_GET_ITER_RESPONSE)
self.mock_object(self.client,
'send_request',
mock.Mock(return_value=api_response))
result = self.client.check_for_cluster_credentials()
self.assertTrue(result)
def test_check_for_cluster_credentials_not_cluster(self):
self.mock_object(self.client,
'send_request',
mock.Mock(side_effect=self._mock_api_error(
netapp_api.EAPINOTFOUND)))
result = self.client.check_for_cluster_credentials()
self.assertFalse(result)
def test_check_for_cluster_credentials_api_error(self):
self.mock_object(self.client, 'send_request', self._mock_api_error())
self.assertRaises(netapp_api.NaApiError,
self.client.check_for_cluster_credentials)
| apache-2.0 | -2,452,308,389,529,523,000 | 36.012328 | 79 | 0.547084 | false |
jmoss2020/moss-advprog | Shapes.py | 1 | 3209 | class Circle():
def __init__(self, r):
self.r = r
def area(self):
return (self.r ** 2) * 3.14
def perimeter(self):
return self.r * 6.28
def __str__(self):
return "Circle has a radius of %.2f, an area of %.2f, and a perimeter of %.2f." % (self.r, self.area, self.perimeter)
class Rectangle():
def __init__(self, x, y):
self.x = x
self.y = y
def area(self):
return self.x * self.y
def perimeter(self):
return 2 * self.x + 2 * self.y
def __str__(self):
return "Rectangle has a hight of %.2f, a width of %.2f, an area of %.2f, and a perimeter of %.2f." % (self.y, self.x, self.area(), self.perimeter())
class Square(Rectangle):
def __init__(self,x):
self.x = x
self.y = x
def __str__(self):
return "Square has a side length of %.2f, an area of %.2f, and a perimeter of %.2f." % (self.y, self.area(), self.perimeter())
class RightTriangle():
def __init__(self, x, y):
self.x = x
self.y = y
self.hyp = self.hypotenuse()
def area(self):
return 0.5 * self.x * self.y
def hypotenuse(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def perimeter(self):
return self.hyp + self.x + self.y
def __str__(self):
return "Triangle has a hight of %.2f, a base of %.2f an area of %.2f, and a perimeter of %.2f." % (self.y, self.x, self.area(), self.perimeter())
class EquilateralRightTriangle(RightTriangle):
def __init__(self, x):
self.x = x
self.y = x
self.hyp = self.hypotenuse()
def __str__(self):
return "Triangle has a base and hight of %.2f an area of %.2f, and a perimeter of %.2f." % (self.y, self.area(), self.perimeter())
class Prism():
def surfacearea(self):
return 2 * self.area() + self.z * self.perimeter()
def volume(self):
return self.area() * self.z
class Cube(Square,Prism):
def __init__(self, x):
self.x = x
self.y = x
self.z = x
def __str__(self):
return "Cube has a width, hieght, and depth of %.2f, a surfacearea of %.2f, and a volume of %.2f." % (self.x, self.surfacearea(), self.volume())
class TriangularPrism(RightTriangle,Prism):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
self.hyp = self.hypotenuse()
def __str__(self):
return "Triangular Prism has a width of %.2f, a hieght of %.2f, a depth of %.2f, a surfacearea of %.2f, and a volume of %.2f." % (self.x, self.y, self.z, self.surfacearea(), self.volume())
class Cylinder(Circle,Prism):
def __init__(self, r, z):
self.r = r
self.z = z
def __str__(self):
return "Cylinder has a radius of %.2f, a hieght of %.2f, a surfacearea of %.2f, and a volume of %.2f." % (self.r, self.z, self.surfacearea(), self.volume())
# circle1=Circle(5)
# print circle1
# rectangle1=Rectangle(3,4)
# print rectangle1
# square1=Square(6)
# print square1
# RT=RightTriangle(3,4)
# print RT
# ERT=EquilateralRightTriangle(2)
# print ERT
# Cube1=Cube(4)
# print Cube1
# TP=TriangularPrism(3,4,5)
# print TP
Cyl=Cylinder(1,2)
print Cyl
| gpl-3.0 | 700,869,985,878,508,000 | 27.651786 | 196 | 0.569959 | false |
alexryndin/ambari | ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py | 1 | 20188 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
from resource_management.core.exceptions import Fail
@patch("os.path.isfile", new = MagicMock(return_value=True))
@patch("glob.glob", new = MagicMock(return_value=["one", "two"]))
class TestWebHCatServer(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
STACK_VERSION = "2.0.6"
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertNoMoreResources()
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertResourceCalled('Execute', 'cd /var/run/webhcat ; /usr/hdp/current/hive-webhcat/sbin/webhcat_server.sh start',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client'},
not_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
user = 'hcat',
)
self.assertNoMoreResources()
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', '/usr/hdp/current/hive-webhcat/sbin/webhcat_server.sh stop',
user = 'hcat',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client' }
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/webhcat/webhcat.pid`',
only_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
ignore_failures = True
)
self.assertResourceCalled('Execute', "! (ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1)")
self.assertResourceCalled('File', '/var/run/webhcat/webhcat.pid',
action = ['delete'],
)
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "configure",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertNoMoreResources()
@patch("webhcat_service.graceful_stop", new = MagicMock(side_effect=Fail))
def test_stop_graceful_stop_failed(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', "find /var/log/webhcat -maxdepth 1 -type f -name '*' -exec echo '==> {} <==' \\; -exec tail -n 40 {} \\;",
logoutput = True,
ignore_failures = True,
user = 'hcat',
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/webhcat/webhcat.pid`',
only_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
ignore_failures = True
)
self.assertResourceCalled('Execute', "! (ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1)")
self.assertResourceCalled('File', '/var/run/webhcat/webhcat.pid',
action = ['delete'],
)
self.assertNoMoreResources()
def test_start_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "start",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertResourceCalled('Execute', 'cd /var/run/webhcat ; /usr/hdp/current/hive-webhcat/sbin/webhcat_server.sh start',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client'},
not_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
user = 'hcat',
)
self.assertNoMoreResources()
def test_stop_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "stop",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', '/usr/hdp/current/hive-webhcat/sbin/webhcat_server.sh stop',
user = 'hcat',
environment = {'HADOOP_HOME': '/usr/hdp/current/hadoop-client' }
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/webhcat/webhcat.pid`',
only_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
ignore_failures = True
)
self.assertResourceCalled('Execute', "! (ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1)")
self.assertResourceCalled('File', '/var/run/webhcat/webhcat.pid',
action = ['delete'],
)
self.assertNoMoreResources()
@patch("webhcat_service.graceful_stop", new = MagicMock(side_effect=Fail))
def test_stop_secured_graceful_stop_failed(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "stop",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', "find /var/log/webhcat -maxdepth 1 -type f -name '*' -exec echo '==> {} <==' \\; -exec tail -n 40 {} \\;",
logoutput = True,
ignore_failures = True,
user = 'hcat',
)
self.assertResourceCalled('Execute', 'ambari-sudo.sh kill -9 `cat /var/run/webhcat/webhcat.pid`',
only_if = "ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1",
ignore_failures = True
)
self.assertResourceCalled('Execute', "! (ls /var/run/webhcat/webhcat.pid >/dev/null 2>&1 && ps -p `cat /var/run/webhcat/webhcat.pid` >/dev/null 2>&1)")
self.assertResourceCalled('File', '/var/run/webhcat/webhcat.pid',
action = ['delete'],
)
self.assertNoMoreResources()
def assert_configure_default(self):
self.assertResourceCalled('Directory', '/var/run/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/var/log/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/etc/hive-webhcat/conf',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
cd_access = 'a'
)
self.assertResourceCalled('XmlConfig', 'webhcat-site.xml',
owner = 'hcat',
group = 'hadoop',
conf_dir = '/etc/hive-webhcat/conf',
configurations = self.getConfig()['configurations']['webhcat-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['webhcat-site']
)
self.assertResourceCalled('File', '/etc/hive-webhcat/conf/webhcat-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['webhcat-env']['content']),
owner = 'hcat',
group = 'hadoop',
)
self.assertResourceCalled('Directory', '/usr/hdp/current/hive-webhcat/conf',
cd_access = 'a',
create_parents = True
)
self.assertResourceCalled('File', '/etc/hive-webhcat/conf/webhcat-log4j.properties',
content = InlineTemplate('log4jproperties\nline2'),
owner = 'hcat',
group = 'hadoop',
mode = 0644,
)
def assert_configure_secured(self):
self.assertResourceCalled('Directory', '/var/run/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/var/log/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755,
)
self.assertResourceCalled('Directory', '/etc/hive-webhcat/conf',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
cd_access = 'a'
)
self.assertResourceCalled('XmlConfig', 'webhcat-site.xml',
owner = 'hcat',
group = 'hadoop',
conf_dir = '/etc/hive-webhcat/conf',
configurations = self.getConfig()['configurations']['webhcat-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['webhcat-site']
)
self.assertResourceCalled('File', '/etc/hive-webhcat/conf/webhcat-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['webhcat-env']['content']),
owner = 'hcat',
group = 'hadoop',
)
self.assertResourceCalled('Directory', '/usr/hdp/current/hive-webhcat/conf',
cd_access = 'a',
create_parents = True
)
self.assertResourceCalled('File', '/etc/hive-webhcat/conf/webhcat-log4j.properties',
content = InlineTemplate('log4jproperties\nline2'),
owner = 'hcat',
group = 'hadoop',
mode = 0644,
)
def test_pre_upgrade_restart(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.2.1.0-3242'
json_content['commandParams']['version'] = version
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
self.assertNoMoreResources()
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart_23(self, call_mock):
import sys
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
json_content['hostLevelParams']['stack_version'] = "2.3"
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None, '')],
mocks_dict = mocks_dict)
self.assertTrue("params" in sys.modules)
self.assertTrue(sys.modules["params"].webhcat_conf_dir is not None)
self.assertTrue("/usr/hdp/current/hive-webhcat/etc/webhcat" == sys.modules["params"].webhcat_conf_dir)
self.assertResourceCalledIgnoreEarlier('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
self.assertNoMoreResources()
self.assertEquals(2, mocks_dict['call'].call_count)
self.assertEquals(2, mocks_dict['checked_call'].call_count)
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hive-hcatalog', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hive-hcatalog', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[1][0][0])
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[1][0][0])
@patch("resource_management.core.shell.call")
def test_rolling_restart_configure(self, call_mock):
import sys
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
json_content['hostLevelParams']['stack_version'] = "2.3"
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
classname = "WebHCatServer",
command = "configure",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalled('Directory', '/var/run/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755)
self.assertResourceCalled('Directory', '/var/log/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
mode = 0755)
self.assertResourceCalled('Directory', '/usr/hdp/current/hive-webhcat/etc/webhcat',
owner = 'hcat',
group = 'hadoop',
create_parents = True,
cd_access = 'a',)
self.assertResourceCalled('XmlConfig', 'webhcat-site.xml',
owner = 'hcat',
group = 'hadoop',
conf_dir = '/usr/hdp/current/hive-webhcat/etc/webhcat',
configurations = self.getConfig()['configurations']['webhcat-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['webhcat-site'])
self.assertResourceCalled('XmlConfig', 'hive-site.xml',
owner = 'hive',
group = 'hadoop',
conf_dir = '/usr/hdp/2.3.0.0-1234/hive/conf',
configuration_attributes = {u'final': {u'hive.optimize.bucketmapjoin.sortedmerge': u'true',
u'javax.jdo.option.ConnectionDriverName': u'true',
u'javax.jdo.option.ConnectionPassword': u'true'}},
configurations = self.getConfig()['configurations']['hive-site'],
)
self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
owner = 'yarn',
group = 'hadoop',
conf_dir = '/usr/hdp/2.3.0.0-1234/hadoop/conf',
configuration_attributes = {u'final': {u'yarn.nodemanager.container-executor.class': u'true',
u'yarn.nodemanager.disk-health-checker.min-healthy-disks': u'true',
u'yarn.nodemanager.local-dirs': u'true'}},
configurations = self.getConfig()['configurations']['yarn-site'],
)
self.assertResourceCalled('File', '/usr/hdp/current/hive-webhcat/etc/webhcat/webhcat-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['webhcat-env']['content']),
owner = 'hcat',
group = 'hadoop')
self.assertResourceCalled('Directory', '/usr/hdp/current/hive-webhcat/etc/webhcat',
cd_access = 'a',
create_parents = True)
self.assertResourceCalled('File', '/usr/hdp/current/hive-webhcat/etc/webhcat/webhcat-log4j.properties',
content = InlineTemplate('log4jproperties\nline2'),
owner = 'hcat',
group = 'hadoop',
mode = 0644)
self.assertNoMoreResources()
| apache-2.0 | -1,059,206,473,547,598,500 | 46.952494 | 160 | 0.553101 | false |
chancecoin/chancecoin | lib/blocks.py | 1 | 25711 | """
Initialise database.
Sieve blockchain for Chancecoin transactions, and add them to the database.
"""
import os
import time
import binascii
import struct
import decimal
D = decimal.Decimal
import logging
from . import (config, exceptions, util, bitcoin)
from . import (send, order, btcpay, bet, burn, cancel)
def parse_tx (db, tx):
parse_tx_cursor = db.cursor()
# Burns.
if tx['destination'] == config.UNSPENDABLE:
burn.parse(db, tx)
return
try:
message_type_id = struct.unpack(config.TXTYPE_FORMAT, tx['data'][:4])[0]
except:
# Mark transaction as of unsupported type.
message_type_id = None
message = tx['data'][4:]
if message_type_id == send.ID:
send.parse(db, tx, message)
elif message_type_id == order.ID:
order.parse(db, tx, message)
elif message_type_id == btcpay.ID:
btcpay.parse(db, tx, message)
elif message_type_id == bet.ID:
bet.parse(db, tx, message)
elif message_type_id == cancel.ID:
cancel.parse(db, tx, message)
else:
parse_tx_cursor.execute('''UPDATE transactions \
SET supported=? \
WHERE tx_hash=?''',
(False, tx['tx_hash']))
logging.info('Unsupported transaction: hash {}; data {}'.format(tx['tx_hash'], tx['data']))
# resolve unresolved bets
bet.resolve(db)
parse_tx_cursor.close()
def parse_block (db, block_index, block_time):
"""This is a separate function from follow() so that changing the parsing
rules doesn't require a full database rebuild. If parsing rules are changed
(but not data identification), then just restart `chancecoin.py follow`.
"""
parse_block_cursor = db.cursor()
    # Expire orders.
order.expire(db, block_index)
# Parse transactions, sorting them by type.
parse_block_cursor.execute('''SELECT * FROM transactions \
WHERE block_index=? ORDER BY tx_index''',
(block_index,))
transactions = parse_block_cursor.fetchall()
for tx in transactions:
parse_tx(db, tx)
parse_block_cursor.close()
def initialise(db):
initialise_cursor = db.cursor()
# Blocks
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS blocks(
block_index INTEGER PRIMARY KEY,
block_hash TEXT UNIQUE,
block_time INTEGER)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
blocks_block_index_idx ON blocks (block_index)
''')
# Transactions
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS transactions(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
block_time INTEGER,
source TEXT,
destination TEXT,
btc_amount INTEGER,
fee INTEGER,
data BLOB,
supported BOOL DEFAULT 1)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
transactions_block_index_idx ON transactions (block_index)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
transactions_tx_index_idx ON transactions (tx_index)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
transactions_tx_hash_idx ON transactions (tx_hash)
''')
# Purge database of blocks, transactions from before BLOCK_FIRST.
initialise_cursor.execute('''DELETE FROM blocks WHERE block_index<?''', (config.BLOCK_FIRST,))
initialise_cursor.execute('''DELETE FROM transactions WHERE block_index<?''', (config.BLOCK_FIRST,))
# (Valid) debits
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS debits(
block_index INTEGER,
address TEXT,
asset TEXT,
amount INTEGER,
calling_function TEXT,
event TEXT)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
debits_address_idx ON debits (address)
''')
# (Valid) credits
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS credits(
block_index INTEGER,
address TEXT,
asset TEXT,
amount INTEGER,
calling_function TEXT,
event TEXT)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
credits_address_idx ON credits (address)
''')
# Balances
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS balances(
address TEXT,
asset TEXT,
amount INTEGER
)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
address_idx ON balances (address)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
asset_idx ON balances (asset)
''')
# Sends
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS sends(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
destination TEXT,
asset TEXT,
amount INTEGER,
validity TEXT)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
sends_block_index_idx ON sends (block_index)
''')
# Orders
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS orders(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
give_asset TEXT,
give_amount INTEGER,
give_remaining INTEGER,
get_asset TEXT,
get_amount INTEGER,
get_remaining INTEGER,
expiration INTEGER,
expire_index INTEGER,
fee_required INTEGER,
fee_provided INTEGER,
fee_remaining INTEGER,
validity TEXT)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON orders (block_index)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
expire_index_idx ON orders (expire_index)
''')
# Order Matches
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS order_matches(
id TEXT PRIMARY KEY,
tx0_index INTEGER,
tx0_hash TEXT,
tx0_address TEXT,
tx1_index INTEGER,
tx1_hash TEXT,
tx1_address TEXT,
forward_asset TEXT,
forward_amount INTEGER,
backward_asset TEXT,
backward_amount INTEGER,
tx0_block_index INTEGER,
tx1_block_index INTEGER,
tx0_expiration INTEGER,
tx1_expiration INTEGER,
match_expire_index INTEGER,
validity TEXT)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
match_expire_index_idx ON order_matches (match_expire_index)
''')
# BTCpays
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS btcpays(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
destination TEXT,
btc_amount INTEGER,
order_match_id TEXT,
validity TEXT)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON btcpays (block_index)
''')
# Bets
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS bets(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
bet INTEGER,
chance REAL,
payout REAL,
profit INTEGER,
cha_supply INTEGER,
validity TEXT)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON bets (block_index)
''')
# Burns
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS burns(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
burned INTEGER,
earned INTEGER,
validity TEXT)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
validity_idx ON burns (validity)
''')
    initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
                          burns_source_idx ON burns (source)
                       ''')
# Cancels
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS cancels(
tx_index INTEGER PRIMARY KEY,
tx_hash TEXT UNIQUE,
block_index INTEGER,
source TEXT,
offer_hash TEXT,
validity TEXT)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
cancels_block_index_idx ON cancels (block_index)
''')
# Order Expirations
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS order_expirations(
order_index INTEGER PRIMARY KEY,
order_hash TEXT UNIQUE,
source TEXT,
block_index INTEGER)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON order_expirations (block_index)
''')
# Order Match Expirations
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS order_match_expirations(
order_match_id TEXT PRIMARY KEY,
tx0_address TEXT,
tx1_address TEXT,
block_index INTEGER)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON order_match_expirations (block_index)
''')
# Messages
initialise_cursor.execute('''CREATE TABLE IF NOT EXISTS messages(
message_index INTEGER PRIMARY KEY,
block_index INTEGER,
command TEXT,
category TEXT,
bindings TEXT)
''')
initialise_cursor.execute('''CREATE INDEX IF NOT EXISTS
block_index_idx ON messages (block_index)
''')
initialise_cursor.close()
def get_tx_info (tx):
"""
The destination, if it exists, always comes before the data output; the
change, if it exists, always comes after.
"""
# Fee is the input values minus output values.
fee = D(0)
# Get destination output and data output.
destination, btc_amount, data = None, None, b''
# Check to see if this is a burn
for vout in tx['vout']:
if 'addresses' in vout['scriptPubKey']:
if vout['scriptPubKey']['addresses'][0] == config.UNSPENDABLE:
address = vout['scriptPubKey']['addresses'][0]
destination, btc_amount = address, round(D(vout['value']) * config.UNIT)
for vout in tx['vout']:
fee -= D(vout['value']) * config.UNIT
# Sum data chunks to get data. (Can mix OP_RETURN and multi-sig.)
asm = vout['scriptPubKey']['asm'].split(' ')
if len(asm) == 2 and asm[0] == 'OP_RETURN': # OP_RETURN
try: data_chunk = binascii.unhexlify(bytes(asm[1], 'utf-8'))
except binascii.Error: continue
data += data_chunk
elif len(asm) >= 5 and asm[0] == '1' and asm[3] == '2' and asm[4] == 'OP_CHECKMULTISIG': # Multi-sig
try: data_pubkey = binascii.unhexlify(bytes(asm[2], 'utf-8'))
except binascii.Error: continue
data_chunk_length = data_pubkey[0] # No ord() necessary.
data_chunk = data_pubkey[1:data_chunk_length + 1]
data += data_chunk
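            # Example with hypothetical bytes: for a data pubkey of
            #   b'\x05' + b'HELLO' + <padding>
            # the first byte (0x05) is the chunk length, the next five bytes
            # are the payload, and any trailing padding bytes are discarded.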
# Destination is the first output before the data.
if not destination and not btc_amount and not data:
if 'addresses' in vout['scriptPubKey']:
address = vout['scriptPubKey']['addresses'][0]
try: # If address is valid…
bitcoin.base58_decode(address, config.ADDRESSVERSION)
destination, btc_amount = address, round(D(vout['value']) * config.UNIT)
continue
except:
pass
# Check for, and strip away, prefix (except for burns).
if destination == config.UNSPENDABLE:
pass
elif data[:len(config.PREFIX)] == config.PREFIX:
data = data[len(config.PREFIX):]
else:
return b'', None, None, None, None
# Only look for source if data were found (or destination is UNSPENDABLE), for speed.
if not data and destination != config.UNSPENDABLE:
return b'', None, None, None, None
# Collect all possible source addresses; ignore coinbase transactions.
source_list = []
for vin in tx['vin']: # Loop through input transactions.
if 'coinbase' in vin: return b'', None, None, None, None
vin_tx = bitcoin.rpc('getrawtransaction', [vin['txid'], 1]) # Get the full transaction data for this input transaction.
vout = vin_tx['vout'][vin['vout']]
fee += D(vout['value']) * config.UNIT
addresses = vout['scriptPubKey']['addresses']
if len(addresses) != 1: return b'', None, None, None, None # NOTE: Disallow multi‐sig inputs.
source_list.append(addresses[0])
# Require that all possible source addresses be the same.
if all(x == source_list[0] for x in source_list):
source = source_list[0]
elif destination == config.UNSPENDABLE: #if this is a burn, take the source as the first address
source = source_list[0]
else:
source = None
return source, destination, btc_amount, round(fee), data
def reparse (db, block_index=None, quiet=False):
"""Reparse all transactions (atomically). If block_index is set, rollback
to the end of that block.
"""
# TODO: This is not thread-safe!
logging.warning('Status: Reparsing all transactions.')
cursor = db.cursor()
with db:
# For rollbacks, just delete new blocks and then reparse what’s left.
if block_index:
cursor.execute('''DELETE FROM blocks WHERE block_index > ?''', (block_index,))
cursor.execute('''DELETE FROM transactions WHERE block_index > ?''', (block_index,))
# Delete all of the results of parsing.
cursor.execute('''DROP TABLE IF EXISTS debits''')
cursor.execute('''DROP TABLE IF EXISTS credits''')
cursor.execute('''DROP TABLE IF EXISTS balances''')
cursor.execute('''DROP TABLE IF EXISTS sends''')
cursor.execute('''DROP TABLE IF EXISTS orders''')
cursor.execute('''DROP TABLE IF EXISTS order_matches''')
cursor.execute('''DROP TABLE IF EXISTS btcpays''')
cursor.execute('''DROP TABLE IF EXISTS bets''')
cursor.execute('''DROP TABLE IF EXISTS burns''')
cursor.execute('''DROP TABLE IF EXISTS cancels''')
cursor.execute('''DROP TABLE IF EXISTS order_expirations''')
cursor.execute('''DROP TABLE IF EXISTS order_match_expirations''')
cursor.execute('''DROP TABLE IF EXISTS messages''')
# Reparse all blocks, transactions.
if quiet:
log = logging.getLogger('')
log.setLevel(logging.WARNING)
initialise(db)
cursor.execute('''SELECT * FROM blocks ORDER BY block_index''')
for block in cursor.fetchall():
logging.info('Block (re-parse): {}'.format(str(block['block_index'])))
parse_block(db, block['block_index'], block['block_time'])
if quiet:
log.setLevel(logging.INFO)
# Update minor version number.
minor_version = cursor.execute('PRAGMA user_version = {}'.format(int(config.DB_VERSION_MINOR))) # Syntax?!
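        # NOTE: PRAGMA statements cannot use bound parameters, which is why the
        # value is interpolated with format() above.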
logging.info('Status: Database minor version number updated.')
cursor.close()
return
def reorg (db):
# Detect blockchain reorganisation up to 10 blocks length.
reorg_cursor = db.cursor()
reorg_cursor.execute('''SELECT * FROM blocks WHERE block_index = (SELECT MAX(block_index) from blocks)''')
last_block_index = util.last_block(db)['block_index']
reorg_necessary = False
for block_index in range(last_block_index - 10, last_block_index + 1):
block_hash_see = bitcoin.rpc('getblockhash', [block_index])
reorg_cursor.execute('''SELECT * FROM blocks WHERE block_index=?''', (block_index,))
block_hash_have = reorg_cursor.fetchall()[0]['block_hash']
if block_hash_see != block_hash_have:
reorg_necessary = True
logging.warning('Status: Blockchain reorganisation at block {}.'.format(block_index))
break
if not reorg_necessary: return last_block_index + 1
# Rollback the DB.
reparse(db, block_index=block_index-1, quiet=True)
reorg_cursor.close()
return block_index
def follow (db):
# TODO: This is not thread-safe!
follow_cursor = db.cursor()
logging.info('Status: RESTART')
# Reparse all transactions if minor version changes.
minor_version = follow_cursor.execute('PRAGMA user_version').fetchall()[0]['user_version']
if minor_version != config.DB_VERSION_MINOR:
logging.info('Status: Database and client minor version number mismatch ({} ≠ {}).'.format(minor_version, config.DB_VERSION_MINOR))
reparse(db, quiet=False)
# Initialise.
initialise(db)
while True:
# Get index of last block.
try:
block_index = util.last_block(db)['block_index'] + 1
except exceptions.DatabaseError:
logging.warning('Status: NEW DATABASE')
block_index = config.BLOCK_FIRST
            # In this case, send an initialise message out on our ZMQ feed so that any
            # attached services (such as counterwalletd) can clear out their data as well
            # and avoid holding duplicated data after a new DB version.
# Get index of last transaction.
try:
follow_cursor.execute('''SELECT * FROM transactions WHERE tx_index = (SELECT MAX(tx_index) from transactions)''')
tx_index = follow_cursor.fetchall()[0]['tx_index'] + 1
except Exception: # TODO
tx_index = 0
# Get new blocks.
block_count = bitcoin.rpc('getblockcount', [])
while block_index <= block_count:
#print('Block: {}'.format(str(block_index)))
logging.info('Block: {}'.format(str(block_index)))
block_hash = bitcoin.rpc('getblockhash', [block_index])
block = bitcoin.rpc('getblock', [block_hash])
block_time = block['time']
tx_hash_list = block['tx']
# Get and parse transactions in this block (atomically).
with db:
# List the block.
follow_cursor.execute('''INSERT INTO blocks(
block_index,
block_hash,
block_time) VALUES(?,?,?)''',
(block_index,
block_hash,
block_time)
)
# List the transactions in the block.
for tx_hash in tx_hash_list:
# Skip duplicate transaction entries.
follow_cursor.execute('''SELECT * FROM transactions WHERE tx_hash=?''', (tx_hash,))
blocks = follow_cursor.fetchall()
if blocks:
tx_index += 1
continue
# Get the important details about each transaction.
tx = bitcoin.rpc('getrawtransaction', [tx_hash, 1])
logging.debug('Status: examining transaction {}'.format(tx_hash))
source, destination, btc_amount, fee, data = get_tx_info(tx)
if source and (data or destination == config.UNSPENDABLE):
follow_cursor.execute('''INSERT INTO transactions(
tx_index,
tx_hash,
block_index,
block_time,
source,
destination,
btc_amount,
fee,
data) VALUES(?,?,?,?,?,?,?,?,?)''',
(tx_index,
tx_hash,
block_index,
block_time,
source,
destination,
btc_amount,
fee,
data)
)
tx_index += 1
# Parse the transactions in the block.
parse_block(db, block_index, block_time)
# resolve unresolved bets
bet.resolve(db)
# Increment block index.
block_count = bitcoin.rpc('getblockcount', [])
block_index +=1
while block_index > block_count: # DUPE
# Handle blockchain reorganisations, as necessary, atomically.
with db:
block_index = reorg(db)
block_count = bitcoin.rpc('getblockcount', [])
time.sleep(2)
follow_cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | -4,809,523,319,590,503,000 | 42.271044 | 139 | 0.484846 | false |
petroniocandido/pyFTS | pyFTS/benchmarks/Tests.py | 1 | 4575 |
import numpy as np
import pandas as pd
from pyFTS.benchmarks.Measures import acf
def BoxPierceStatistic(data, h):
"""
Q Statistic for Box-Pierce test
:param data:
:param h:
:return:
"""
n = len(data)
s = 0
for k in np.arange(1, h + 1):
r = acf(data, k)
s += r ** 2
return n * s
def BoxLjungStatistic(data, h):
"""
Q Statistic for Ljung–Box test
:param data:
:param h:
:return:
"""
n = len(data)
s = 0
for k in np.arange(1, h + 1):
r = acf(data, k)
s += r ** 2 / (n - k)
    return n * (n + 2) * s
def format_experiment_table(df, exclude=[], replace={}, csv=True, std=False):
rows = []
columns = []
datasets = df.Dataset.unique()
models = df.Model.unique()
for model in models:
test = np.any([model.rfind(k) != -1 for k in exclude]) if len(exclude) > 0 else False
if not test:
columns.append(model)
for dataset in datasets:
row = [dataset]
if std:
row_std = [dataset]
for model in columns:
avg = np.nanmin(df[(df.Dataset == dataset) & (df.Model == model)]["AVG"].values)
row.append(round(avg, 3))
if std:
_std = np.nanmin(df[(df.Dataset == dataset) & (df.Model == model)]["STD"].values)
row_std.append("(" + str(round(_std, 3)) + ")")
rows.append(row)
if std:
rows.append(row_std)
for k in range(len(columns)):
if columns[k] in replace:
columns[k] = replace[columns[k]]
columns.insert(0, "dataset")
if csv:
header = ""
for k in range(len(columns)):
if k > 0:
header += ","
header += columns[k]
body = ""
for k in range(len(rows)):
row = ""
for w in range(len(rows[k])):
if w > 0:
row += ","
row += str(rows[k][w])
body += '\n{}'.format(row)
return header + body
else:
ret = pd.DataFrame(rows, columns=columns)
return ret
def test_mean_equality(tests, alpha=.05, method='friedman'):
"""
Test for the equality of the means, with alpha confidence level.
H_0: There's no significant difference between the means
H_1: There is at least one significant difference between the means
:param tests:
:param alpha:
:param method:
:return:
"""
from stac.stac import nonparametric_tests as npt
methods = tests.columns[1:]
values = []
for k in methods:
values.append(tests[k].values)
if method=='quade':
f_value, p_value, rankings, pivots = npt.quade_test(*values)
elif method=='friedman':
f_value, p_value, rankings, pivots = npt.friedman_aligned_ranks_test(*values)
else:
raise Exception('Unknown test method!')
print("F-Value: {} \tp-Value: {}".format(f_value, p_value))
if p_value < alpha:
print("\nH0 is rejected!\n")
else:
print("\nH0 is accepted!\n")
post_hoc = {}
rows = []
for k in np.arange(0, len(methods)):
rows.append([methods[k], rankings[k]])
post_hoc[methods[k]] = pivots[k]
return [pd.DataFrame(rows, columns=['METHOD', 'RANK']).sort_values(['RANK']), post_hoc]
def post_hoc_tests(post_hoc, control_method, alpha=.05, method='finner'):
'''
    Paired post-hoc test (Bonferroni-Dunn, Holm, or Finner) against the given control method.
$H_0$: There is no significant difference between the means
$H_1$: There is a significant difference between the means
:param post_hoc:
:param control_method:
:param alpha:
:param method:
:return:
'''
from stac.stac import nonparametric_tests as npt
if method == 'bonferroni_dunn':
comparisons, z_values, p_values, adj_p_values = npt.bonferroni_dunn_test(post_hoc,control_method)
elif method == 'holm':
comparisons, z_values, p_values, adj_p_values = npt.holm_test(post_hoc,control_method)
elif method == 'finner':
comparisons, z_values, p_values, adj_p_values = npt.finner_test(post_hoc, control_method)
else:
raise Exception('Unknown test method!')
rows = []
for k in np.arange(len(comparisons)):
test = 'H0 Accepted' if adj_p_values[k] > alpha else 'H0 Rejected'
rows.append([comparisons[k], z_values[k], p_values[k], adj_p_values[k], test])
return pd.DataFrame(rows, columns=['COMPARISON', 'Z-VALUE', 'P-VALUE', 'ADJUSTED P-VALUE', 'Result'])
| gpl-3.0 | -8,407,978,959,963,033,000 | 26.383234 | 105 | 0.56265 | false |
jonathanhowells/sentimentCSV | sentimentCSV.py | 1 | 4119 | import os
import subprocess
import sys
import timeit
import csv
import pandas as pd
import re
import string
import numpy as np
import shutil
directory = os.getcwd()
os.chdir(directory)
stanford_directory = raw_input("Enter path to Stanford CoreNLP: ")
input_filename = raw_input("Enter input csv filename: ")
output_filename = raw_input("Enter output csv filename: ")
text_column = raw_input("Enter text column name: ")
print "Reading file..."
data = pd.read_csv(input_filename, error_bad_lines=False)
print "Cleaning comments..."
comments = data[text_column]
comments_clean = []
for comment in comments:
comment = re.sub(r'\n', r'',str(comment))
comment = re.sub(r'MR', r'',str(comment))
comment = re.sub(r'mr', r'',str(comment))
comment = re.sub(r'Mr', r'',str(comment))
comment = ' '.join(re.split(r'(?<=[.:;])\s', comment)[:1])
comment = comment.translate(string.maketrans("",""), string.punctuation)
comments_clean.append(comment)
comment_chunks=[comments_clean[x:x+2000] for x in xrange(0, len(comments_clean), 2000)]
input_directory = stanford_directory + '/input_data'
if not os.path.exists(input_directory):
os.makedirs(input_directory)
os.chdir(input_directory)
N = len(comment_chunks)
for n in range(N):
f = open("comments" + str(n) + ".txt", "w");
comments = comment_chunks[n]
for i in range(len(comments)):
if i == len(comments)-1:
f.write(str(comments[i]))
f.write(".")
else:
f.write(str(comments[i]))
f.write(". \n")
f.close()
os.chdir(stanford_directory)
sentiments = [' Neutral', ' Negative', ' Positive', ' Very positive', ' Very negative']
def chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
def update_progress(progress):
barLength = 100 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "-"*block + " "*(barLength-block), round(progress*100,2), status)
sys.stdout.write(text)
sys.stdout.flush()
f = open("output.csv", "wb")
print "Calculating Sentiment..."
start = timeit.default_timer()
for n in range(N):
file_name = os.path.join('input_data', 'comments' + str(n) + '.txt')
p = subprocess.Popen('java -cp "*" -mx5g edu.stanford.nlp.sentiment.SentimentPipeline -file ' + file_name,
shell=True,
stdout=subprocess.PIPE)
output, errors = p.communicate()
senti_list = output.split('\n')
del output, errors
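    # The sentiment pipeline is expected to print alternating lines of sentence
    # text and sentiment label; missing labels are patched with ' Neutral' below
    # before the lines are paired into (text, sentiment) rows.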
for i in range(len(senti_list)):
if i % 2 == 1 and senti_list[i] not in sentiments:
senti_list.insert(i, ' Neutral')
senti_list = senti_list[:-1]
output_list = list(chunks(senti_list, 2))
progress = float(n)/N
update_progress(progress)
#print "rows:", len(output_list)
writer = csv.writer(f, quoting=csv.QUOTE_ALL)
writer.writerows(output_list)
del senti_list, output_list
f.close()
shutil.rmtree(stanford_directory + '/input_data/')
stop = timeit.default_timer()
print "Time taken:", stop - start
output_frame = pd.read_csv("output.csv", header=None)
output_frame.columns = ['Text', 'Sentiment']
senti_text = np.array(output_frame['Text'])
senti_bool = []
for element in senti_text:
if element == '.':
senti_bool.append(0)
else:
senti_bool.append(1)
output_frame["Text_Bool"] = pd.Series(senti_bool)
del senti_bool
data['Sentiment'] = output_frame['Sentiment']
data['Text_Bool'] = output_frame['Text_Bool']
os.chdir('..')
print "Writing to output file..."
data.to_csv(output_filename)
print "Finished!"
| gpl-2.0 | 6,229,005,747,004,761,000 | 26.278146 | 112 | 0.62151 | false |
MalmoUniversity-DA366A/calvin-base | calvin/runtime/north/calvincontrol.py | 1 | 20945 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import time
import datetime
import json
from calvin.Tools import cscompiler as compiler
from calvin.Tools import deployer
from calvin.utilities.calvinlogger import get_logger
from calvin.utilities.calvin_callback import CalvinCB
from calvin.runtime.south.plugins.async import server_connection
from urlparse import urlparse
_log = get_logger(__name__)
control_api_doc = ""
# control_api_doc += \
"""
GET /log
Streaming log from calvin node (more documentation needed)
"""
re_get_log = re.compile(r"GET /log\sHTTP/1")
control_api_doc += \
"""
GET /id
Get id of this calvin node
Response: node-id
"""
re_get_node_id = re.compile(r"GET /id\sHTTP/1")
control_api_doc += \
"""
GET /nodes
List nodes in network (excluding self)
Response: List of node-ids
"""
re_get_nodes = re.compile(r"GET /nodes\sHTTP/1")
control_api_doc += \
"""
GET /node/{node-id}
Get information on node node-id
Response:
{
"attributes": null,
"control_uri": "http://<address>:<controlport>",
"uri": "calvinip://<address>:<port>"
}
"""
re_get_node = re.compile(r"GET /node/((NODE_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
POST /peer_setup
Add calvin nodes to network
Body: {"peers: ["calvinip://<address>:<port>", ...] }
Response: {"result": "OK"}
"""
re_post_peer_setup = re.compile(r"POST /peer_setup\sHTTP/1")
control_api_doc += \
"""
GET /applications
Get applications launched from this node
Response: List of application ids
"""
re_get_applications = re.compile(r"GET /applications\sHTTP/1")
control_api_doc += \
"""
GET /application/{application-id}
Get information on application application-id
Response:
{
"origin_node_id": <node id>,
"actors": <list of actor ids>
"name": <name or id of this application>
}
"""
re_get_application = re.compile(r"GET /application/((APP_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
DELETE /application/{application-id}
Stop application (only applications launched from this node)
Response: {"result: "OK"}
"""
re_del_application = re.compile(r"DELETE /application/((APP_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
POST /actor
Create a new actor
Body:
{
"actor_type:" <type of actor>,
"args" : { "name": <name of actor>, <actor argument>:<value>, ... }
"deploy_args" : {"app_id": <application id>, "app_name": <application name>} (optional)
}
Response: {"actor_id": <actor-id>}
"""
re_post_new_actor = re.compile(r"POST /actor\sHTTP/1")
control_api_doc += \
"""
GET /actors
Get list of actors on this runtime
Response: list of actor ids
"""
re_get_actors = re.compile(r"GET /actors\sHTTP/1")
control_api_doc += \
"""
GET /actor/{actor-id}
Get information on actor
Response:
{
"inports": list inports
"node_id": <node-id>,
"type": <actor type>,
"name": <actor name>,
"outports": list of outports
}
"""
re_get_actor = re.compile(r"GET /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
DELETE /actor/{actor-id}
Delete actor
Response: {"result": "OK"}
"""
re_del_actor = re.compile(r"DELETE /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
GET /actor/{actor-id}/report
    Some actors store statistics on inputs and outputs; this reports them. Not always present.
    Response: Depends on actor
"""
re_get_actor_report = re.compile(r"GET /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/report\sHTTP/1")
control_api_doc += \
"""
POST /actor/{actor-id}/migrate
Migrate actor to (other) node
Body: {"peer_node_id": <node-id>}
Response: {"result": "ACK"}
"""
re_post_actor_migrate = re.compile(r"POST /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/migrate\sHTTP/1")
control_api_doc += \
"""
POST /actor/{actor-id}/disable
DEPRECATED. Disables an actor
Response: {"result": "OK"}
"""
re_post_actor_disable = re.compile(r"POST /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/disable\sHTTP/1")
# control_api_doc += \
"""
GET /actor/{actor-id}/port/{port-id}
Broken. Get information on port {port-id} of actor {actor-id}
"""
re_get_port = re.compile(r"GET /actor/((ACTOR_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/port/((PORT_)?[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\sHTTP/1")
control_api_doc += \
"""
POST /connect
Connect actor ports
Body:
{
"actor_id" : <actor-id>,
"port_name": <port-name>,
"port_dir": <in/out>,
"peer_node_id": <node-id>,
"peer_actor_id": <actor-id>,
"peer_port_name": <port-name>,
"peer_port_dir": <out/in>
}
Response: {"result": "OK"}
"""
re_post_connect = re.compile(r"POST /connect\sHTTP/1")
control_api_doc += \
"""
POST /set_port_property
Sets a property of the port. Currently only fanout on outports is supported.
Body:
{
"actor_id" : <actor-id>,
"port_type": <in/out>,
"port_name": <port-name>,
"port_property": <property-name>
"value" : <property value>
}
Response: {"result": "OK"}
"""
re_set_port_property = re.compile(r"POST /set_port_property\sHTTP/1")
control_api_doc += \
"""
POST /deploy
Compile and deploy a calvin script to this calvin node
Body:
{
"name": <application name>,
"script": <calvin script>
}
Response: {"application_id": <application-id>}
"""
re_post_deploy = re.compile(r"POST /deploy\sHTTP/1")
control_api_doc += \
"""
POST /disconnect
    Disconnect a port. If port fields are empty, all ports of the actor are disconnected
Body:
{
"actor_id": <actor-id>,
"port_name": <port-name>,
"port_dir": <in/out>,
"port_id": <port-id>
}
Response: {"result": "OK"}
"""
re_post_disconnect = re.compile(r"POST /disconnect\sHTTP/1")
control_api_doc += \
"""
DELETE /node
Stop (this) calvin node
Response: {"result": "OK"}
"""
re_delete_node = re.compile(r"DELETE /node\sHTTP/1")
control_api_doc += \
"""
POST /index/{key}
Store value under index key
Body:
{
"value": <string>
}
Response: {"result": "true"}
"""
re_post_index = re.compile(r"POST /index/([0-9a-zA-Z\.\-/]*)\sHTTP/1")
control_api_doc += \
"""
DELETE /index/{key}
Remove value from index key
Body:
{
"value": <string>
}
Response: {"result": "true"}
"""
re_delete_index = re.compile(r"DELETE /index/([0-9a-zA-Z\.\-/]*)\sHTTP/1")
control_api_doc += \
"""
GET /index/{key}
Fetch values under index key
Response: {"result": <list of strings>}
"""
re_get_index = re.compile(r"GET /index/([0-9a-zA-Z\.\-/]*)\sHTTP/1")
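# The endpoints above can be exercised directly over HTTP. The host and port below
# are only examples; use the control_uri the node was started with:
#
#   curl http://localhost:5001/id
#   curl http://localhost:5001/actors
#   curl -X POST -d '{"peers": ["calvinip://127.0.0.1:5000"]}' http://localhost:5001/peer_setup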
_calvincontrol = None
def get_calvincontrol():
""" Returns the CalvinControl singleton
"""
global _calvincontrol
if _calvincontrol is None:
_calvincontrol = CalvinControl()
return _calvincontrol
class CalvinControl(object):
""" A HTTP REST API for calvin nodes
"""
def __init__(self):
self.node = None
self.log_connection = None
self.routes = None
self.server = None
self.connections = {}
def start(self, node, uri):
""" Start listening and handle request on uri
"""
self.port = int(urlparse(uri).port)
self.host = urlparse(uri).hostname
_log.info("Listening on: %s:%s" % (self.host, self.port))
self.node = node
# Set routes for requests
self.routes = [
(re_get_log, self.handle_get_log),
(re_get_node_id, self.handle_get_node_id),
(re_get_nodes, self.handle_get_nodes),
(re_get_node, self.handle_get_node),
(re_post_peer_setup, self.handle_peer_setup),
(re_get_applications, self.handle_get_applications),
(re_get_application, self.handle_get_application),
(re_del_application, self.handle_del_application),
(re_post_new_actor, self.handle_new_actor),
(re_get_actors, self.handle_get_actors),
(re_get_actor, self.handle_get_actor),
(re_del_actor, self.handle_del_actor),
(re_get_actor_report, self.handle_get_actor_report),
(re_post_actor_migrate, self.handle_actor_migrate),
(re_post_actor_disable, self.handle_actor_disable),
(re_get_port, self.handle_get_port),
(re_post_connect, self.handle_connect),
(re_set_port_property, self.handle_set_port_property),
(re_post_deploy, self.handle_deploy),
(re_delete_node, self.handle_quit),
(re_post_disconnect, self.handle_disconnect),
(re_post_index, self.handle_post_index),
(re_delete_index, self.handle_delete_index),
(re_get_index, self.handle_get_index)
]
self.server = server_connection.ServerProtocolFactory(self.handle_request, "raw")
self.server.start(self.host, self.port)
def stop(self):
""" Stop
"""
self.server.stop()
def handle_request(self, actor_ids=None):
""" Handle incoming requests
"""
if self.server.pending_connections:
addr, conn = self.server.accept()
self.connections[addr] = conn
for handle, connection in self.connections.items():
if connection.data_available:
data = connection.data_get()
found = False
for route in self.routes:
match = route[0].match(data)
if match:
http_data = data.split("\r\n\r\n")[1]
if http_data:
http_data = json.loads(http_data)
_log.debug("Calvin control handles:\n%s\n---------------" % data)
route[1](handle, connection, match, http_data)
found = True
break
if not found:
_log.error("No route found for: %s" % data)
self.send_response(
handle, connection, "HTTP/1.0 404 Not Found\r\n")
def send_response(self, handle, connection, data):
""" Send response header text/html
"""
if not connection.connection_lost:
connection.send("HTTP/1.0 200 OK\n"
+ "Content-Type: application/json\n"
+
"Access-Control-Allow-Methods: GET, POST, PUT, DELETE, OPTIONS\n"
+ "Access-Control-Allow-Origin: *\r\n"
+ "\n")
connection.send(data)
connection.close()
del self.connections[handle]
def send_streamheader(self, connection):
""" Send response header for text/event-stream
"""
if not connection.connection_lost:
connection.send("HTTP/1.0 200 OK\n"
+ "Content-Type: text/event-stream\n"
+ "Access-Control-Allow-Origin: *\r\n"
+ "\n")
def storage_cb(self, key, value, handle, connection):
self.send_response(handle, connection, json.dumps(value))
def handle_get_log(self, handle, connection, match, data):
""" Get log stream
"""
self.log_connection = connection
self.send_streamheader(connection)
def handle_get_node_id(self, handle, connection, match, data):
""" Get node id from this node
"""
self.send_response(
handle, connection, json.dumps({'id': self.node.id}))
def handle_peer_setup(self, handle, connection, match, data):
self.node.peersetup(data['peers'])
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_get_nodes(self, handle, connection, match, data):
""" Get active nodes
"""
self.send_response(
handle, connection, json.dumps(self.node.network.list_links()))
def handle_get_node(self, handle, connection, match, data):
""" Get node information from id
"""
self.node.storage.get_node(match.group(1), CalvinCB(
func=self.storage_cb, handle=handle, connection=connection))
def handle_get_applications(self, handle, connection, match, data):
""" Get applications
"""
self.send_response(
handle, connection, json.dumps(self.node.app_manager.list_applications()))
def handle_get_application(self, handle, connection, match, data):
""" Get application from id
"""
self.node.storage.get_application(match.group(1), CalvinCB(
func=self.storage_cb, handle=handle, connection=connection))
def handle_del_application(self, handle, connection, match, data):
""" Delete application from id
"""
self.node.app_manager.destroy(match.group(1))
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_new_actor(self, handle, connection, match, data):
""" Create actor
"""
actor_id = self.node.new(actor_type=data['actor_type'], args=data[
'args'], deploy_args=data['deploy_args'])
self.send_response(
handle, connection, json.dumps({'actor_id': actor_id}))
def handle_get_actors(self, handle, connection, match, data):
""" Get actor list
"""
actors = self.node.am.list_actors()
self.send_response(
handle, connection, json.dumps(actors))
def handle_get_actor(self, handle, connection, match, data):
""" Get actor from id
"""
self.node.storage.get_actor(match.group(1), CalvinCB(
func=self.storage_cb, handle=handle, connection=connection))
def handle_del_actor(self, handle, connection, match, data):
""" Delete actor from id
"""
self.node.am.destroy(match.group(1))
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_get_actor_report(self, handle, connection, match, data):
""" Get report from actor
"""
self.send_response(
handle, connection, json.dumps(self.node.am.report(match.group(1))))
def handle_actor_migrate(self, handle, connection, match, data):
""" Migrate actor
"""
self.node.am.migrate(match.group(1), data['peer_node_id'],
callback=CalvinCB(self.actor_migrate_cb, handle, connection))
def actor_migrate_cb(self, handle, connection, status, *args, **kwargs):
""" Migrate actor respons
"""
self.send_response(handle, connection, json.dumps({'result': status}))
def handle_actor_disable(self, handle, connection, match, data):
self.node.am.disable(match.group(1))
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_get_port(self, handle, connection, match, data):
""" Get port from id
"""
self.node.storage.get_port(match.group(2), CalvinCB(
func=self.storage_cb, handle=handle, connection=connection))
def handle_connect(self, handle, connection, match, data):
""" Connect port
"""
if "actor_id" not in data:
data["actor_id"] = None
if "port_name" not in data:
data["port_name"] = None
if "port_dir" not in data:
data["port_dir"] = None
if "port_id" not in data:
data["port_id"] = None
if "peer_node_id" not in data:
data["peer_node_id"] = None
if "peer_actor_id" not in data:
data["peer_actor_id"] = None
if "peer_port_name" not in data:
data["peer_port_name"] = None
if "peer_port_dir" not in data:
data["peer_port_dir"] = None
if "peer_port_id" not in data:
data["peer_port_id"] = None
self.node.connect(
actor_id=data["actor_id"],
port_name=data["port_name"],
port_dir=data["port_dir"],
port_id=data["port_id"],
peer_node_id=data["peer_node_id"],
peer_actor_id=data["peer_actor_id"],
peer_port_name=data["peer_port_name"],
peer_port_dir=data["peer_port_dir"],
peer_port_id=data["peer_port_id"])
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_set_port_property(self, handle, connection, match, data):
self.node.am.set_port_property(
actor_id=data["actor_id"],
port_type=data["port_type"],
port_name=data["port_name"],
port_property=data["port_property"],
value=data["value"])
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_deploy(self, handle, connection, match, data):
print "data: ", data
app_info, errors, warnings = compiler.compile(
data["script"], filename=data["name"])
app_info["name"] = data["name"]
d = deployer.Deployer(
runtime=None, deployable=app_info, node_info=None, node=self.node)
app_id = d.deploy()
self.send_response(
handle, connection, json.dumps({'application_id': app_id}))
def handle_quit(self, handle, connection, match, data):
self.node.stop()
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_disconnect(self, handle, connection, match, data):
self.node.disconnect(
data['actor_id'], data['port_name'], data['port_dir'], data['port_id'])
self.send_response(handle, connection, json.dumps({'result': 'OK'}))
def handle_post_index(self, handle, connection, match, data):
""" Add to index
"""
self.node.storage.add_index(
match.group(1), data['value'], cb=CalvinCB(self.index_cb, handle, connection))
def handle_delete_index(self, handle, connection, match, data):
""" Remove from index
"""
self.node.storage.remove_index(
match.group(1), data['value'], cb=CalvinCB(self.index_cb, handle, connection))
def handle_get_index(self, handle, connection, match, data):
""" Get from index
"""
self.node.storage.get_index(
match.group(1), cb=CalvinCB(self.get_index_cb, handle, connection))
def index_cb(self, handle, connection, *args, **kwargs):
""" Index operation response
"""
_log.debug("index cb (in control) %s, %s" % (args, kwargs))
if 'value' in kwargs:
value = kwargs['value']
else:
value = None
self.send_response(handle, connection, json.dumps({'result': value}))
def get_index_cb(self, handle, connection, key, value, *args, **kwargs):
""" Index operation response
"""
_log.debug("get index cb (in control) %s, %s" % (key, value))
self.send_response(handle, connection, json.dumps({'result': value}))
def log_firing(self, actor_name, action_method, tokens_produced, tokens_consumed, production):
""" Trace firing, sends data on log_sock
"""
if self.log_connection is not None:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
data = {}
data['timestamp'] = st
data['node_id'] = self.node.id
data['type'] = 'fire'
data['actor'] = actor_name
data['action_method'] = action_method
data['produced'] = tokens_produced
data['consumed'] = tokens_consumed
self.log_connection.send("data: %s\n\n" % json.dumps(data))
| apache-2.0 | -3,372,114,627,838,210,600 | 32.836834 | 195 | 0.575651 | false |
lyzardiar/RETools | PublicTools/bin/tools/packLuaJit.py | 1 | 1726 | #coding=utf-8
import os
import os.path
import shutil
import sys
import getopt
import string
import fnmatch
import md5
import hashlib
import zipfile
import time
import threading
import struct
from pprint import pprint
from struct import *
projectdir = os.path.dirname(os.path.realpath(__file__))
compileBin = os.path.join(projectdir, "bin/compile_scripts.bat")
def iter_find_files(path, fnexp):
for root, dirs, files, in os.walk(path):
for filename in fnmatch.filter(files, fnexp):
yield os.path.join(root, filename)
def work_file(filepath):
filepath = os.path.realpath(filepath)
cmd = compileBin + (" -i %s -o %s -m files -jit" % (filepath, filepath))
os.system(cmd)
def work_async(tardir):
cmd = compileBin + (" -i %s -o %s -m files -jit" % (tardir, tardir))
os.system(cmd)
# for filename in iter_find_files(tardir, "*.*"):
# if filename.find(".lua") != -1:
# work_file(filename)
# pass
def work():
if len(sys.argv) > 1:
inputFile = sys.argv[1]
for i in range(1, len(sys.argv)):
filepath = os.path.realpath(sys.argv[i])
if os.path.isdir(filepath):
work_async(filepath)
else:
work_file(filepath)
else:
curdir = r"C:\WorkSpace\Public\TX\Android\Versions\config2"
curdir = r"C:\WorkSpace\Public\TX\Android\Versions\Ver0.1.0.34809_encode"
curdir = r"C:\WorkSpace\Public\TX\Android\markVersion_35742-35779_2"
work_async(curdir)
os.system("pause")
if __name__ == '__main__':
work()
# try:
# work()
# except Exception, e:
# print Exception, e | mit | 5,455,863,543,919,101,000 | 26.854839 | 81 | 0.598494 | false |
CIGIHub/greyjay | greyjay/themes/models.py | 1 | 4991 | from __future__ import absolute_import, division, unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.wagtailadmin.edit_handlers import (
FieldPanel,
InlinePanel,
MultiFieldPanel,
)
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsnippets.edit_handlers import SnippetChooserPanel
from wagtail.wagtailsnippets.models import register_snippet
from . import utils
@python_2_unicode_compatible
class ThemeContent(ClusterableModel):
name = models.CharField(max_length=255)
contact_email = models.EmailField(
blank=True,
null=True,
help_text="Only provide if this should be different from the site default email contact address.",
)
default = models.BooleanField(default=False)
panels = [
FieldPanel('name'),
FieldPanel('contact_email'),
FieldPanel('default'),
InlinePanel('block_links', label="Content Blocks"),
InlinePanel('follow_links', label="Follow Links"),
InlinePanel('logo_links', label="Logos"),
]
def __str__(self):
return self.name
register_snippet(ThemeContent)
@python_2_unicode_compatible
class Theme(models.Model):
name = models.CharField(max_length=1024)
folder = models.CharField(max_length=1024, default="themes/default")
content = models.ForeignKey(ThemeContent, null=True)
def __str__(self):
return self.name
panels = [
FieldPanel('name'),
FieldPanel('folder'),
SnippetChooserPanel('content'),
]
register_snippet(Theme)
class ThemeablePage(Page):
'''
    Abstract model class to inherit from for themeable pages
'''
is_creatable = False
class Meta:
abstract = True
theme = models.ForeignKey(
Theme,
on_delete=models.SET_NULL,
blank=True,
null=True,
)
def get_template(self, request, *args, **kwargs):
original_template = super(ThemeablePage, self).get_template(request, *args, **kwargs)
if self.theme is None:
return original_template
custom_template = utils.get_themed_template_name(self.theme, original_template)
if utils.template_exists(custom_template):
return custom_template
return original_template
style_panels = [
MultiFieldPanel(
[
SnippetChooserPanel('theme'),
],
heading="Theme"
),
]
@python_2_unicode_compatible
class TextBlock(models.Model):
name = models.CharField(max_length=255)
usage = models.CharField(max_length=255, blank=True, default="")
heading = models.TextField(blank=True, default="")
content = RichTextField(blank=True, default="")
panels = [
FieldPanel('name'),
FieldPanel('heading'),
FieldPanel('content'),
FieldPanel('usage'),
]
def __str__(self):
return self.name
register_snippet(TextBlock)
@python_2_unicode_compatible
class FollowLink(models.Model):
name = models.CharField(max_length=255)
usage = models.CharField(max_length=255, blank=True, default="")
link = models.CharField(max_length=1024)
panels = [
FieldPanel('name'),
FieldPanel('link'),
FieldPanel('usage'),
]
def __str__(self):
return self.name
register_snippet(FollowLink)
@python_2_unicode_compatible
class LogoBlock(models.Model):
name = models.CharField(max_length=255)
usage = models.CharField(max_length=255, blank=True, default="")
logo = models.ForeignKey(
'images.AttributedImage',
)
link = models.CharField(max_length=2048, blank=True, null=True)
panels = [
FieldPanel('name'),
ImageChooserPanel('logo'),
FieldPanel('link'),
FieldPanel('usage'),
]
def __str__(self):
return self.name
register_snippet(LogoBlock)
class ContentBlockLink(models.Model):
block = models.ForeignKey(
"TextBlock",
related_name='content_links'
)
theme_content = ParentalKey(
"ThemeContent",
related_name='block_links'
)
panels = [SnippetChooserPanel("block")]
class ContentFollowLink(models.Model):
block = models.ForeignKey(
"FollowLink",
related_name='content_links'
)
theme_content = ParentalKey(
"ThemeContent",
related_name='follow_links'
)
panels = [SnippetChooserPanel("block")]
class ContentLogoLink(models.Model):
block = models.ForeignKey(
"LogoBlock",
related_name='content_links'
)
theme_content = ParentalKey(
"ThemeContent",
related_name='logo_links'
)
panels = [SnippetChooserPanel("block")]
| mit | -7,877,395,620,834,723,000 | 23.707921 | 106 | 0.651773 | false |
devilry/devilry-django | devilry/devilry_frontpage/cradminextensions/listbuilder/listbuilder_role.py | 1 | 5167 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.translation import pgettext_lazy
from cradmin_legacy import crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from cradmin_legacy.viewhelpers import listbuilder
from devilry.devilry_cradmin import devilry_listbuilder
class AbstractRoleItemValue(listbuilder.itemvalue.TitleDescription):
valuealias = 'user'
def get_devilryrole(self):
raise NotImplementedError()
def get_extra_css_classes_list(self):
return [
'devilry-frontpage-listbuilder-roleselect-itemvalue',
'devilry-frontpage-listbuilder-roleselect-itemvalue-{}'.format(self.get_devilryrole()),
]
class StudentRoleItemValue(AbstractRoleItemValue):
"""
Listbuilder ItemValue renderer for information about the student devilryrole.
"""
def get_devilryrole(self):
return 'student'
def get_title(self):
return pgettext_lazy('role', 'Student')
def get_description(self):
return pgettext_lazy('roleselect',
'Upload deliveries or see your delivery and feedback history.')
class ExaminerRoleItemValue(AbstractRoleItemValue):
"""
Listbuilder ItemValue renderer for information about the examiner devilryrole.
"""
def get_devilryrole(self):
return 'examiner'
def get_title(self):
return pgettext_lazy('role', 'Examiner')
def get_description(self):
return pgettext_lazy('roleselect',
'Give students feedback on their deliveries as examiner.')
class AnyAdminRoleItemValue(AbstractRoleItemValue):
"""
Listbuilder ItemValue renderer for information about the anyadmin devilryrole.
"""
def get_devilryrole(self):
return 'anyadmin'
def get_title(self):
return pgettext_lazy('role', 'Administrator')
def get_description(self):
return pgettext_lazy('roleselect',
'Manage departments, courses, semesters and assignments.')
class AbstractRoleItemFrame(devilry_listbuilder.common.GoForwardLinkItemFrame):
valuealias = 'user'
def get_url(self):
raise NotImplementedError()
def get_devilryrole(self):
raise NotImplementedError()
def get_extra_css_classes_list(self):
return [
'devilry-frontpage-listbuilder-roleselect-itemframe',
'devilry-frontpage-listbuilder-roleselect-itemframe-{}'.format(self.get_devilryrole()),
]
class StudentRoleItemFrame(AbstractRoleItemFrame):
"""
Listbuilder ItemFrame renderer for the student devilryrole.
"""
def get_devilryrole(self):
return 'student'
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_student',
appname='dashboard',
roleid=None,
viewname=crapp.INDEXVIEW_NAME)
class ExaminerRoleItemFrame(AbstractRoleItemFrame):
"""
Listbuilder ItemFrame renderer for the examiner devilryrole.
"""
def get_devilryrole(self):
return 'examiner'
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_examiner',
appname='assignmentlist',
roleid=None,
viewname=crapp.INDEXVIEW_NAME)
class AnyAdminRoleItemFrame(AbstractRoleItemFrame):
"""
Listbuilder ItemFrame renderer for the anyadmin devilryrole.
"""
def get_devilryrole(self):
return 'anyadmin'
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_admin',
appname='overview',
roleid=None,
viewname=crapp.INDEXVIEW_NAME)
class RoleSelectList(listbuilder.lists.RowList):
def __init__(self, user):
super(RoleSelectList, self).__init__()
self.user = user
self.__build_list()
def __append_student_item(self):
item = StudentRoleItemFrame(inneritem=StudentRoleItemValue(value=self.user))
self.append(item)
def __append_examiner_item(self):
item = ExaminerRoleItemFrame(inneritem=ExaminerRoleItemValue(value=self.user))
self.append(item)
def __append_anyadmin_item(self):
item = AnyAdminRoleItemFrame(inneritem=AnyAdminRoleItemValue(value=self.user))
self.append(item)
def __build_list(self):
user_model = get_user_model()
self.user_is_student = user_model.objects.user_is_student(self.user)
self.user_is_examiner = user_model.objects.user_is_examiner(self.user)
self.user_is_anyadmin = user_model.objects.user_is_admin_or_superuser(self.user)
self.user_has_no_roles = True
if self.user_is_student:
self.__append_student_item()
self.user_has_no_roles = False
if self.user_is_examiner:
self.__append_examiner_item()
self.user_has_no_roles = False
if self.user_is_anyadmin:
self.__append_anyadmin_item()
self.user_has_no_roles = False
def get_extra_css_classes_list(self):
return ['devilry-frontpage-roleselectlist']
| bsd-3-clause | 8,373,252,969,505,380,000 | 30.315152 | 99 | 0.660538 | false |
serkansokmen/qn | qn/wsgi.py | 1 | 2213 | """
WSGI config for qn project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
#import sys
#import site
#import subprocess
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../")
# Add the virtualenv packages to the site directory. This uses the technique
# described at http://code.google.com/p/modwsgi/wiki/VirtualEnvironments
# Remember original sys.path.
#prev_sys_path = list(sys.path)
# Get the path to the env's site-packages directory
#site_packages = subprocess.check_output([
# os.path.join(PROJECT_ROOT, '.virtualenv/bin/python'),
# '-c',
# 'from distutils.sysconfig import get_python_lib;'
# 'print get_python_lib(),'
#]).strip()
# Add the virtualenv site-packages to the site packages
#site.addsitedir(site_packages)
# Reorder sys.path so the new directories are at the front.
#new_sys_path = []
#for item in list(sys.path):
# if item not in prev_sys_path:
# new_sys_path.append(item)
# sys.path.remove(item)
#sys.path[:0] = new_sys_path
# Add the app code to the path
#sys.path.append(PROJECT_ROOT)
os.environ['CELERY_LOADER'] = 'django'
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "qn.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause | 1,427,026,455,742,831,900 | 34.126984 | 79 | 0.729327 | false |
chiara-paci/costruttoridimondi | costruttoridimondi/functional_tests/test_sharing.py | 1 | 1528 | from selenium import webdriver
from . import pages,base
class SharingTest(base.MultiuserFunctionalTest):
def test_logged_in_users_stories_are_saved_as_my_stories(self):
# Edith is a logged-in user
self.set_browser('edith@example.com',size=(700,900),position=(0,0))
# Her friend Oniciferous is also hanging out on the stories site
oni_browser = self.create_user_browser_with_session('oniciferous@example.com',size=(700,900),position=(700,0))
# Edith goes to the home page and starts a list
e_story_page = pages.HomePage(self).start_new_story('Get help')
# She notices a "Share this story" option
        share_box = e_story_page.get_share_box()
self.assertEqual(share_box.get_attribute('placeholder'),
'your-friend@example.com')
# She shares her story.
# The page updates to say that it's shared with Oniciferous:
e_story_page.share_story_with('oniciferous@example.com')
self.set_browser('oniciferous@example.com')
#self.browser = oni_browser
        mystory_page = pages.HomePage(self).go_to_home_page().click_on_mystories_link()
        o_story_page = mystory_page.click_on_story_link('Get help')
self.wait_for(lambda: self.assertEqual(
o_story_page.get_story_owner(),
'edith@example.com'
))
o_story_page.add_section('Hi Edith!')
self.set_browser('edith@example.com')
o_story_page.wait_for_new_section_in_story('Hi Edith!', 2)
| gpl-3.0 | 7,599,141,757,490,930,000 | 36.268293 | 120 | 0.644634 | false |
jpancoast/aws-py-tools | checkSGForOutboundAll.py | 1 | 1424 | #!/usr/bin/env python
import sys
import signal
import boto.ec2
import operator
import getopt
from lib.AWSPyTools import ParseOptions
from lib.AWSPyTools import AWSPyTools
def main(argv):
signal.signal(signal.SIGINT, signal_handler)
po = ParseOptions(sys.argv)
(env, region, vpc_id) = po.getAwsOptions()
debug = False
awsPyTools = AWSPyTools(
region=region, environment=env, loadFromFile=False, debug=debug)
envDataString = "Running in environment: " + env + ", region: " + region
if vpc_id is not None:
envDataString += ", vpc_id: " + vpc_id
print envDataString
sgs = awsPyTools.getAllSecurityGroups(vpc_id=vpc_id)
for sgName in sgs:
sg = sgs[sgName]
if len(sg.rules_egress) > 0:
for rule in sg.rules_egress:
for grant in rule.grants:
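                    # An egress rule with no port range, protocol -1 (all) and CIDR
                    # 0.0.0.0/0 is the default "allow all outbound" rule we want to revoke.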
if (rule.from_port is None or rule.from_port == 'None') and (rule.to_port is None or rule.to_port == 'None') and (rule.ip_protocol == '-1') and (str(grant.cidr_ip) == '0.0.0.0/0'):
print str(sg.name) + " (" + sg.id + ") has OUTBOUND ALL, so I'm removing that rule"
print ""
awsPyTools.revokeSGRule('egress', sg, rule, grant)
def signal_handler(signal, frame):
print sys.argv[0] + " exited via keyboard interrupt."
sys.exit(0)
if __name__ == "__main__":
main(sys.argv)
| gpl-2.0 | -3,898,750,051,795,106,300 | 26.921569 | 200 | 0.605337 | false |
tkerola/chainer | tests/chainerx_tests/unit_tests/routines_tests/test_math.py | 1 | 72049 | import chainer
import numpy
import pytest
import chainerx
import chainerx.testing
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import math_utils
from chainerx_tests import op_utils
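# Arithmetic involving bool arrays is not allowed in ChainerX; the combinations
# below are expected to raise DtypeError (see the *_invalid_dtypes tests).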
_in_out_dtypes_arithmetic_invalid = [
(('bool_', 'bool_'), 'bool_'),
(('bool_', 'int8'), 'int8'),
(('bool_', 'int16'), 'int16'),
(('bool_', 'int32'), 'int32'),
(('bool_', 'int64'), 'int64'),
(('bool_', 'uint8'), 'uint8'),
(('bool_', 'float16'), 'float16'),
(('bool_', 'float32'), 'float32'),
(('bool_', 'float64'), 'float64'),
(('int8', 'bool_'), 'int8'),
(('int16', 'bool_'), 'int16'),
(('int32', 'bool_'), 'int32'),
(('int64', 'bool_'), 'int64'),
(('uint8', 'bool_'), 'uint8'),
(('float16', 'bool_'), 'float16'),
(('float32', 'bool_'), 'float32'),
(('float64', 'bool_'), 'float64'),
]
_in_out_dtypes_arithmetic = [
dtypes for dtypes in dtype_utils.result_dtypes_two_arrays
if dtypes not in _in_out_dtypes_arithmetic_invalid
]
_in_out_dtypes_inplace_arithmetic_invalid = [
((t1, t2), t3) for (t1, t2), t3 in _in_out_dtypes_arithmetic
if (numpy.dtype(t1).kind != 'f' and numpy.dtype(t2).kind == 'f')
] + _in_out_dtypes_arithmetic_invalid
_in_out_dtypes_inplace_arithmetic = [
dtypes for dtypes in dtype_utils.result_dtypes_two_arrays
if dtypes not in _in_out_dtypes_inplace_arithmetic_invalid
]
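# In-place arithmetic additionally rejects a float rhs with a non-float lhs array,
# since the promoted result could not be stored back into the lhs dtype.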
_in_out_dtypes_array_int_scalar = [
# Int scalar.
(('int8',), int, 'int8'),
(('int16',), int, 'int16'),
(('int32',), int, 'int32'),
(('int64',), int, 'int64'),
(('uint8',), int, 'uint8'),
(('float16',), int, 'float16'),
(('float32',), int, 'float32'),
(('float64',), int, 'float64'),
(('int16',), numpy.int16, 'int16'),
(('uint8',), numpy.int8, 'uint8'),
(('float64',), numpy.int8, 'float64'),
(('float16',), numpy.int64, 'float16'),
]
_in_out_dtypes_int_array_float_scalar = [
# Int arrays and float scalars.
(('int8',), float, 'float32'),
(('int16',), float, 'float32'),
(('int32',), float, 'float32'),
(('int64',), float, 'float32'),
(('uint8',), float, 'float32'),
(('int8',), numpy.float32, 'float32'),
(('int64',), numpy.float16, 'float32'),
(('uint8',), numpy.float64, 'float32'),
]
_in_out_dtypes_float_array_float_scalar = [
    # Float arrays and float scalars.
(('float16',), float, 'float16'),
(('float32',), float, 'float32'),
(('float64',), float, 'float64'),
(('float16',), numpy.float64, 'float16'),
(('float64',), numpy.float16, 'float64'),
]
_in_out_dtypes_arithmetic_scalar = (
_in_out_dtypes_array_int_scalar
+ _in_out_dtypes_int_array_float_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_inplace_arithmetic_scalar = (
_in_out_dtypes_array_int_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_float_arithmetic_scalar = (
_in_out_dtypes_int_array_float_scalar
+ _in_out_dtypes_float_array_float_scalar)
_in_out_dtypes_inplace_float_arithmetic_scalar = (
_in_out_dtypes_float_array_float_scalar)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.numeric_dtypes)),
'input': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.numeric_dtypes)),
'input': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'input': [float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestNegative(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
if self.is_module:
return xp.negative(a)
else:
return -a
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DtypeError, TypeError))
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_negative_invalid_bool(xp, device, is_module):
x = xp.array([True, False], dtype='bool_')
if is_module:
xp.negative(x)
else:
-x
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestAdd(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.add(a, b)
else:
return a + b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_add_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
if is_module:
a + b
else:
chainerx.add(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestIAdd(math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a += b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_arithmetic_invalid)
def test_iadd_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a += b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestAddScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return a + scalar
else:
return scalar + a
else:
if self.is_scalar_rhs:
return xp.add(a, scalar)
else:
return xp.add(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestIAddScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a += scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSub(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.subtract(a, b)
else:
return a - b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_sub_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
if is_module:
a - b
else:
chainerx.subtract(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestISub(math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a -= b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_arithmetic_invalid)
def test_isub_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a -= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSubScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return a - scalar
else:
return scalar - a
else:
if self.is_scalar_rhs:
return xp.subtract(a, scalar)
else:
return xp.subtract(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestISubScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a -= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': dtype_utils.result_dtypes_two_arrays,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMul(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
if self.is_module:
return xp.multiply(a, b)
else:
return a * b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.all_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_arithmetic + [
((t, 'bool_'), t) for t in chainerx.testing.all_dtypes
],
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestIMul(math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a, b):
a *= b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar + [
((t,), bool, t) for t in chainerx.testing.all_dtypes
],
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMulScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return a * scalar
else:
return scalar * a
else:
if self.is_scalar_rhs:
return xp.multiply(a, scalar)
else:
return xp.multiply(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': (
_in_out_dtypes_inplace_arithmetic_scalar + [
((t,), bool, t) for t in chainerx.testing.all_dtypes
]),
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_inplace_float_arithmetic_scalar,
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [
0, -1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestIMulScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a *= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*chainer.testing.product({
'lhs,rhs': [
([], []),
([0, 1, 2, 3, 100, 101, 102, 103], [3] * 8),
([-0, -1, -2, -3, -4, -100, -101, -102, -103], [3] * 9),
([0, 1, 2, 3, 100, 101, 102, 103], [-3] * 8),
([-0, -1, -2, -3, -4, -100, -101, -102, -103], [-3] * 9),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [1.2] * 8),
([-0., -0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4],
[1.2] * 9),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [-1.2] * 8),
([-0., -0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4],
[-1.2] * 9),
],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'is_module': [True, False],
}))
class TestFloorDivide(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
in_dtype1, in_dtype2 = self.in_dtypes
a = numpy.array(self.lhs).astype(in_dtype1)
b = numpy.array(self.rhs).astype(in_dtype2)
return a, b
def func(self, xp, a, b):
if self.is_module:
return xp.floor_divide(a, b)
else:
return a // b
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(chainer.testing.product_dict(
chainer.testing.product({
'array': [
([]),
([0, 1, 2, 3, 100, 101, 102, 103]),
([-0, -1, -2, -3, -4, -100, -101, -102, -103]),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4]),
([-0., -0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4]),
([-0.61, -0.6, -0.59, 0.59, 0.6, 0.61]),
],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
}),
chainer.testing.product({
'scalar_value': [-3, 3, -1.2, 1.2, 0],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
})
# Special values
+ chainer.testing.product({
'scalar_value': [float('inf'), -float('inf'), float('nan')],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
})
)))
class TestFloorDivideScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def setup(self):
super().setup()
in_dtype, = self.in_dtypes
# TODO(imanishi): Remove this.
if in_dtype == 'uint8' and self.scalar_value < 0:
self.skip_forward_test = True
def generate_inputs(self):
in_dtype, = self.in_dtypes
a = numpy.array(self.array).astype(in_dtype)
return a,
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return xp.floor_divide(a, scalar)
else:
return xp.floor_divide(scalar, a)
else:
if self.is_scalar_rhs:
return a // scalar
else:
return scalar // a
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_floordiv_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
if is_module:
a // b
else:
chainerx.floor_divide(a, b)
# TODO(imanishi): Support and test zero division and mixed dtypes.
# TODO(imanishi): Support and test chainerx.Scalar // chainerx.ndarray.
# TODO(imanishi): Support and test bool dtype.
@chainerx.testing.numpy_chainerx_array_equal(float16_rtol=1e-3)
@pytest.mark.parametrize('lhs,rhs', [
([], []),
([0, 1, 2, 3, 100, 101, 102, 103], [3] * 8),
([-1, -2, -3, -4, -100, -101, -102, -103], [3] * 8),
([0, 1, 2, 3, 100, 101, 102, 103], [-3] * 8),
([-1, -2, -3, -4, -100, -101, -102, -103], [-3] * 8),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [1.2] * 8),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], [1.2] * 8),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], [-1.2] * 8),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], [-1.2] * 8),
([0, 1, 2, 3, 100, 101, 102, 103], 3),
([-1, -2, -3, -4, -100, -101, -102, -103], 3),
([0, 1, 2, 3, 100, 101, 102, 103], -3),
([-1, -2, -3, -4, -100, -101, -102, -103], -3),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], 1.2),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], 1.2),
([0., 0.8, 1.6, 2.4, 100., 100.8, 101.6, 102.4], -1.2),
([-0.8, -1.6, -2.4, -3.2, -100., -100.8, -101.6, -102.4], -1.2),
])
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
def test_ifloordiv(xp, lhs, rhs, device, numeric_dtype):
if numpy.array(lhs).dtype.kind != numpy.dtype(numeric_dtype).kind:
return chainerx.testing.ignore()
lhs = xp.array(lhs).astype(numeric_dtype)
if isinstance(rhs, (list, tuple)):
rhs = xp.array(rhs).astype(numeric_dtype)
lhs //= rhs
return lhs
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_inplace_arithmetic_invalid)
def test_ifloordiv_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
a //= b
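# Dtype combinations accepted by in-place true division: the lhs array must be
# of floating-point dtype, since the quotient cannot be stored in an integer array.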
_in_out_dtypes_inplace_truediv = [
(('float32', 'int16'), 'float32'),
(('float64', 'uint8'), 'float64'),
(('float16', 'float16'), 'float16'),
(('float32', 'float32'), 'float32'),
(('float64', 'float64'), 'float64'),
(('float32', 'float16'), 'float32'),
(('float16', 'float64'), 'float64'),
]
_in_out_dtypes_truediv = _in_out_dtypes_inplace_truediv + [
(('int8', 'int8'), 'float32'),
(('int16', 'int16'), 'float32'),
(('int32', 'int32'), 'float32'),
(('int64', 'int64'), 'float32'),
(('uint8', 'uint8'), 'float32'),
(('int8', 'int32'), 'float32'),
(('uint8', 'int64'), 'float32'),
(('int8', 'uint8'), 'float32'),
(('int32', 'float16'), 'float16'),
(('uint8', 'float32'), 'float32'),
]
_in_out_dtypes_inplace_truediv_scalar = [
(('int8',), int, 'float32'),
(('int16',), int, 'float32'),
(('int32',), int, 'float32'),
(('int64',), int, 'float32'),
(('uint8',), int, 'float32'),
(('float16',), int, 'float16'),
(('float32',), int, 'float32'),
(('float64',), int, 'float64'),
(('float16',), float, 'float16'),
(('float32',), float, 'float32'),
(('float64',), float, 'float64'),
]
_in_out_dtypes_truediv_scalar = _in_out_dtypes_inplace_truediv_scalar + [
(('int8',), float, 'float32'),
(('int16',), float, 'float32'),
(('int32',), float, 'float32'),
(('int64',), float, 'float32'),
(('uint8',), float, 'float32'),
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': _in_out_dtypes_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestTrueDivide(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
def setup(self):
super().setup()
dtype1, dtype2 = self.in_dtypes
if dtype1 == 'float16' or dtype2 == 'float16':
self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 5e-3, 'atol': 5e-3})
def generate_inputs(self):
a, b = super().generate_inputs()
if self.input_lhs == 'random':
# Avoid (-0.3, 0.3) interval
with math_utils.IgnoreNumpyFloatingPointError():
b[numpy.logical_and(-0.3 < b, b < 0.3)] = 1
return a, b
def func(self, xp, a, b):
if self.is_module:
return xp.divide(a, b)
else:
return a / b
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_truediv_invalid_dtypes(device, dtypes, is_module):
(in_dtype1, in_dtype2), _ = dtypes
shape = (2, 3)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
if is_module:
a / b
else:
chainerx.true_divide(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_inplace_binary,
'in_dtypes,out_dtype': _in_out_dtypes_inplace_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_inplace_truediv,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestITrueDivide(
math_utils.InplaceBinaryMathTestBase, op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
a, b = super().generate_inputs()
if self.input_lhs == 'random':
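            # Avoid divisors in the (-0.3, 0.3) interval, as in TestTrueDivide above.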
with math_utils.IgnoreNumpyFloatingPointError():
b[numpy.logical_and(-0.3 < b, b < 0.3)] = 1
return a, b
def func(self, xp, a, b):
a /= b
# TODO(hvy): Support and test zero division and mixed dtypes (dtype kinds).
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_truediv_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# Dtype combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_truediv_scalar,
'input': ['random'],
'scalar_value': [1],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'scalar_type': [float],
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [-1, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [True, False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestTrueDivideScalar(
math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
check_numpy_strides_compliance = False
def generate_inputs(self):
# Do not divide by small number to avoid ridiculously large outputs.
if not self.is_scalar_rhs and self.input == 'random':
in_dtype, = self.in_dtypes
low = -5 if numpy.dtype(in_dtype).kind != 'u' else 2
high = 5
x = array_utils.uniform(self.shape, in_dtype, low=low, high=high)
x[(-1 < x) & (x < 0)] = -2
x[(0 <= x) & (x < 1)] = 2
return x,
return super().generate_inputs()
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
return xp.divide(a, scalar)
else:
return xp.divide(scalar, a)
else:
if self.is_scalar_rhs:
return a / scalar
else:
return scalar / a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'scalar_type': [float],
'input': ['random'],
'scalar_value': [1],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
1, chainerx.testing.float_dtypes)),
'scalar_type': [float],
'input': [float('inf'), -float('inf'), float('nan')],
'scalar_value': [-1, 1, 2, float('inf'), -float('inf'), float('nan')],
})
))
class TestITrueDivideScalar(
math_utils.InplaceMathScalarTestBase, op_utils.NumpyOpTest):
def func_scalar(self, xp, a, scalar):
a /= scalar
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('in_dtypes,out_dtype', [
(('bool_',), 'int64'),
(('int8',), 'int64'),
(('int16',), 'int64'),
(('int32',), 'int64'),
(('int64',), 'int64'),
(('float16',), 'float16'),
(('float32',), 'float32'),
(('float64',), 'float64'),
# TODO(niboshi): Unsigned integer dtypes should result in uint64.
# Currently chainerx returns int64.
(('uint8',), 'int64'),
])
@chainer.testing.parameterize_pytest('shape,axis', [
((), None),
((), ()),
((2,), None),
((2,), ()),
((2,), 0),
((2,), (0,)),
((2,), (-1,)),
((2, 3), None),
((2, 3), ()),
((2, 3), 0),
((2, 3), (0,)),
((2, 3), (1,)),
((2, 3), (-1,)),
((2, 3), (-2,)),
((2, 3), (0, 1)),
((2, 3), (-2, -1)),
((1, 3), None), # sum over 1-dim axis
((0, 3), None), # sum over 0-dim axis
# Sum over axes that are in the middle or apart
((2, 3, 4), (1,)),
((2, 3, 4), (0, 2)),
# Sum over axes that are apart and/or unsorted
((2, 3), (1, 0)),
((2, 3, 4), (2, 0)),
((2, 3, 4), (2, 0, 1)),
((2, 3, 4), (-2, 2, 0)),
])
@chainer.testing.parameterize_pytest('keepdims', [True, False])
@chainer.testing.parameterize_pytest('is_module', [True, False])
class TestSum(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
in_dtype, = self.in_dtypes
if in_dtype == 'float16':
self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update(
{'rtol': 1e-2, 'atol': 1e-2})
def func(self, xp, a):
if self.is_module:
return xp.sum(a, axis=self.axis, keepdims=self.keepdims)
else:
return a.sum(axis=self.axis, keepdims=self.keepdims)
@op_utils.op_test(['native:0'])
class TestSumStability(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
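    # Summing 2 ** 20 float32 copies of 0.1 loses precision if the reduction
    # accumulates naively; the expected value is computed exactly as
    # x[0] * x.size, so this checks the numerical stability of sum().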
def generate_inputs(self):
return numpy.full(2 ** 20, 0.1, dtype=numpy.float32),
def forward_xp(self, inputs, xp):
x, = inputs
if xp is chainerx:
return x.sum(),
else:
return (x[0] * x.size).astype(x.dtype),
@op_utils.op_test(['native:0'])
@chainer.testing.parameterize_pytest('size', list(range(1024)))
class TestSumEachSize(op_utils.NumpyOpTest):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
return numpy.arange(self.size, dtype=numpy.int32) + 1,
def forward_xp(self, inputs, xp):
x, = inputs
return x.sum(),
@chainerx.testing.numpy_chainerx_array_equal(
accept_error=(chainerx.DimensionError, ValueError))
@pytest.mark.parametrize('keepdims', [False, True])
@pytest.mark.parametrize('shape,axis', [
# ((), 0), # TODO(sonots): Fix compatibility
((), 1),
((), (1,)),
((2,), 2),
((2,), (2,)),
((2,), (-2,)),
((2, 3,), (-3,)),
((2, 3,), (-3, -4)),
((2, 3,), (0, 0)),
((2, 3,), (-1, -1)),
((2, 3,), (0, 1, 1)),
((2, 3,), (0, -2)),
])
def test_sum_invalid(is_module, xp, shape, axis, keepdims, dtype):
a = array_utils.create_dummy_ndarray(xp, shape, dtype)
if is_module:
xp.sum(a, axis=axis, keepdims=keepdims)
else:
a.sum(axis=axis, keepdims=keepdims)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [1],
'is_scalar_rhs': [False],
})
# Differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [0, 2, 5],
'is_scalar_rhs': [False, True],
})
# Non-differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [1, 3, 4],
'is_scalar_rhs': [False, True],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Special float values
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': (
_in_out_dtypes_float_arithmetic_scalar),
# TODO(imanishi): Add test for NaN.
'input': [numpy.array([0, float('inf'), -float('inf')])],
'scalar_value': [-1, 0, 1, float('inf'), -float('inf')],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMinimumScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func_scalar(self, xp, a, scalar):
if self.is_scalar_rhs:
return xp.minimum(a, scalar)
else:
return xp.minimum(scalar, a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': ['random'],
'scalar_value': [0, 1],
'is_scalar_rhs': [False],
})
# Differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [0, 2, 5],
'is_scalar_rhs': [False, True],
})
# Non-differentiable cases
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [numpy.array([1, 3, 3, 4])],
'scalar_value': [1, 3, 4],
'is_scalar_rhs': [False, True],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Special float values
+ chainer.testing.product({
'in_dtypes,scalar_type,out_dtype': (
_in_out_dtypes_float_arithmetic_scalar),
# TODO(imanishi): Add test for NaN.
'input': [numpy.array([0, float('inf'), -float('inf')])],
'scalar_value': [-1, 0, 1, float('inf'), -float('inf')],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestMaximumScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func_scalar(self, xp, a, scalar):
if self.is_scalar_rhs:
return xp.maximum(a, scalar)
else:
return xp.maximum(scalar, a)
def _create_dummy_array_for_dot(xp, shape, dtype):
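    """Return a deterministic xp array of the given shape and dtype (bool arrays alternate True/False)."""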
x = numpy.arange(numpy.prod(shape)).reshape(shape)
if dtype == 'bool_':
x = numpy.asarray(x % 2 == 0)
else:
x = x.astype(dtype)
return xp.array(x)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
        'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [0, 2, -2],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestExp(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.exp(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestLog10(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.log10(a)
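# (shape, axis) pairs shared by the logsumexp, log_softmax and softmax tests below.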
_logsumexp_params = [
((2,), 0),
((2,), -1),
((2, 3), None),
((2, 3), 0),
((2, 3), 1),
((2, 3), -2),
((2, 3), (0, 1)),
((2, 3), (-2, 1)),
((1, 2, 3), None),
    ((1, 2, 3), (1,)),
((1, 2, 3), (1, 0)),
((1, 2, 3), (0, 1, 2)),
]
_invalid_logsumexp_params = [
# Axis out of bounds
((2,), 1),
((2,), -2),
((2,), (0, 1)),
((2, 3), (0, 1, 2)),
# Duplicate axes
((2,), (0, 0)),
((2, 3), (0, 0)),
]
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest('keepdims', [True, False])
class TestLogSumExp(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
        in_dtype, = self.in_dtypes
        if in_dtype == 'float16':
# TODO(imanishi): Support device implementation and remove this.
self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})
def forward_xp(self, inputs, xp):
x, = inputs
axis = self.axis
keepdims = self.keepdims
if xp is chainerx:
return chainerx.logsumexp(x, axis=axis, keepdims=keepdims),
x = x.astype(self.out_dtype)
return numpy.log(numpy.exp(x).sum(axis=axis, keepdims=keepdims)),
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params)
@pytest.mark.parametrize('keepdims', [True, False])
# TODO(hvy): Should not overflow for large numbers, add tests
def test_logsumexp_invalid(device, a_shape, axis, keepdims, dtype):
a = array_utils.create_dummy_ndarray(chainerx, a_shape, dtype)
with pytest.raises(chainerx.DimensionError):
chainerx.logsumexp(a, axis=axis, keepdims=keepdims)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
class TestLogSoftmax(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})
self.check_backward_options.update({'rtol': 3e-3, 'atol': 3e-3})
def forward_xp(self, inputs, xp):
x, = inputs
axis = self.axis
if xp is chainerx:
return chainerx.log_softmax(x, axis=axis),
x = x.astype(self.out_dtype)
axis = axis if axis is not None else 1
return x - numpy.log(numpy.exp(x).sum(axis=axis, keepdims=True)),
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params)
def test_log_softmax_invalid(device, a_shape, axis, dtype):
a = array_utils.create_dummy_ndarray(chainerx, a_shape, dtype)
with pytest.raises(chainerx.DimensionError):
return chainerx.log_softmax(a, axis=axis)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': ['random', float('inf'), -float('inf'), float('nan')],
'input_rhs': ['random', float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSquaredDifference(op_utils.OpTest):
def setup(self):
x1_dtype, x2_dtype = self.in_dtypes
if x1_dtype == 'float16' or x2_dtype == 'float16':
self.check_forward_options.update({'atol': 3e-3, 'rtol': 3e-3})
self.check_backward_options.update({'atol': 1e-2, 'rtol': 5e-2})
self.check_double_backward_options.update(
{'atol': 1e-2, 'rtol': 5e-2})
def generate_inputs(self):
shape = self.shape
x1_dtype, x2_dtype = self.in_dtypes
x1 = array_utils.uniform(shape, x1_dtype)
x2 = array_utils.uniform(shape, x2_dtype)
return x1, x2
def forward_chainerx(self, inputs):
x1, x2 = inputs
y = chainerx.squared_difference(x1, x2)
return y,
def forward_expected(self, inputs):
x1, x2 = inputs
y = numpy.asarray(
numpy.square(numpy.subtract(x1, x2))).astype(x1.dtype)
return y,
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Differentiable
chainer.testing.product({
'input': [
numpy.asarray(0.),
numpy.asarray(-1.),
numpy.asarray(1.),
numpy.asarray(10.),
numpy.full((), 2.),
numpy.full((0,), 2.),
numpy.full((2, 3), 2.)
]})
+
# Nondifferentiable
chainer.testing.product({
'input': [
numpy.asarray(float('inf')),
numpy.asarray(float('nan')),
],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
@pytest.mark.parametrize('contiguous', [None, 'C'])
class TestSigmoid(op_utils.NumpyOpTest):
def setup(self, contiguous, float_dtype):
self.dtype = float_dtype
self.contiguous = contiguous
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-3}
if float_dtype == 'float16':
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def generate_inputs(self):
return self.input,
def forward_xp(self, inputs, xp):
if xp is numpy:
return 1 / (1 + numpy.exp(-inputs[0])),
return xp.sigmoid(inputs[0]),
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize_pytest('shape,axis', _logsumexp_params)
@chainer.testing.parameterize_pytest(
'in_dtypes,out_dtype', math_utils.in_out_dtypes_math_functions)
class TestSoftmax(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
input = 'random'
def setup(self):
super().setup()
self.check_forward_options.update({'rtol': 3e-3, 'atol': 3e-3})
self.check_backward_options.update({'rtol': 3e-3, 'atol': 3e-3})
def forward_xp(self, inputs, xp):
x, = inputs
axis = self.axis
if xp is chainerx:
return chainerx.softmax(x, axis=axis),
x = x.astype(self.out_dtype)
axis = axis if axis is not None else 1
return numpy.exp(x) / (numpy.exp(x).sum(axis=axis, keepdims=True)),
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [-2, 0, 2],
'contiguous': [None, 'C'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSquare(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.square(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (1,), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
})
# Special shapes (array.size = 0)
+ chainer.testing.product({
'shape': [(0,), (2, 0, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [1, 3],
'check_numpy_strides_compliance': [False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan'), -1, 0],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestSqrt(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.sqrt(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs,input_rhs': [(2, 2)],
'is_module': [False],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': dtype_utils.result_numeric_dtypes_two_arrays,
'input_lhs,input_rhs': [(2, 2)],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs,input_rhs': [(2, 2)],
'is_module': [True, False],
})
# Special values (integers forward)
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.signed_integral_dtypes)),
'input_lhs': [-2, -1, 0, 1, 2, 5],
'input_rhs': [0, 1, 2, 5],
'is_module': [False],
})
# Special values (floats forward)
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': [-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'input_rhs': [-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
# Special values (floats backward)
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.float_dtypes)),
'input_lhs': [-3.0, -1.2, 1.2, 3],
'input_rhs': [-3.0, -1.2, 0.0, 1.2, 3.0],
'is_module': [False],
})
))
class TestPower(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
def setup(self):
super().setup()
in_dtype1, in_dtype2 = self.in_dtypes
if in_dtype1 == 'float16' or in_dtype2 == 'float16':
self.check_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 5e-3, 'atol': 5e-3})
def func(self, xp, a, b):
if self.is_module:
y = xp.power(a, b)
else:
y = a ** b
return y
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [2],
'scalar_value': [1.2, 2],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# Type combinations
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [2],
'scalar_value': [1.2, 2],
'is_module': [False],
'is_scalar_rhs': [True, False],
})
# is_module
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype': _in_out_dtypes_arithmetic_scalar,
'input': [2],
'scalar_value': [1.2, 2],
'is_module': [True, False],
'is_scalar_rhs': [True, False],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,scalar_type,out_dtype':
_in_out_dtypes_float_arithmetic_scalar,
'input': [-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'scalar_value': [
-1, 0, 1, 2, float('inf'), -float('inf'), float('nan')],
'is_module': [False],
'is_scalar_rhs': [False],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestPowerScalar(math_utils.MathScalarTestBase, op_utils.NumpyOpTest):
def setup(self):
super().setup()
        in_dtype, = self.in_dtypes
        if in_dtype == 'float16':
self.check_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
self.check_double_backward_options.update(
{'rtol': 5e-3, 'atol': 5e-3})
def func_scalar(self, xp, a, scalar):
if self.is_module:
if self.is_scalar_rhs:
y = xp.power(a, scalar)
else:
y = xp.power(scalar, a)
else:
if self.is_scalar_rhs:
y = a ** scalar
else:
y = scalar ** a
return y
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtype', chainerx.testing.all_dtypes)
@pytest.mark.parametrize('is_bool_rhs', [True, False])
@pytest.mark.parametrize('is_bool_primitive', [True, False])
@pytest.mark.parametrize('is_module', [True, False])
def test_power_invalid_bool_dtype(
device, dtype, is_bool_rhs, is_bool_primitive, is_module):
shape = (3, 2)
a = chainerx.array(array_utils.uniform(shape, dtype))
if is_bool_primitive:
b = True
else:
b = chainerx.array(array_utils.uniform(shape, 'bool'))
with pytest.raises(chainerx.DtypeError):
if is_module:
if is_bool_rhs:
chainerx.power(a, b)
else:
chainerx.power(b, a)
else:
if is_bool_rhs:
a ** b
else:
b ** a
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': ['random'],
'contiguous': [None, 'C'],
})
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestAbs(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func(self, xp, a):
assert chainerx.abs is chainerx.absolute
return xp.abs(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'shape': [(), (0,), (1,), (2, 0, 3), (1, 1, 1), (2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_dtypes_math_functions,
'input': [-2.5, -1.5, -0.1, 0.1, 1.5, 2.5],
'contiguous': [None, 'C'],
})
# Special values
+ chainer.testing.product({
'shape': [(2, 3)],
'in_dtypes,out_dtype': math_utils.in_out_float_dtypes_math_functions,
'input': [float('inf'), -float('inf'), float('nan')],
'skip_backward_test': [True],
'skip_double_backward_test': [True],
})
))
class TestFabs(math_utils.UnaryMathTestBase, op_utils.NumpyOpTest):
def func(self, xp, a):
return xp.fabs(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0.5),
numpy.asarray(-1.2),
numpy.asarray(10.9),
numpy.asarray(-10.6),
numpy.asarray(0.),
numpy.asarray(float('inf')),
numpy.asarray(-float('inf')),
numpy.asarray(float('nan')),
numpy.full((), 2.1),
numpy.full((0,), 2),
numpy.full((2, 3), 0),
numpy.full((2, 3), 2.6),
numpy.full((1, 1), -1.01),
numpy.full((1, 1), 1.99),
])
@pytest.mark.parametrize('dtypes', [
(('int8',), 'int8'),
(('int16',), 'int16'),
(('int32',), 'int32'),
(('int64',), 'int64'),
(('float16',), 'float16'),
(('float32',), 'float32'),
(('float64',), 'float64'),
])
def test_sign(xp, device, input, dtypes):
(in_dtype, ), out_dtype = dtypes
a = xp.array(input.astype(in_dtype))
return xp.sign(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0.5),
numpy.asarray(-1.2),
numpy.asarray(10.9),
numpy.asarray(float('inf')),
numpy.asarray(-float('inf')),
numpy.asarray(float('nan')),
numpy.full((), 2.1),
numpy.full((0,), 2),
numpy.full((2, 3), 2.6),
numpy.full((1, 1), 1.01),
numpy.full((1, 1), 1.99),
])
@pytest.mark.parametrize('dtypes', math_utils.in_out_dtypes_math_functions)
@pytest.mark.parametrize('func', [
lambda xp, a: xp.ceil(a),
lambda xp, a: xp.floor(a)
])
def test_rounding_routines(func, xp, device, input, dtypes):
(in_dtype, ), out_dtype = dtypes
a = xp.array(input.astype(in_dtype))
a = func(xp, a)
a = dtype_utils.cast_if_numpy_array(xp, a, out_dtype)
return a
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0), numpy.asarray(-1), numpy.asarray(
10), numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
numpy.asarray(float('nan')), numpy.full(
(), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
def test_isnan(xp, device, input, dtype):
a = xp.array(input.astype(dtype))
return xp.isnan(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0), numpy.asarray(-1), numpy.asarray(
10), numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
numpy.asarray(float('nan')), numpy.full(
(), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
def test_isinf(xp, device, input, dtype):
a = xp.array(input.astype(dtype))
return xp.isinf(a)
@chainerx.testing.numpy_chainerx_array_equal()
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('input', [
numpy.asarray(0), numpy.asarray(-1), numpy.asarray(
10), numpy.asarray(float('inf')), numpy.asarray(-float('inf')),
numpy.asarray(float('nan')), numpy.full(
(), 2), numpy.full((0,), 2), numpy.full((2, 3), 2)
])
def test_isfinite(xp, device, input, dtype):
a = xp.array(input.astype(dtype))
return xp.isfinite(a)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# Dtype combinations
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [True, False],
})
# TODO(aksub99): Add tests for inf and NaN.
))
class TestMaximum(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func(self, xp, a, b):
return xp.maximum(a, b)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_maximum_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (3, 2)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
chainerx.maximum(a, b)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
# Special shapes
chainer.testing.product({
'in_shapes': math_utils.shapes_combination_binary,
'in_dtypes,out_dtype': (
dtype_utils.make_same_in_out_dtypes(
2, chainerx.testing.numeric_dtypes)),
'input_lhs': ['random'],
'input_rhs': ['random'],
'is_module': [False],
})
# is_module
+ chainer.testing.product({
'in_shapes': [((2, 3), (2, 3))],
'in_dtypes,out_dtype': _in_out_dtypes_arithmetic,
'input_lhs': ['random'],
'input_rhs': ['random'],
})
# TODO(aksub99): Add tests for inf and NaN.
))
class TestMinimum(math_utils.BinaryMathTestBase, op_utils.NumpyOpTest):
dodge_nondifferentiable = True
def func(self, xp, a, b):
return xp.minimum(a, b)
@pytest.mark.parametrize_device(['native:0', 'cuda:0'])
@pytest.mark.parametrize('dtypes', _in_out_dtypes_arithmetic_invalid)
def test_minimum_invalid_dtypes(device, dtypes):
(in_dtype1, in_dtype2), _ = dtypes
shape = (3, 2)
a = chainerx.array(array_utils.uniform(shape, in_dtype1))
b = chainerx.array(array_utils.uniform(shape, in_dtype2))
with pytest.raises(chainerx.DtypeError):
chainerx.minimum(a, b)
| mit | 5,094,575,712,246,725,000 | 31.929159 | 79 | 0.548557 | false |
barentsen/dave | diffimg/fastpsffit.py | 1 | 4310 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 16:39:13 2018
A much faster PRF fitter, with the caveat that the psf model is hardcoded.
psffit.py can fit an arbitrary PSF model to an image. The cost of this flexibility
is that it must perform numerical integration to calculate the flux in each pixel.
This is slow. On my test machine, a 10x12 image takes 20ms to compute.
Since by far the most common model to fit is that of a symmetric Gaussian function with
a constant sky background, and this model can be computed quite quickly, this module
enables this special case to be run much faster. On the same machine, the same image
can be computed in 95.7us, or a x200 speed up. There's still more speed up to
be had if you make a Model() class that assigns memory for the model once and overwrites
it each time instead of computing from scratch in each call.
The downside is that none of the code is shared with the general purpose code.
Efforts to use numba don't seem to help much for some reason
The only two public methods are
* fastGaussianPrfFit
* computeModel
@author: fergal
"""
from __future__ import print_function
from __future__ import division
from pdb import set_trace as debug
import scipy.optimize as spOpt
from numba.types import CPointer, float64, intc
from scipy.special import erf
import numpy as np
def fastGaussianPrfFit(img, guess):
"""Fit a Symmetric Gaussian PSF to an image, really quickly
Inputs
--------
img
(2d numpy array) Image to fit
guess
(tuple or array) Elements are
* col0, row0
Location of PSF centroid
* sigma
Width of gaussian
* flux
Height of gaussian. Beware this is not normalized
* sky
Background level
Returns
------------
    A scipy.optimize.OptimizeResult object. The .x attribute contains the best-fit parameters
"""
assert len(guess) == 5
mask = None
soln = spOpt.minimize(costFunc, guess,args=(img,mask), method='Nelder-Mead', bounds=None)
return soln
def costFunc(arglist, img, mask=None):
"""Compute difference between image and its model for given model params
Inputs
----------
arglist
(tuple or array) Tunable parameters of model
img
(2d np array) Image to fit
Optional Inputs
----------------
mask
(2d np array) Zero elements of mask indicate bad data which should not be
included in the fit
Returns
----------
float
"""
nr, nc = img.shape
model = computeModel(nc, nr, arglist)
diff = img - model
if mask is not None:
assert np.all( mask.shape == img.shape)
diff[~mask] = 0
img[~mask] = 0 #In case bad values are set to Nan
cost = np.sqrt( np.sum(diff**2) )
return cost
def computeModel(numCols, numRows, arglist):
"""Compute model flux for an image with size (numCols, numRows)
Inputs
-------
numCols, numRows
(ints) Shape of the image to compute the model PRF for
arglist
(tuple or array) Tunable parameters of the model
Returns
----------
A 2d numpy array representing the model PRF image.
"""
model = np.zeros( (numRows, numCols) )
xc = np.arange(numCols)
xr = np.arange(numRows)
cols, rows = np.meshgrid(xc, xr)
model = analytic_gaussian_integral(cols, rows, *arglist)
return model
def analytic_gaussian_integral(col, row, col0, row0, sigma0, flux0, sky):
z_col1 = .5 * (col - col0) / sigma0
z_col2 = .5 * (col+1 - col0) / sigma0
z_row1 = .5 * (row - row0) / sigma0
z_row2 = .5 * (row+1 - row0) / sigma0
flux = flux0
flux *= phi(z_col2) - phi(z_col1)
flux *= phi(z_row2) - phi(z_row1)
flux += sky
return flux
#Precompute for speed
sqrt2 = np.sqrt(2)
def phi(z):
"""Compute integral of gaussian function in the range (-Inf, z],
`z` is defined as (x - x0) / sigma, where x0 is the central value of the Gaussian.
See `scipy.special.erf` for details
"""
return .5 * ( 1 + erf(z/sqrt2) )
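if __name__ == '__main__':
    # Minimal usage sketch (illustrative values only, not part of the original
    # interface): build a noiseless model star with computeModel(), then recover
    # its parameters with fastGaussianPrfFit() starting from an offset guess.
    true_params = [6.3, 4.8, 1.1, 2000.0, 10.0]  # col0, row0, sigma, flux, sky
    img = computeModel(12, 10, true_params)      # 10 rows x 12 columns
    guess = [6.0, 5.0, 1.0, 1800.0, 5.0]
    soln = fastGaussianPrfFit(img, guess)
    print("Best fit parameters:", soln.x)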
| mit | 5,773,724,286,923,327,000 | 24.502959 | 93 | 0.639675 | false |
caedesvvv/b2rex | scripts/b2rexpkg/charexporter.py | 1 | 8874 | """
RealXtend character exporter
"""
import os
import b2rexpkg
from b2rexpkg.siminfo import GridInfo
from b2rexpkg.simconnection import SimConnection
from b2rexpkg.ogre_exporter import OgreExporter
from b2rexpkg.hooks import reset_uuids
from ogrepkg.base import indent
from ogrepkg.armatureexport import GetArmatureObject
from ogremeshesexporter import ArmatureAnimationProxyManager, ArmatureAnimationProxy
import Blender
class CharacterExporter(object):
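    # UUIDs for the standard avatar animation states; Blender action names are
    # matched against these keys case-insensitively (see actions_map in __init__).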
action_uuids = {'Walk': '6ed24bd8-91aa-4b12-ccc7-c97c857ab4e0',
'CrouchWalk': "47f5f6fb-22e5-ae44-f871-73aaaf4a6022",
'Fly': "aec4610c-757f-bc4e-c092-c6e9caf18daf",
"HoverDown": "20f063ea-8306-2562-0b07-5c853b37b31e",
"HoverUp": "62c5de58-cb33-5743-3d07-9e4cd4352864",
"Hover": "4ae8016b-31b9-03bb-c401-b1ea941db41d",
"Run": "05ddbff8-aaa9-92a1-2b74-8fe77a29b445",
"Sit": "1a5fe8ac-a804-8a5d-7cbd-56bd83184568",
"SitGround": "1c7600d6-661f-b87b-efe2-d7421eb93c86",
"Stand": "2408fe9e-df1d-1d7d-f4ff-1384fa7b350f"}
def __init__(self):
# rest
self.gridinfo = GridInfo()
self.sim = SimConnection()
self.ogre = OgreExporter()
self.settings = {}
self.actions_map = {}
for name in self.action_uuids:
self.actions_map[name.lower()] = name
def connect(self, base_url):
"""
Connect to an opensim instance
"""
self.gridinfo.connect(base_url)
print self.sim.connect(base_url)
def test(self):
"""
Api tests
"""
print self.gridinfo.getGridInfo()["gridnick"]
regions = self.gridinfo.getRegions()
for id in regions:
region = regions[id]
print " *", region["name"], region["x"], region["y"], id
# xmlrpc
print self.sim.login("caedes", "caedes", "pass")
print self.sim.sceneClear("d9d1b302-5049-452d-b176-3a9561189ca4",
"cube")
print self.sim.sceneUpload("d9d1b302-5049-452d-b176-3a9561189ca4",
"cube",
"/home/caedes/groupmembers.zip")
def writeAnimation(self, f, id, name, internal_name):
"""
Write an animation to the avatar file
"""
f.write(indent(1)+'<animation name="'+name+'" ')
f.write('id="'+id+'" internal_name="'+internal_name+'" ')
f.write('looped="1" speedfactor="1.0" ')
if 'walk' in name.lower() or 'run' in name.lower():
f.write('usevelocity="1" ')
f.write('fadein="0.25" ')
f.write('fadeout="0.25" />\n')
def writeAnimations(self, f):
"""
Write all animations to the avatar file
"""
actions = Blender.Armature.NLA.GetActions()
for name, action in actions.items():
if action.name.lower() in self.actions_map:
action_name = self.actions_map[action.name.lower()]
action_uuid = self.action_uuids[action_name]
else:
action_name = action.name
action_uuid = 'not-needed' # has to exist according to manual
self.writeAnimation(f,
action_uuid,
action_name,
action.name)
def writeProperty(self, f, name, value):
"""
Write an avatar property
"""
f.write(indent(1) + '<property name="'+name+'" value="'+value+'" />')
def writeProperties(self, f):
"""
Write all properties
"""
if self.settings['MovementSpeed']:
self.writeProperty(f, 'MovementSpeed', self.settings['MovementSpeed']) # needed??
# automatic ground offset:
# bone which should be adjusted to align with the ground
if self.settings['basebone']:
self.writeProperty(f, 'basebone', self.settings['basebone'])
# avatar skeleton's hierarchy root
if self.settings['rootbone']:
self.writeProperty(f, 'rootbone', self.settings['rootbone'])
# finetuning
if self.settings['baseoffset']:
self.writeProperty(f, 'baseoffset', self.settings['baseoffset'])
return
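# NOTE: the early return above means the head-turning properties below are never written.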
# parametrized head turning:
if self.settings['headbone']:
self.writeProperty(f, 'headbone', '')
if self.settings['neckbone']:
self.writeProperty(f, 'neckbone', '')
if self.settings['torsobone']:
self.writeProperty(f, 'torsobone', '')
if self.settings['headboneaxis']:
self.writeProperty(f, 'headboneaxis', '') # optional
if self.settings['neckboneaxis']:
self.writeProperty(f, 'neckboneaxis', '') # optional
if self.settings['torsoboneaxis']:
self.writeProperty(f, 'torsoboneaxis', '') # optional
def writeAvatarFile(self, f):
"""
Write an avatar file for the selected mesh.
"""
f.write('<?xml version="1.0" encoding="utf-8" ?>\n')
f.write('<avatar>\n')
f.write(indent(1)+'<version>0.2</version>\n')
f.write(indent(1)+'<base name="default_female" mesh="'+self.settings['mesh_file']+'" />\n')
f.write(indent(1)+'<skeleton name="'+self.settings['skeleton_file']+'" />\n')
#f.write(indent(1)+'<material name="male/Body" />\n')
#f.write(indent(1)+'<material name="male/Face" />\n')
first_face_image = self.getMesh().getData(0, True).faces[0].image
if first_face_image:
texture_name = os.path.basename(first_face_image.getFilename())
else:
texture_name = ''
f.write(indent(1)+'<texture_body name="'+texture_name+'" />\n')
#f.write(indent(1)+'<texture_face name="" />\n')
f.write(indent(1)+'<appearance height="1.800000" weight="1" />\n')
f.write(indent(1)+'<transformation position="%s" rotation="%s" \
scale="%s" />\n' % (self.settings['translation'],
self.settings['rotation'],
self.settings['scale']))
self.writeProperties(f)
self.writeAnimations(f)
f.write('</avatar>')
def createAvatarFile(self, path):
"""
Create the avatar file at the specified location.
"""
character_name = self.settings['character_name']
f = open(os.path.join(path, character_name + '.xml'), 'w')
self.writeAvatarFile(f)
f.close()
def getMesh(self):
"""
Get the selected mesh
"""
selected = Blender.Object.GetSelected()
for sel in selected:
if sel.getType() == 'Mesh':
return sel
def getArmature(self):
"""
Get the selected object's armature
"""
bObject = self.getMesh()
return GetArmatureObject(bObject)
def parseSettings(self, exportSettings):
"""
Decide settings for export
"""
mesh = self.getMesh()
name = mesh.getData(0, True).name
armature_name = self.getArmature().name
self.settings['character_name'] = mesh.name
self.settings['mesh_file'] = name + '.mesh'
self.settings['skeleton_file'] = armature_name + '.skeleton'
self.settings.update(exportSettings.getDict())
def setupAnimations(self):
"""
Setup animations on the ogre exporter.
"""
ogreSelection = self.ogre.meshapp.selectedObjectManager
ogreSelection.updateSelection()
armatureManager = ogreSelection.getArmatureAnimationProxyManager(self.getMesh().getData(True))
armatureManager.removeProxies() # cleanup
armatureManager.animationProxyKeyList = [] # shouldn't be needed
armatureManager.update()
actionList = armatureManager.getActions()
for action in actionList:
bAction = action.bAction
anim = ArmatureAnimationProxy(armatureManager, action,
action.getName(),
action.getFirstFrame(),
action.getLastFrame())
armatureManager.addProxy(anim)
armatureManager.savePackageSettings()
def export(self, path, pack_name, offset, exportSettings):
"""
Export the character and its avatar file.
"""
b2rexpkg.start()
self.setupAnimations()
self.ogre.export(path, pack_name, offset)
self.parseSettings(exportSettings)
self.createAvatarFile(path)
#f = open(os.path.join(path, pack_name + ".uuids"), 'w')
#b2rexpkg.write(f)
#f.close()
| lgpl-3.0 | -3,429,808,671,769,813,000 | 37.751092 | 102 | 0.564683 | false |
ARM-software/bob-build | tests/source_encapsulation/gen_fun3.py | 1 | 2535 | #!/usr/bin/env python
# Copyright 2020 Arm Limited.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import sys
def check_expected_input(input_files, expected_files):
if len(input_files) != len(expected_files):
print("Length mismatch! Input: {} Expected: {}".format(input_files, expected_files))
sys.exit(1)
for exp in expected_files:
found = False
for inp in input_files:
if inp.endswith(exp):
found = True
break
if not found:
print("Missed expected file '{}' within input {}".format(exp, input_files))
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description='''Check whether provided input files match the \
expected ones. Generate fun3.c using input \
from funcs.txt''')
parser.add_argument('--in', dest='input', nargs='+', default=[], required=True,
help='Input file list')
parser.add_argument('--expected', dest='expected', default=[], nargs='+',
required=True, help='Expected input file list')
parser.add_argument('--out', dest='output', action='store', required=True, help='Output file',
type=argparse.FileType('wt'))
args = parser.parse_args()
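# Template for the generated C source; %(funcs)s is filled from the contents of funcs.txt.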
s = '''
#define FUNCS "%(funcs)s"
int fun3(void)
{
return 0;
}
'''.lstrip()
check_expected_input(args.input, args.expected)
try:
for f in args.input:
filename = os.path.basename(f)
if filename == "funcs.txt":
with open(f, 'r') as infile:
d = {'funcs': infile.read()}
args.output.write((s % d) + '\n')
except IOError as e:
print("Input file couldn't be opened: " + str(e))
sys.exit(1)
if __name__ == "__main__":
main()
| apache-2.0 | -6,791,542,741,674,078,000 | 32.355263 | 98 | 0.589349 | false |
hugollm/lie2me | tests/test_form.py | 1 | 6803 | from unittest import TestCase
from lie2me import Form, fields
from lie2me.exceptions import BadValidation
class FormTestCase(TestCase):
def test_form_without_fields_is_always_valid(self):
form = Form({'foo': 'bar'})
form.submit()
self.assertEqual(form.errors, {})
def test_before_submission_form_valid_attribute_is_none(self):
form = Form()
self.assertEqual(form.valid, None)
def test_form_data_is_accessible_and_unchanged_before_validation(self):
form = SignupForm({
'name': 'John Doe',
'email': 'john.doe@domain.com',
'password': '123',
'password2': '123',
})
self.assertEqual(form.data, {
'name': 'John Doe',
'email': 'john.doe@domain.com',
'password': '123',
'password2': '123',
})
def test_form_validation_against_valid_data(self):
form = SignupForm({
'name': 'John Doe',
'email': 'john.doe@domain.com',
'password': '123',
'password2': '123',
})
form.submit()
self.assertEqual(form.valid, True)
self.assertEqual(form.errors, {})
def test_successful_validation_replaces_form_data_with_new_data(self):
form = SignupForm({
'name': ' John Doe ',
'email': 'john.doe@domain.com',
'password': '123',
'password2': '123',
})
form.submit()
self.assertEqual(form.data, {
'name': 'John Doe',
'email': 'john.doe@domain.com',
'password': '123',
'password2': '123',
'observations': None,
})
def test_unsuccessful_validation_does_not_replace_form_data_with_new_data(self):
form = SignupForm({
'name': ' John Doe ',
})
form.submit()
self.assertEqual(form.data['name'], ' John Doe ')
def test_form_validation_against_invalid_data(self):
form = SignupForm({
'name': 'a' * 201,
'email': 'john.doe@domain',
'password': '123',
'password2': '1234',
})
form.submit()
self.assertEqual(form.valid, False)
self.assertEqual(form.errors, {
'name': 'Must have no more than 200 characters.',
'email': 'Invalid email.',
'password2': 'Password confirmation does not match.',
})
def test_form_has_no_errors_before_calling_validate_even_if_data_is_invalid(self):
form = SignupForm({
'name': 'a' * 201,
'email': 'john.doe@domain',
'password': '12',
'password2': '123',
})
self.assertEqual(form.errors, {})
def test_form_without_errors_returning_none_in_validation_method_raises_exception(self):
form = BadValidationForm()
with self.assertRaises(BadValidation) as context:
form.submit()
self.assertEqual(str(context.exception), 'Form validation did not return any data.')
def test_nested_form_empty_data(self):
form = ProfileForm()
self.assertEqual(form.data, {'address': {}})
def test_nested_form_validated_data(self):
form = ProfileForm({
'name': 'John Doe',
'email': 'john.doe@domain.com',
'address': {
'street': 'Nowhere Street',
'number': 42,
}
})
form.submit()
self.assertEqual(form.valid, True)
self.assertEqual(form.data, {
'name': 'John Doe',
'email': 'john.doe@domain.com',
'address': {
'street': 'Nowhere Street',
'number': 42,
'complement': None,
}
})
def test_nested_form_errors(self):
form = ProfileForm({
'name': 'a' * 201,
'email': 'john.doe@domain',
'address': {
'street': 'a' * 201,
'number': -1,
}
})
form.submit()
self.assertEqual(form.valid, False)
self.assertEqual(form.errors, {
'name': 'Must have no more than 200 characters.',
'email': 'Invalid email.',
'address': {
'street': 'Must have no more than 200 characters.',
'number': 'Must not be lower than 0.',
}
})
def test_nested_form_with_error_only_in_nested_form(self):
form = ProfileForm({
'name': 'John Doe',
'email': 'john.doe@domain.com',
'address': {
'street': 'Nowhere Street',
'number': -1,
}
})
form.submit()
self.assertEqual(form.valid, False)
self.assertEqual(form.errors, {
'address': {
'number': 'Must not be lower than 0.',
}
})
def test_invalid_data_object_gets_replaced_by_no_data(self):
form = ProfileForm([1, 2, 3])
self.assertEqual(form.data, {
'address': {}
})
form.submit()
self.assertEqual(form.valid, False)
self.assertEqual(form.errors, {
'name': 'This is required.',
'email': 'This is required.',
'address': {
'street': 'This is required.',
'number': 'This is required.',
},
})
def test_weird_values_as_data_do_not_cause_exceptions(self):
form = ProfileForm()
form.submit()
form = ProfileForm(None)
form.submit()
form = ProfileForm(42)
form.submit()
form = ProfileForm([])
form.submit()
form = ProfileForm([1, 2, 3])
form.submit()
form = ProfileForm({1, 2, 3})
form.submit()
form = ProfileForm(object())
form.submit()
class SignupForm(Form):
name = fields.Text(max=200)
email = fields.Email()
password = fields.Text(min=3, trim=False)
password2 = fields.Text(trim=False)
observations = fields.Text(required=False)
_ignored_field = fields.Text()
def validate(self, data):
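# Cross-field check: the two password fields must match when both are present.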
if 'password' in data and 'password2' in data:
if data['password'] != data['password2']:
self.error('password2', 'Password confirmation does not match.')
return data
class BadValidationForm(Form):
name = fields.Text(required=False)
def validate(self, data):
pass
class AddressForm(Form):
street = fields.Text(max=200)
number = fields.Integer(min=0)
complement = fields.Text(required=False)
class ProfileForm(Form):
name = fields.Text(max=200)
email = fields.Email()
address = AddressForm
| mit | -5,037,487,307,387,595,000 | 28.969163 | 92 | 0.5208 | false |
zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/quests/125_IntheNameofEvilPart1/__init__.py | 1 | 6260 | import sys
from com.l2scoria.gameserver.datatables import SkillTable
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "125_IntheNameofEvilPart1"
# NPCs
MUSHIKA = 32114
KARAKAWEI = 32117
ULU_KAIMU = 32119
BALU_KAIMU = 32120
CHUTA_KAIMU = 32121
# ITEMS
GAZKH_FRAGMENT = 8782
ORNITHOMIMUS_CLAW = 8779
DEINONYCHUS_BONE = 8780
EPITAPH_OF_WISDOM = 8781
# MOBS
ORNITHOMIMUS = [ 22200,22201,22202,22219,22224,22742,22744 ]
DEINONYCHUS = [ 16067,22203,22204,22205,22220,22225,22743,22745 ]
# DROP
DROP_CHANCE = 30
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [GAZKH_FRAGMENT,ORNITHOMIMUS_CLAW,DEINONYCHUS_BONE,EPITAPH_OF_WISDOM]
def onAdvEvent(self, event, npc, player) :
htmltext = event
st = player.getQuestState(qn)
if not st : return
cond = st.getInt("cond")
if event == "32114-05.htm" :
st.setState(STARTED)
st.set("cond","1")
st.playSound("ItemSound.quest_accept")
elif event == "32114-09.htm" and cond == 1 :
st.set("cond","2")
st.giveItems(GAZKH_FRAGMENT,1)
st.playSound("ItemSound.quest_middle")
elif event == "32117-08.htm" and cond == 2 :
st.set("cond","3")
st.playSound("ItemSound.quest_middle")
elif event == "32117-14.htm" and cond == 4 :
st.set("cond","5")
st.playSound("ItemSound.quest_middle")
elif event == "32119-02.htm" :
st.set("pilar1","0")
elif cond == 5 and event.isdigit() :
correct = st.getInt("pilar1")
st.set("pilar1", str(correct+1))
htmltext = "32119-0"+str(int(event)+2)+".htm"
elif event == "32119-06.htm" and cond == 5 :
if st.getInt("pilar1") < 4 :
htmltext = "32119-00.htm"
st.unset("pilar1")
elif event == "32119-14.htm" and cond == 5 :
st.set("cond","6")
st.playSound("ItemSound.quest_middle")
elif event == "32120-02.htm" :
st.set("pilar2","0")
elif cond == 6 and event.isdigit() :
correct = st.getInt("pilar2")
st.set("pilar2", str(correct+1))
htmltext = "32120-0"+str(int(event)+2)+".htm"
elif event == "32120-06.htm" and cond == 6 :
if st.getInt("pilar2") < 4 :
htmltext = "32120-00.htm"
st.unset("pilar2")
elif event == "32120-15.htm" and cond == 6 :
st.set("cond","7")
st.playSound("ItemSound.quest_middle")
elif event == "32121-02.htm" :
st.set("pilar3","0")
elif cond == 7 and event.isdigit() :
correct = st.getInt("pilar3")
st.set("pilar3", str(correct+1))
htmltext = "32121-0"+str(int(event)+2)+".htm"
elif event == "32121-06.htm" and cond == 7 :
if st.getInt("pilar3") < 4 :
htmltext = "32121-00.htm"
st.unset("pilar3")
elif event == "32121-16.htm" and cond == 7 :
st.set("cond","8")
st.takeItems(GAZKH_FRAGMENT,-1)
st.giveItems(EPITAPH_OF_WISDOM,1)
st.playSound("ItemSound.quest_middle")
return htmltext
def onTalk (self, npc, player) :
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
cond = st.getInt("cond")
npcId = npc.getNpcId()
if npcId == MUSHIKA :
first = player.getQuestState("124_MeetingTheElroki")
if st.getState() == COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif first and first.getState().getName() == 'Completed' and st.getState() == CREATED and player.getLevel() >= 76 :
htmltext = "32114-01.htm"
elif cond == 0 :
htmltext = "32114-00.htm"
elif cond == 1 :
htmltext = "32114-07.htm"
elif cond == 2 :
htmltext = "32114-10.htm"
elif cond >= 3 and cond < 8:
htmltext = "32114-11.htm"
elif cond == 8 :
st.addExpAndSp(859195,86603)
st.unset("cond")
st.unset("pilar1")
st.unset("pilar2")
st.unset("pilar3")
st.setState(COMPLETED)
st.exitQuest(False)
st.playSound("ItemSound.quest_finish")
htmltext = "32114-12.htm"
elif npcId == KARAKAWEI :
if cond == 2 :
htmltext = "32117-01.htm"
elif cond == 3 :
htmltext = "32117-09.htm"
elif cond == 4 :
st.takeItems(ORNITHOMIMUS_CLAW,-1)
st.takeItems(DEINONYCHUS_BONE,-1)
st.playSound("ItemSound.quest_middle")
htmltext = "32117-10.htm"
elif cond == 5 :
htmltext = "32117-15.htm"
elif cond == 6 or cond == 7 :
htmltext = "32117-16.htm"
elif cond == 8 :
htmltext = "32117-17.htm"
elif npcId == ULU_KAIMU :
if cond == 5 :
npc.doCast(SkillTable.getInstance().getInfo(5089,1))
htmltext = "32119-01.htm"
elif cond == 6 :
htmltext = "32119-14.htm"
elif npcId == BALU_KAIMU :
if cond == 6 :
npc.doCast(SkillTable.getInstance().getInfo(5089,1))
htmltext = "32120-01.htm"
elif cond == 7 :
htmltext = "32120-16.htm"
elif npcId == CHUTA_KAIMU :
if cond == 7 :
npc.doCast(SkillTable.getInstance().getInfo(5089,1))
htmltext = "32121-01.htm"
elif cond == 8 :
htmltext = "32121-17.htm"
return htmltext
def onKill(self, npc, player, isPet) :
st = player.getQuestState(qn)
if not st : return
if st.getInt("cond") == 3 :
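# Two Ornithomimus claws and two Deinonychus bones are needed to advance the quest.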
if npc.getNpcId() in ORNITHOMIMUS :
if st.getQuestItemsCount(ORNITHOMIMUS_CLAW) < 2 :
if st.getRandom(100) < DROP_CHANCE :
st.giveItems(ORNITHOMIMUS_CLAW,1)
st.playSound("ItemSound.quest_itemget")
elif npc.getNpcId() in DEINONYCHUS :
if st.getQuestItemsCount(DEINONYCHUS_BONE) < 2 :
if st.getRandom(100) < DROP_CHANCE :
st.giveItems(DEINONYCHUS_BONE,1)
st.playSound("ItemSound.quest_itemget")
if st.getQuestItemsCount(ORNITHOMIMUS_CLAW) == 2 and st.getQuestItemsCount(DEINONYCHUS_BONE) == 2 :
st.set("cond","4")
st.playSound("ItemSound.quest_middle")
return
QUEST = Quest(125,qn,"The Name of Evil - 1")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(MUSHIKA)
QUEST.addTalkId(MUSHIKA)
QUEST.addTalkId(KARAKAWEI)
QUEST.addTalkId(ULU_KAIMU)
QUEST.addTalkId(BALU_KAIMU)
QUEST.addTalkId(CHUTA_KAIMU)
for i in ORNITHOMIMUS :
QUEST.addKillId(i)
for i in DEINONYCHUS :
QUEST.addKillId(i) | gpl-3.0 | 3,013,677,245,302,723,600 | 29.691176 | 118 | 0.652236 | false |
wannabeCitizen/projectsystem | lib/verify.py | 1 | 2667 | """
For handling permission and verification requests
"""
import json
import datetime
from lib.model import User, Organization, MiniOrganization, IdeaMeta, Project
#Checks if someone is an owner of an organization they are trying to modify
def is_owner(org_id, user_id):
my_org = Organization.objects.get(unique=org_id)
if user_id in my_org.owners:
return True
return False
# Checks whether the organization is open; if not, whether the user is allowed to add members
def can_add(org_id, user_id):
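# In an open organization anyone may add members; otherwise only owners may.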
my_org = Organization.objects.get(unique=org_id)
if my_org.open_org == True:
return True
else:
if user_id in my_org.owners:
return True
return False
#Checks if user is in organization
def is_in_org(user_id, org_id):
if is_owner(org_id, user_id):
return True
my_org = Organization.objects.get(unique=org_id)
if user_id in my_org.members:
return True
return False
def is_idea_owner(idea_id, user_id):
my_idea = IdeaMeta.objects.get(unique=idea_id)
if my_idea.created_by == user_id:
return True
else:
return False
def is_thinker(user_id, idea_id, version_id):
my_idea = IdeaMeta.objects.get(unique=idea_id)
for versions in my_idea.versions:
if versions.unique == version_id:
my_version = versions
if my_version.thinker == user_id:
return True
else:
return False
def is_commenter(user_id, idea_id, comment_id):
my_idea = IdeaMeta.objects.get(unique=idea_id)
my_commenter = my_idea.comments[comment_id].commenter
if my_commenter == user_id:
return True
else:
return False
def is_replier(user_id, idea_id, comment_id, reply_id):
my_idea = IdeaMeta.objects.get(unique=idea_id)
my_replier = my_idea.comments[comment_id].replies[reply_id].replier
if my_replier == user_id:
return True
else:
return False
def is_project_member(user_id, project_id):
my_project = Project.objects.get(unique=project_id)
if user_id in my_project.members:
return True
return False
def is_project_commenter(user_id, project_id, comment_id):
my_project = Project.objects.get(unique=project_id)
my_commenter = my_project.comments[comment_id].commenter.google_id
if my_commenter == user_id:
return True
else:
return False
def is_project_replier(user_id, project_id, comment_id, reply_id):
my_project = Project.objects.get(unique=project_id)
my_replier = my_project.comments[comment_id].replies[reply_id].replier.google_id
if my_replier == user_id:
return True
else:
return False
| mit | 8,657,110,029,647,846,000 | 29.306818 | 84 | 0.665542 | false |
credativ/pulp | server/test/unit/server/webservices/views/test_consumers.py | 1 | 76384 | import json
import unittest
import mock
from django.http import HttpResponseBadRequest
from base import assert_auth_CREATE, assert_auth_DELETE, assert_auth_READ, assert_auth_UPDATE
from pulp.server.exceptions import (InvalidValue, MissingResource, MissingValue,
OperationPostponed, UnsupportedValue)
from pulp.server.managers.consumer import bind
from pulp.server.managers.consumer import profile
from pulp.server.managers.consumer import query
from pulp.server.webservices.views import consumers
from pulp.server.webservices.views import util
from pulp.server.webservices.views.consumers import (ConsumersView, ConsumerBindingsView,
ConsumerBindingResourceView,
ConsumerBindingSearchView,
ConsumerContentActionView,
ConsumerContentApplicabilityView,
ConsumerContentApplicRegenerationView,
ConsumerHistoryView, ConsumerProfilesView,
ConsumerProfileResourceView,
ConsumerProfileSearchView,
ConsumerResourceView,
ConsumerResourceContentApplicRegenerationView,
ConsumerSearchView,
UnitInstallSchedulesView,
UnitInstallScheduleResourceView)
class Test_expand_consumers(unittest.TestCase):
"""
Test that query params expand the consumer info correctly.
"""
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_bind_manager')
def test_expand_consumers(self, mock_factory, mock_serial):
"""
Test for consumer info expansion with details/bindings
"""
consumers_list = [{'id': 'c1'}]
bindings = [{'consumer_id': 'c1', 'repo_id': 'repo1', 'distributor_id': 'dist1'}]
mock_factory.return_value.find_by_criteria.return_value = bindings
mock_serial.serialize.return_value = {'consumer_id': 'c1', 'repo_id': 'repo1',
'distributor_id': 'dist1',
'_href': '/some/c1/some_bind/'}
cons = consumers.expand_consumers(True, False, consumers_list)
expected_cons = [{'id': 'c1', 'bindings': [{'consumer_id': 'c1', 'repo_id': 'repo1',
'distributor_id': 'dist1', '_href': '/some/c1/some_bind/'}]}]
self.assertEqual(cons, expected_cons)
class TestConsumersView(unittest.TestCase):
"""
Test consumers view.
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.expand_consumers')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_query_manager')
def test_get_all_consumers(self, mock_factory, mock_resp, mock_expand):
"""
Test the consumers retrieval.
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.return_value = consumer_mock
mock_expand.return_value = resp
request = mock.MagicMock()
request.GET = {}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_all_consumers_details_true(self, mock_factory, mock_resp, mock_serial):
"""
Test the consumers retrieval and include details.
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.consumer_query_manager.return_value = consumer_mock
mock_serial.serialize.return_value = []
mock_factory.consumer_bind_manager.return_value.find_by_criteria.return_value = []
request = mock.MagicMock()
request.GET = {'details': 'true'}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/',
'bindings': []}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_all_consumers_details_false(self, mock_factory, mock_resp):
"""
Test the consumers retrieval and exclude details
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.consumer_query_manager.return_value = consumer_mock
request = mock.MagicMock()
request.GET = {'details': 'false'}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_all_consumers_bindings_true(self, mock_factory, mock_resp, mock_serial):
"""
Test the consumers retrieval and include bindings
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.consumer_query_manager.return_value = consumer_mock
mock_serial.serialize.return_value = []
mock_factory.consumer_bind_manager.return_value.find_by_criteria.return_value = []
request = mock.MagicMock()
request.GET = {'bindings': 'true'}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/',
'bindings': []}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_all_consumers_bindings_false(self, mock_factory, mock_resp):
"""
Test the consumers retrieval and exclude bindings
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.consumer_query_manager.return_value = consumer_mock
request = mock.MagicMock()
request.GET = {'bindings': 'false'}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_all_consumers_bindings_not_boolean(self, mock_factory, mock_resp):
"""
Test the consumers retrieval with invalid boolean query param
"""
consumer_mock = mock.MagicMock()
resp = [{'id': 'foo', 'display_name': 'bar'}]
consumer_mock.find_all.return_value = resp
mock_factory.consumer_query_manager.return_value = consumer_mock
request = mock.MagicMock()
request.GET = {'bindings': 'not_boolean'}
consumers = ConsumersView()
response = consumers.get(request)
expected_cont = [{'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.generate_redirect_response')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_create_consumer(self, mock_factory, mock_resp, mock_redirect):
"""
Test consumer creation.
"""
cons = {'id': 'foo', 'display_name': 'bar'}
cert = '12345'
expected_cont = {'consumer': {'id': 'foo', 'display_name': 'bar',
'_href': '/v2/consumers/foo/'}, 'certificate': '12345'}
request = mock.MagicMock()
request.body = json.dumps({'id': 'foo', 'display_name': 'bar'})
mock_factory.return_value.register.return_value = cons, cert
consumers = ConsumersView()
response = consumers.post(request)
mock_resp.assert_called_once_with(expected_cont)
mock_redirect.assert_called_once_with(mock_resp.return_value,
expected_cont['consumer']['_href'])
self.assertTrue(response is mock_redirect.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
def test_create_consumer_missing_param(self):
"""
Test consumer creation with missing required id.
"""
request = mock.MagicMock()
request.body = json.dumps({'display_name': 'bar'})
consumers = ConsumersView()
try:
response = consumers.post(request)
except MissingValue, response:
pass
else:
raise AssertionError("MissingValue should be raised with missing options")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['id'])
class TestConsumerResourceView(unittest.TestCase):
"""
Test consumer resource view.
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_delete_consumer_resource(self, mock_factory, mock_resp):
"""
Test consumer delete resource.
"""
mock_consumer_manager = mock.MagicMock()
mock_factory.consumer_manager.return_value = mock_consumer_manager
mock_consumer_manager.unregister.return_value = None
request = mock.MagicMock()
consumer_resource = ConsumerResourceView()
response = consumer_resource.delete(request, 'test-consumer')
mock_consumer_manager.unregister.assert_called_once_with('test-consumer')
mock_resp.assert_called_once_with(None)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_get_consumer_resource(self, mock_collection, mock_resp):
"""
Test single consumer retrieval.
"""
mock_collection.return_value.get_consumer.return_value = {'id': 'foo'}
request = mock.MagicMock()
request.GET = {}
consumer_resource = ConsumerResourceView()
response = consumer_resource.get(request, 'foo')
expected_cont = {'id': 'foo', '_href': '/v2/consumers/foo/'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_resource_with_details(self, mock_factory, mock_resp, mock_serial):
"""
Test single consumer retrieval with query param details true
"""
mock_factory.consumer_manager.return_value.get_consumer.return_value = {'id': 'foo'}
mock_serial.serialize.return_value = []
mock_factory.consumer_bind_manager.return_value.find_by_criteria.return_value = []
request = mock.MagicMock()
request.GET = {'details': 'true'}
consumer_resource = ConsumerResourceView()
response = consumer_resource.get(request, 'foo')
expected_cont = {'id': 'foo', '_href': '/v2/consumers/foo/', 'bindings': []}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_resource_with_bindings(self, mock_factory, mock_resp, mock_serial):
"""
Test single consumer retrieval with query param bindings true
"""
mock_factory.consumer_manager.return_value.get_consumer.return_value = {'id': 'foo'}
mock_serial.serialize.return_value = []
mock_factory.consumer_bind_manager.return_value.find_by_criteria.return_value = []
request = mock.MagicMock()
request.GET = {'bindings': 'true'}
consumer_resource = ConsumerResourceView()
response = consumer_resource.get(request, 'foo')
expected_cont = {'id': 'foo', '_href': '/v2/consumers/foo/', 'bindings': []}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_get_consumer_resource_with_details_false(self, mock_collection, mock_resp):
"""
Test single consumer retrieval with query param details false
"""
mock_collection.return_value.get_consumer.return_value = {'id': 'foo'}
request = mock.MagicMock()
request.GET = {'details': 'false'}
consumer_resource = ConsumerResourceView()
response = consumer_resource.get(request, 'foo')
expected_cont = {'id': 'foo', '_href': '/v2/consumers/foo/'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_get_consumer_resource_with_bindings_false(self, mock_collection, mock_resp):
"""
Test single consumer retrieval with query param bindings false
"""
mock_collection.return_value.get_consumer.return_value = {'id': 'foo'}
request = mock.MagicMock()
request.GET = {'bindings': 'false'}
consumer_resource = ConsumerResourceView()
response = consumer_resource.get(request, 'foo')
expected_cont = {'id': 'foo', '_href': '/v2/consumers/foo/'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_UPDATE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_update_consumer(self, mock_factory, mock_resp):
"""
Test consumer update.
"""
resp = {'id': 'foo', 'display_name': 'bar'}
expected_cont = {'id': 'foo', 'display_name': 'bar', '_href': '/v2/consumers/foo/'}
request = mock.MagicMock()
request.body = json.dumps({'delta': {'display_name': 'bar'}})
mock_factory.consumer_manager.return_value.update.return_value = resp
consumer_resource = ConsumerResourceView()
response = consumer_resource.put(request, 'foo')
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
class TestConsumerSearchView(unittest.TestCase):
"""
Test the ConsumerSearchView.
"""
def test_class_attributes(self):
"""
Ensure that the ConsumerSearchView has the correct class attributes.
"""
self.assertEqual(ConsumerSearchView.response_builder,
util.generate_json_response_with_pulp_encoder)
self.assertEqual(ConsumerSearchView.optional_bool_fields, ('details', 'bindings'))
self.assertTrue(isinstance(ConsumerSearchView.manager, query.ConsumerQueryManager))
@mock.patch('pulp.server.webservices.views.consumers.add_link')
@mock.patch('pulp.server.webservices.views.consumers.expand_consumers')
def test_get_results(self, mock_expand, mock_add_link):
"""
Test that results are expanded and serialized.
"""
query = mock.MagicMock()
search_method = mock.MagicMock()
mock_expand.return_value = ['result_1', 'result_2']
options = {'mock': 'options'}
consumer_search = ConsumerSearchView()
serialized_results = consumer_search.get_results(query, search_method, options)
mock_expand.assert_called_once_with(False, False, list(search_method.return_value))
mock_add_link.assert_has_calls([mock.call('result_1'), mock.call('result_2')])
self.assertEqual(serialized_results, mock_expand.return_value)
class TestConsumerBindingsView(unittest.TestCase):
"""
Represents consumers binding.
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_bindings(self, mock_factory, mock_resp, mock_serial):
"""
Test all bindings retrieval
"""
mock_factory.consumer_manager.return_value.get_consumer.return_value = {'id': 'foo'}
bindings = [{'repo_id': 'some-repo', 'consumer_id': 'foo'}]
mock_factory.consumer_bind_manager.return_value.find_by_consumer.return_value = bindings
serial_resp = {'consumer_id': 'foo', 'repo_id': 'some-repo',
'_href': '/v2/consumers/foo/bindings/some-repo/dist1/'}
mock_serial.serialize.return_value = serial_resp
request = mock.MagicMock()
consumer_bindings = ConsumerBindingsView()
response = consumer_bindings.get(request, 'foo')
expected_cont = [{'consumer_id': 'foo', 'repo_id': 'some-repo',
'_href': '/v2/consumers/foo/bindings/some-repo/dist1/'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
class TestConsumerBindingSearchView(unittest.TestCase):
"""
Test the ConsumerBindingSearchView.
"""
def test_class_attributes(self):
"""
Ensure that the ConsumerBindingSearchView has the correct class attributes.
"""
self.assertEqual(ConsumerBindingSearchView.response_builder,
util.generate_json_response_with_pulp_encoder)
self.assertTrue(isinstance(ConsumerBindingSearchView.manager, bind.BindManager))
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_bindings_by_repoid(self, mock_factory, mock_resp, mock_serial):
"""
Test all bindings retrieval by repo-id
"""
mock_factory.consumer_manager.return_value.get_consumer.return_value = {'id': 'foo'}
bindings = [{'repo_id': 'some-repo', 'consumer_id': 'foo'}]
mock_factory.consumer_bind_manager.return_value.find_by_consumer.return_value = bindings
mock_factory.repo_query_manager.return_value.find_by_id.return_value = 'some-repo'
serial_resp = {'consumer_id': 'foo', 'repo_id': 'some-repo',
'_href': '/v2/consumers/foo/bindings/some-repo/'}
mock_serial.serialize.return_value = serial_resp
request = mock.MagicMock()
consumer_bindings = ConsumerBindingsView()
response = consumer_bindings.get(request, 'foo', 'some-repo')
expected_cont = [{'consumer_id': 'foo', '_href': '/v2/consumers/foo/bindings/some-repo/',
'repo_id': 'some-repo'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_bindings_invalid_consumer(self, mock_factory):
"""
Test retrieval of all bindings for a nonexistent consumer
"""
mock_factory.consumer_manager.return_value.get_consumer.side_effect = MissingResource()
request = mock.MagicMock()
consumer_bindings = ConsumerBindingsView()
try:
response = consumer_bindings.get(request, 'nonexistent_id')
except MissingResource, response:
pass
else:
raise AssertionError("MissingResource should be raised with nonexistent consumer_id")
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'consumer_id': 'nonexistent_id'})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_consumer_bindings_invalid_repo(self, mock_factory):
"""
Test retrieval of all bindings for a nonexistent repo
"""
mock_factory.repo_query_manager.return_value.find_by_id.return_value = None
request = mock.MagicMock()
consumer_bindings = ConsumerBindingsView()
try:
response = consumer_bindings.get(request, 'foo', 'some-repo')
except MissingResource, response:
pass
else:
raise AssertionError("MissingResource should be raised with nonexistent repo_id")
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'repo_id': 'some-repo'})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.bind')
def test_create_binding_async(self, mock_bind):
"""
Test bind consumer to a repo async task.
"""
request = mock.MagicMock()
request.body = json.dumps({'repo_id': 'xxx', 'distributor_id': 'yyy'})
consumer_bindings = ConsumerBindingsView()
self.assertRaises(OperationPostponed, consumer_bindings.post, request, 'test-consumer')
mock_bind.assert_called_once_with('test-consumer', 'xxx', 'yyy', True, {}, {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.bind')
def test_create_binding_sync(self, mock_bind, mock_resp):
"""
Test bind consumer to a repo sync task (notify_agent is false)
"""
mock_bind.return_value.spawned_tasks = False
mock_bind.return_value.serialize.return_value = {'mock': 'bind'}
request = mock.MagicMock()
request.body = json.dumps({'repo_id': 'xxx', 'distributor_id': 'yyy',
'notify_agent': 'false'})
consumer_bindings = ConsumerBindingsView()
response = consumer_bindings.post(request, 'foo')
expected_cont = {'mock': 'bind'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
mock_bind.assert_called_once_with('foo', 'xxx', 'yyy', 'false', {}, {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
def test_create_binding_with_invalid_binding_config(self):
"""
Test bind consumer to a repo with invalid binding_config
"""
request = mock.MagicMock()
request.body = json.dumps({'binding_config': []})
consumer_bindings = ConsumerBindingsView()
try:
response = consumer_bindings.post(request, 'test-consumer')
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with wrong type binding config")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['binding_config'])
class TestConsumerBindingResourceView(unittest.TestCase):
"""
Represents consumers binding resource.
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.serial_binding')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_bind_manager')
def test_get_consumer_binding_resource(self, mock_factory, mock_resp, mock_serial):
"""
Test retrieve single binding
"""
bind_resp = {'repo_id': 'some-repo', 'consumer_id': 'foo'}
mock_factory.return_value.get_bind.return_value = bind_resp
serial_resp = {'consumer_id': 'foo', 'repo_id': 'some-repo',
'_href': '/v2/consumers/foo/bindings/some-repo/dist1/'}
mock_serial.serialize.return_value = serial_resp
request = mock.MagicMock()
consumer_binding = ConsumerBindingResourceView()
response = consumer_binding.get(request, 'foo', 'some-repo', 'dist1')
expected_cont = {'consumer_id': 'foo', 'repo_id': 'some-repo',
'_href': '/v2/consumers/foo/bindings/some-repo/dist1/'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.unbind')
def test_delete_binding_async_no_force(self, mock_unbind):
"""
Test consumer binding removal async no force
"""
mock_unbind.return_value.spawned_tasks = True
request = mock.MagicMock()
request.body = json.dumps({})
unbind_view = ConsumerBindingResourceView()
self.assertRaises(OperationPostponed, unbind_view.delete, request,
"consumer_id", "repo_id", "distributor_id")
mock_unbind.assert_called_once_with("consumer_id", "repo_id", "distributor_id", {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.force_unbind')
def test_delete_binding_async_yes_force(self, mock_unbind):
"""
Test consumer binding removal async with force.
"""
request = mock.MagicMock()
request.body = json.dumps({'force': True})
unbind_view = ConsumerBindingResourceView()
self.assertRaises(OperationPostponed, unbind_view.delete, request,
"consumer_id", "repo_id", "distributor_id")
mock_unbind.assert_called_once_with("consumer_id", "repo_id", "distributor_id", {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.unbind')
def test_delete_binding_sync_no_force(self, mock_unbind, mock_resp):
"""
Test consumer binding removal sync no force
"""
mock_unbind.return_value.spawned_tasks = False
mock_unbind.return_value.serialize.return_value = {'mock': 'unbind'}
request = mock.MagicMock()
request.body = json.dumps({})
unbind_view = ConsumerBindingResourceView()
response = unbind_view.delete(request, 'foo', 'some-repo', 'dist1')
expected_cont = {'mock': 'unbind'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
mock_unbind.assert_called_once_with('foo', 'some-repo', 'dist1', {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.consumer_task.force_unbind')
def test_delete_binding_sync_yes_force(self, mock_unbind, mock_resp):
"""
Test consumer binding removal sync with force
"""
mock_unbind.return_value.spawned_tasks = False
mock_unbind.return_value.serialize.return_value = {'mock': 'force-unbind'}
request = mock.MagicMock()
request.body = json.dumps({'force': True})
unbind_view = ConsumerBindingResourceView()
response = unbind_view.delete(request, 'foo', 'some-repo', 'dist1')
expected_cont = {'mock': 'force-unbind'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
mock_unbind.assert_called_once_with('foo', 'some-repo', 'dist1', {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
def test_delete_binding_invalid_force_type(self):
"""
Test consumer binding removal with invalid type force
"""
request = mock.MagicMock()
request.body = json.dumps({'force': []})
unbind_view = ConsumerBindingResourceView()
try:
response = unbind_view.delete(request, 'foo', 'some-repo', 'dist1')
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with wrong type force param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['force'])
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
def test_delete_binding_invalid_options_type(self):
"""
Test consumer binding removal with invalid type options
"""
request = mock.MagicMock()
request.body = json.dumps({'options': []})
unbind_view = ConsumerBindingResourceView()
try:
response = unbind_view.delete(request, 'foo', 'some-repo', 'dist1')
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with wrong type options param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['options'])
class TestConsumerContentActionView(unittest.TestCase):
"""
Test Consumer content manipulation.
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
def test_consumer_bad_request_content(self):
"""
Test consumer invalid content action.
"""
request = mock.MagicMock()
request.body = json.dumps('')
consumer_content = ConsumerContentActionView()
response = consumer_content.post(request, 'my-consumer', 'no_such_action')
self.assertTrue(isinstance(response, HttpResponseBadRequest))
self.assertEqual(response.status_code, 400)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_consumer_content_install_missing_cons(self, mock_consumer):
"""
Test consumer content installation with missing consumer
"""
mock_consumer.return_value.get_consumer.side_effect = MissingResource()
request = mock.MagicMock()
request.body = json.dumps({"units": [], "options": {}})
consumer_content = ConsumerContentActionView()
try:
response = consumer_content.post(request, 'my-consumer', 'install')
except MissingResource, response:
pass
else:
raise AssertionError('MissingResource should be raised with missing consumer')
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'consumer_id': 'my-consumer'})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_consumer_content_install_missing_units(self, mock_consumer):
"""
Test consumer content installation with missing units param
"""
mock_consumer.return_value.get_consumer.return_value = 'my-consumer'
request = mock.MagicMock()
request.body = json.dumps({'options': {}})
consumer_content = ConsumerContentActionView()
try:
response = consumer_content.post(request, 'my-consumer', 'install')
except MissingValue, response:
pass
else:
raise AssertionError('MissingValue should be raised with missing units param')
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['units'])
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_consumer_content_install_missing_options(self, mock_consumer):
"""
Test consumer content installation with missing options param
"""
mock_consumer.return_value.get_consumer.return_value = 'my-consumer'
request = mock.MagicMock()
request.body = json.dumps({'units': []})
consumer_content = ConsumerContentActionView()
try:
response = consumer_content.post(request, 'my-consumer', 'install')
except MissingValue, response:
pass
else:
raise AssertionError('MissingValue should be raised with missing options param')
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['options'])
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_agent_manager')
def test_consumer_content_install(self, mock_factory, mock_consumer):
"""
Test consumer content installation.
"""
mock_factory.return_value.install_content.return_value.task_id = '1234'
mock_consumer.return_value.get_consumer.return_value = 'my_consumer'
request = mock.MagicMock()
request.body = json.dumps({"units": [], "options": {}})
consumer_content = ConsumerContentActionView()
self.assertRaises(OperationPostponed, consumer_content.post, request,
'my-consumer', 'install')
mock_factory.return_value.install_content.assert_called_once_with(
'my-consumer', [], {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_agent_manager')
def test_consumer_content_update(self, mock_factory, mock_consumer):
"""
Test consumer content update.
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_factory.return_value.update_content.return_value.task_id = '1234'
request = mock.MagicMock()
request.body = json.dumps({"units": [], "options": {}})
consumer_content = ConsumerContentActionView()
self.assertRaises(OperationPostponed, consumer_content.post, request,
'my-consumer', 'update')
mock_factory.return_value.update_content.assert_called_once_with(
'my-consumer', [], {})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_agent_manager')
def test_consumer_content_uninstall(self, mock_factory, mock_consumer):
"""
Test consumer content uninstall.
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_factory.return_value.uninstall_content.return_value.task_id = '1234'
request = mock.MagicMock()
request.body = json.dumps({"units": [], "options": {}})
consumer_content = ConsumerContentActionView()
self.assertRaises(OperationPostponed, consumer_content.post, request,
'my-consumer', 'uninstall')
mock_factory.return_value.uninstall_content.assert_called_once_with(
'my-consumer', [], {})
class TestConsumerHistoryView(unittest.TestCase):
"""
Test Consumer history view
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_history_manager')
def test_consumer_history(self, mock_history, mock_consumer, mock_resp):
"""
Test consumer history
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_history.return_value.query.return_value = {'mock': 'some-history'}
request = mock.MagicMock()
consumer_history = ConsumerHistoryView()
request.GET = {}
response = consumer_history.get(request, 'test-consumer')
mock_history.return_value.query.assert_called_once_with(sort='descending', event_type=None,
end_date=None, start_date=None,
consumer_id='test-consumer',
limit=None)
mock_resp.assert_called_once_with({'mock': 'some-history'})
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_history_manager')
def test_consumer_history_with_filters(self, mock_history, mock_consumer, mock_resp):
"""
Test consumer history using filters
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_history.return_value.query.return_value = {'mock': 'some-history'}
request = mock.MagicMock()
consumer_history = ConsumerHistoryView()
request.GET = {'limit': '2', 'event_type': 'registered'}
response = consumer_history.get(request, 'test-consumer')
mock_history.return_value.query.assert_called_once_with(sort='descending', limit=2,
event_type='registered',
end_date=None, start_date=None,
consumer_id='test-consumer')
mock_resp.assert_called_once_with({'mock': 'some-history'})
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_history_manager')
def test_consumer_no_history(self, mock_history, mock_consumer, mock_resp):
"""
Test consumer no history
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_history.return_value.query.return_value = []
request = mock.MagicMock()
consumer_history = ConsumerHistoryView()
request.GET = {}
response = consumer_history.get(request, 'test-consumer')
mock_history.return_value.query.assert_called_once_with(sort='descending', limit=None,
event_type=None,
end_date=None, start_date=None,
consumer_id='test-consumer')
mock_resp.assert_called_once_with([])
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_consumer_history_with_nonint_limit(self, mock_consumer):
"""
Pass an invalid (non-integer) limit parameter.
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
mock_request = mock.MagicMock()
mock_request.GET = {'limit': 'not an int'}
consumer_history = ConsumerHistoryView()
try:
consumer_history.get(mock_request, 'test-consumer')
except InvalidValue, response:
pass
else:
raise AssertionError('InvalidValue should be raised if limit is not an integer')
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['limit'])
class TestConsumerProfilesView(unittest.TestCase):
"""
Represents consumers profiles
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_manager')
def test_get_consumer_profiles(self, mock_consumer, mock_profile, mock_resp):
"""
Test retrieve consumer profiles
"""
mock_consumer.return_value.get_consumer.return_value = 'test-consumer'
resp = [{'some_profile': [], 'consumer_id': 'test-consumer', 'content_type': 'rpm'}]
mock_profile.return_value.get_profiles.return_value = resp
request = mock.MagicMock()
consumer_profiles = ConsumerProfilesView()
response = consumer_profiles.get(request, 'test-consumer')
expected_cont = [{'consumer_id': 'test-consumer', 'some_profile': [],
'_href': '/v2/consumers/test-consumer/profiles/rpm/',
'content_type': 'rpm'}]
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.generate_redirect_response')
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
def test_create_consumer_profile(self, mock_profile, mock_resp, mock_redirect):
"""
Test create consumer profile
"""
resp = {'some_profile': [], 'consumer_id': 'test-consumer', 'content_type': 'rpm'}
mock_profile.return_value.create.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'content_type': 'rpm', 'profile': []})
consumer_profiles = ConsumerProfilesView()
response = consumer_profiles.post(request, 'test-consumer')
expected_cont = {'consumer_id': 'test-consumer', 'some_profile': [],
'_href': '/v2/consumers/test-consumer/profiles/rpm/',
'content_type': 'rpm'}
mock_resp.assert_called_once_with(expected_cont)
mock_redirect.assert_called_once_with(mock_resp.return_value, expected_cont['_href'])
self.assertTrue(response is mock_redirect.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
def test_create_consumer_profile_missing_param(self, mock_profile):
"""
Test create consumer profile with missing param
"""
resp = {'some_profile': [], 'consumer_id': 'test-consumer', 'content_type': 'rpm'}
mock_profile.return_value.create.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'profile': []})
consumer_profiles = ConsumerProfilesView()
try:
response = consumer_profiles.post(request, 'test-consumer')
except MissingValue, response:
pass
else:
raise AssertionError("MissingValue should be raised with missing param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['content_type'])
class TestConsumerProfileSearchView(unittest.TestCase):
"""
Test the ConsumerProfileSearchView.
"""
def test_class_attributes(self):
"""
Ensure that the ConsumerProfileSearchView has the correct class attributes.
"""
self.assertEqual(ConsumerProfileSearchView.response_builder,
util.generate_json_response_with_pulp_encoder)
self.assertTrue(isinstance(ConsumerProfileSearchView.manager, profile.ProfileManager))
class TestConsumerProfileResourceView(unittest.TestCase):
"""
Represents consumers profile resource
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
def test_get_consumer_profile(self, mock_profile, mock_resp):
"""
Test retrieve consumer profile
"""
resp = {'some_profile': [], 'consumer_id': 'test-consumer', 'content_type': 'rpm'}
mock_profile.return_value.get_profile.return_value = resp
request = mock.MagicMock()
consumer_profile = ConsumerProfileResourceView()
response = consumer_profile.get(request, 'test-consumer', 'rpm')
expected_cont = {'consumer_id': 'test-consumer', 'some_profile': [],
'_href': '/v2/consumers/test-consumer/profiles/rpm/',
'content_type': 'rpm'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_UPDATE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
def test_update_consumer_profile(self, mock_profile, mock_resp):
"""
Test update consumer profile
"""
resp = {'some_profile': ['new_info'], 'consumer_id': 'test-consumer', 'content_type': 'rpm'}
mock_profile.return_value.update.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'some_profile': ['new_info']})
consumer_profile = ConsumerProfileResourceView()
response = consumer_profile.put(request, 'test-consumer', 'rpm')
expected_cont = {'consumer_id': 'test-consumer', 'some_profile': ['new_info'],
'_href': '/v2/consumers/test-consumer/profiles/rpm/',
'content_type': 'rpm'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_profile_manager')
def test_delete_consumer_profile(self, mock_profile, mock_resp):
"""
Test delete consumer profile
"""
mock_profile.return_value.delete.return_value = None
request = mock.MagicMock()
consumer_profile = ConsumerProfileResourceView()
response = consumer_profile.delete(request, 'test-consumer', 'rpm')
mock_profile.return_value.delete.assert_called_once_with('test-consumer', 'rpm')
mock_resp.assert_called_once_with(None)
self.assertTrue(response is mock_resp.return_value)
class TestConsumerQueryContentApplicabilityView(unittest.TestCase):
"""
Represents consumers content applicability
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.ConsumerContentApplicabilityView')
def test_query_consumer_content_applic_bad_request(self, mock_criteria_types):
"""
Test query consumer content applic. bad request
"""
mock_criteria_types._get_consumer_criteria.side_effect = InvalidValue
request = mock.MagicMock()
request.body = json.dumps({'content_types': ['type1']})
consumer_applic = ConsumerContentApplicabilityView()
response = consumer_applic.post(request)
self.assertTrue(isinstance(response, HttpResponseBadRequest))
self.assertEqual(response.status_code, 400)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch(
'pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.retrieve_consumer_applicability')
@mock.patch('pulp.server.webservices.views.consumers.ConsumerContentApplicabilityView')
def test_query_consumer_content_applic(self, mock_criteria_types, mock_applic, mock_resp):
"""
Test query consumer content applicability
"""
resp = [{'consumers': ['c1', 'c2'],
'applicability': {'content_type_1': ['unit_1', 'unit_3']}}]
mock_criteria_types._get_consumer_criteria.return_value = {'mock': 'some-criteria'}
mock_criteria_types._get_content_types.return_value = {'mock': 'some-content-types'}
mock_applic.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'criteria': {'filters': {}}, 'content_types': ['type1']})
consumer_applic = ConsumerContentApplicabilityView()
response = consumer_applic.post(request)
mock_resp.assert_called_once_with(resp)
self.assertTrue(response is mock_resp.return_value)
def test_get_consumer_criteria_no_criteria(self):
"""
Test get consumer criteria.
"""
request = mock.MagicMock()
request.body_as_json = {}
consumer_applic = ConsumerContentApplicabilityView()
try:
response = ConsumerContentApplicabilityView._get_consumer_criteria(
consumer_applic, request)
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with missing param")
self.assertEqual(response.http_status_code, 400)
m = "The input to this method must be a JSON object with a 'criteria' key."
self.assertEqual(response.error_data['property_names'], [m])
def test_get_consumer_criteria_no_content_types(self):
"""
Test get content types
"""
request = mock.MagicMock()
request.body_as_json = {'content_types': 'not_list'}
consumer_applic = ConsumerContentApplicabilityView()
try:
response = ConsumerContentApplicabilityView._get_content_types(
consumer_applic, request)
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with missing param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'],
['content_types must index an array.'])
class TestConsumerContentApplicabilityView(unittest.TestCase):
"""
Represents consumers content applicability regeneration
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
def test_post_consumer_content_applic_regen_no_criteria(self):
"""
Test create consumer content applic. regen with no criteria
"""
request = mock.MagicMock()
request.body = json.dumps({})
consumer_applic_regen = ConsumerContentApplicRegenerationView()
try:
response = consumer_applic_regen.post(request)
except MissingValue, response:
pass
else:
raise AssertionError("MissingValue should be raised with missing param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['consumer_criteria'])
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
def test_post_consumer_content_applic_regen_invalid_criteria(self):
"""
Test create consumer content applic. regen with invalid criteria
"""
request = mock.MagicMock()
request.body = json.dumps({'consumer_criteria': []})
consumer_applic_regen = ConsumerContentApplicRegenerationView()
try:
response = consumer_applic_regen.post(request)
except InvalidValue, response:
pass
else:
raise AssertionError("InvalidValue should be raised with missing param")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['consumer_criteria'])
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.tags')
@mock.patch('pulp.server.webservices.views.consumers.Criteria.from_client_input')
@mock.patch('pulp.server.webservices.views.consumers.regenerate_applicability_for_consumers')
def test_post_consumer_content_applic_regen(self, mock_applic, mock_criteria, mock_tags):
"""
Test create consumer content applic. regen
"""
mock_task_tags = [mock_tags.action_tag.return_value]
mock_criteria.return_value.as_dict.return_value = {'mock': 'some-criteria'}
request = mock.MagicMock()
request.body = json.dumps({'consumer_criteria': {}})
consumer_applic_regen = ConsumerContentApplicRegenerationView()
try:
consumer_applic_regen.post(request)
except OperationPostponed, response:
pass
else:
            raise AssertionError('OperationPostponed should be raised for an asynchronous task.')
self.assertEqual(response.http_status_code, 202)
mock_applic.apply_async_with_reservation.assert_called_with(
mock_tags.RESOURCE_REPOSITORY_PROFILE_APPLICABILITY_TYPE, mock_tags.RESOURCE_ANY_ID,
({'mock': 'some-criteria'},), tags=mock_task_tags)
class TestConsumerResourceContentApplicabilityView(unittest.TestCase):
"""
Represents consumer content applicability regeneration
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_query_manager')
def test_post_consumer_resource_content_applic_regen_no_consumer(self, mock_consumer):
"""
Test create consumer content applic. regen with invalid consumer
"""
mock_consumer.return_value.find_by_id.return_value = None
request = mock.MagicMock()
request.body = json.dumps({})
consumer_applic_regen = ConsumerResourceContentApplicRegenerationView()
try:
response = consumer_applic_regen.post(request, 'c1')
except MissingResource, response:
pass
else:
raise AssertionError("MissingResource should be raised with missing param")
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'consumer_id': 'c1'})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.tags')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_query_manager')
@mock.patch('pulp.server.webservices.views.consumers.Criteria')
@mock.patch('pulp.server.webservices.views.consumers.regenerate_applicability_for_consumers')
def test_post_consumer_resource_content_applic_regen(self, mock_applic, mock_criteria,
mock_consumer, mock_tags):
"""
Test create consumer resource content applic. regen
"""
mock_consumer.return_value.find_by_id.return_value = 'c1'
mock_task_tags = [mock_tags.action_tag.return_value]
mock_criteria.return_value.as_dict.return_value = {'mock': 'some-criteria'}
request = mock.MagicMock()
request.body = json.dumps({})
consumer_applic_regen = ConsumerResourceContentApplicRegenerationView()
try:
consumer_applic_regen.post(request, 'c1')
except OperationPostponed, response:
pass
else:
            raise AssertionError('OperationPostponed should be raised for an asynchronous task.')
self.assertEqual(response.http_status_code, 202)
mock_applic.apply_async_with_reservation.assert_called_with(
mock_tags.RESOURCE_CONSUMER_TYPE, 'c1',
({'mock': 'some-criteria'},), tags=mock_task_tags)
class TestConsumerUnitActionSchedulesView(unittest.TestCase):
"""
Test consumer schedule actions
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_schedules(self, mock_factory, mock_resp):
"""
Test consumer's schedules retrieval
"""
mock_consumer_manager = mock.MagicMock()
mock_factory.consumer_manager.return_value = mock_consumer_manager
mock_consumer_manager.get_consumer.return_value = 'c1'
mock_display = mock.MagicMock()
resp = {'_id': 'my-schedule', 'schedule': 'P1D', 'kwargs': {'options': {}, 'units': []}}
mock_display.for_display.return_value = resp
mock_factory.consumer_schedule_manager.return_value.get.return_value = [mock_display]
request = mock.MagicMock()
consumer_schedule = UnitInstallSchedulesView()
response = consumer_schedule.get(request, 'c1')
mock_factory.consumer_schedule_manager.return_value.get.assert_called_once_with(
'c1', 'scheduled_unit_install')
expected_content = [{'_id': 'my-schedule', 'kwargs': {'options': {}, 'units': []},
'_href': '/v2/consumers/c1/schedules/content/install/my-schedule/',
'options': {}, 'units': [], 'schedule': 'P1D'}]
mock_resp.assert_called_once_with(expected_content)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_schedules_missing_consumer(self, mock_factory):
"""
Test consumer's schedules retrieval missing consumer
"""
mock_consumer_manager = mock.MagicMock()
mock_factory.consumer_manager.return_value = mock_consumer_manager
mock_consumer_manager.get_consumer.side_effect = MissingResource()
request = mock.MagicMock()
consumer_schedule = UnitInstallSchedulesView()
try:
response = consumer_schedule.get(request, 'test-consumer')
except MissingResource, response:
pass
else:
raise AssertionError("MissingResource should be raised with missing consumer")
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'consumer_id': 'test-consumer'})
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.generate_redirect_response')
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_create_schedules(self, mock_factory, mock_resp, mock_redirect):
"""
Test consumer's schedules creation
"""
mock_consumer_manager = mock.MagicMock()
mock_factory.consumer_manager.return_value = mock_consumer_manager
mock_consumer_manager.get_consumer.return_value = 'c1'
mock_consumer_schedule_manager = mock.MagicMock()
mock_factory.consumer_schedule_manager.return_value = mock_consumer_schedule_manager
resp = {'_id': 'some-schedule', 'kwargs': {'options': {}, 'units': []}}
mock_consumer_schedule_manager.create_schedule.return_value.for_display.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'schedule': 'some-schedule'})
consumer_schedule = UnitInstallSchedulesView()
response = consumer_schedule.post(request, 'c1')
mock_consumer_schedule_manager.create_schedule.assert_called_once_with(
'scheduled_unit_install', 'c1', None, {}, 'some-schedule', None, True)
expected_cont = {'_id': 'some-schedule', 'kwargs': {'options': {}, 'units': []},
'options': {}, 'units': [],
'_href': '/v2/consumers/c1/schedules/content/install/some-schedule/'}
mock_resp.assert_called_once_with(expected_cont)
mock_redirect.assert_called_once_with(mock_resp.return_value, expected_cont['_href'])
self.assertTrue(response is mock_redirect.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_CREATE())
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_schedule_manager')
def test_create_schedules_unsupported_params(self, mock_consumer):
"""
Test consumer's schedules creation with unsupported param
"""
request = mock.MagicMock()
request.body = json.dumps({'schedule': 'some-schedule', 'unsupported_param': '1234'})
consumer_schedule = UnitInstallSchedulesView()
try:
response = consumer_schedule.post(request, 'test-consumer')
except UnsupportedValue, response:
pass
else:
raise AssertionError("UnsupportedValue should be raised with unsupported keys")
self.assertEqual(response.http_status_code, 400)
self.assertEqual(response.error_data['property_names'], ['unsupported_param'])
class TestConsumerUnitActionScheduleResourceView(unittest.TestCase):
"""
Test consumer schedule actions
"""
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_schedule(self, mock_factory, mock_resp):
"""
Test consumer's schedules resource retrieval
"""
mock_id = mock.MagicMock()
resp = {'_id': 'some-schedule', 'schedule': 'P1D', 'kwargs': {'options': {}, 'units': []}}
mock_id.for_display.return_value = resp
mock_id.id = 'some-schedule'
mock_factory.consumer_schedule_manager.return_value.get.return_value = [mock_id]
request = mock.MagicMock()
consumer_schedule = UnitInstallScheduleResourceView()
response = consumer_schedule.get(request, 'c1', 'some-schedule')
mock_factory.consumer_schedule_manager.return_value.get.assert_called_once_with(
'c1', 'scheduled_unit_install')
expected_cont = {'_id': 'some-schedule', 'kwargs': {'options': {}, 'units': []},
'_href': '/v2/consumers/c1/schedules/content/install/some-schedule/',
'options': {}, 'units': [], 'schedule': 'P1D'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_READ())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory')
def test_get_invalid_schedule(self, mock_factory, mock_resp):
"""
Test consumer's invalid schedule resource retrieval
"""
mock_factory.consumer_schedule_manager.return_value.get.return_value = []
request = mock.MagicMock()
consumer_schedule = UnitInstallScheduleResourceView()
try:
response = consumer_schedule.get(request, 'test-consumer', 'some-schedule')
except MissingResource, response:
pass
else:
raise AssertionError("MissingResource should be raised with missing param")
self.assertEqual(response.http_status_code, 404)
self.assertEqual(response.error_data['resources'], {'consumer_id': 'test-consumer',
'schedule_id': 'some-schedule'})
mock_factory.consumer_schedule_manager.return_value.get.assert_called_once_with(
'test-consumer', 'scheduled_unit_install')
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_UPDATE())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response_with_pulp_encoder')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_schedule_manager')
def test_update_schedule(self, mock_factory, mock_resp):
"""
Test consumer's schedules resource update
"""
resp = {'_id': 'some-schedule', 'schedule': 'P1D', 'kwargs': {'options': {}, 'units': []}}
mock_update_schedule = mock.MagicMock()
mock_factory.return_value.update_schedule = mock_update_schedule
mock_update_schedule.return_value.for_display.return_value = resp
request = mock.MagicMock()
request.body = json.dumps({'failure_threshold': '3', 'schedule': 'P1D'})
consumer_schedule = UnitInstallScheduleResourceView()
response = consumer_schedule.put(request, 'c1', 'some-schedule')
mock_update_schedule.assert_called_once_with('c1', 'some-schedule', None, None,
{'failure_threshold': '3',
'iso_schedule': 'P1D'})
expected_cont = {'_id': 'some-schedule', 'kwargs': {'options': {}, 'units': []},
'_href': '/v2/consumers/c1/schedules/content/install/some-schedule/',
'options': {}, 'units': [], 'schedule': 'P1D'}
mock_resp.assert_called_once_with(expected_cont)
self.assertTrue(response is mock_resp.return_value)
@mock.patch('pulp.server.webservices.views.decorators._verify_auth',
new=assert_auth_DELETE())
@mock.patch('pulp.server.webservices.views.consumers.generate_json_response')
@mock.patch('pulp.server.webservices.views.consumers.factory.consumer_schedule_manager')
def test_delete_schedule(self, mock_schedule, mock_resp):
"""
Test consumer's schedules resource delete
"""
request = mock.MagicMock()
consumer_schedule = UnitInstallScheduleResourceView()
response = consumer_schedule.delete(request, 'test-consumer', 'some-schedule')
mock_schedule.return_value.delete_schedule.assert_called_once_with(
'test-consumer', 'some-schedule')
mock_resp.assert_called_once_with(None)
self.assertTrue(response is mock_resp.return_value)
class TestConsumerAddLinks(unittest.TestCase):
def test_add_link(self):
"""
Test that the reverse for consumer works correctly.
"""
consumer = {'id': 'my_consumer'}
link = consumers.add_link(consumer)
href = {'_href': '/v2/consumers/my_consumer/'}
expected_cont = {'id': 'my_consumer', '_href': '/v2/consumers/my_consumer/'}
self.assertEqual(link, href)
self.assertEqual(consumer, expected_cont)
def test_add_link_profile(self):
"""
Test that the reverse for consumer profile works correctly.
"""
consumer_profile = {'consumer_id': 'my_consumer', 'content_type': 'rpm'}
link = consumers.add_link_profile(consumer_profile)
href = {'_href': '/v2/consumers/my_consumer/profiles/rpm/'}
expected_cont = {'consumer_id': 'my_consumer', 'content_type': 'rpm',
'_href': '/v2/consumers/my_consumer/profiles/rpm/'}
self.assertEqual(link, href)
self.assertEqual(consumer_profile, expected_cont)
def test_add_link_schedule(self):
"""
Test that the reverse for consumer schedule works correctly.
"""
consumer_id = 'c1'
action_type = 'scheduled_unit_install'
schedule = {'_id': 'schedule-id'}
link = consumers.add_link_schedule(schedule, action_type, consumer_id)
href = {'_href': '/v2/consumers/c1/schedules/content/install/schedule-id/'}
expected_cont = {'_id': 'schedule-id',
'_href': '/v2/consumers/c1/schedules/content/install/schedule-id/'}
self.assertEqual(link, href)
self.assertEqual(schedule, expected_cont)
def test_scheduled_unit_management_obj_structure(self):
"""
Modify scheduled unit management object.
"""
scheduled_call = {'kwargs': {'options': {}, 'units': []}}
expected_structure = {'kwargs': {'options': {}, 'units': []}, 'options': {}, 'units': []}
response = consumers.scheduled_unit_management_obj(scheduled_call)
self.assertEqual(response, expected_structure)
| gpl-2.0 | -1,395,906,513,170,108,400 | 46.414029 | 100 | 0.639558 | false |
pmghalvorsen/gramps_branch | gramps/gui/filters/sidebar/_sidebarfilter.py | 1 | 9432 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from bisect import insort_left
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import Pango
from ... import widgets
from ...dbguielement import DbGUIElement
from gramps.gen.config import config
_RETURN = Gdk.keyval_from_name("Return")
_KP_ENTER = Gdk.keyval_from_name("KP_Enter")
class SidebarFilter(DbGUIElement):
_FILTER_WIDTH = 20
_FILTER_ELLIPSIZE = Pango.EllipsizeMode.END
def __init__(self, dbstate, uistate, namespace):
self.signal_map = {
'tag-add' : self._tag_add,
'tag-delete' : self._tag_delete,
'tag-update' : self._tag_update,
'tag-rebuild' : self._tag_rebuild
}
DbGUIElement.__init__(self, dbstate.db)
self.position = 1
self.vbox = Gtk.VBox()
self.table = Gtk.Table(n_rows=4, n_columns=11)
self.vbox.pack_start(self.table, False, False, 0)
self.table.set_border_width(6)
self.table.set_row_spacings(6)
self.table.set_col_spacing(0, 6)
self.table.set_col_spacing(1, 6)
self.apply_btn = Gtk.Button(stock=Gtk.STOCK_FIND)
self.clear_btn = Gtk.Button()
self._init_interface()
uistate.connect('filters-changed', self.on_filters_changed)
dbstate.connect('database-changed', self._db_changed)
self.uistate = uistate
self.dbstate = dbstate
self.namespace = namespace
self.__tag_list = []
self._tag_rebuild()
def _init_interface(self):
self.create_widget()
self.apply_btn.connect('clicked', self.clicked)
hbox = Gtk.HBox()
hbox.show()
image = Gtk.Image()
image.set_from_stock(Gtk.STOCK_UNDO, Gtk.IconSize.BUTTON)
image.show()
label = Gtk.Label(label=_('Reset'))
label.show()
hbox.pack_start(image, False, False, 0)
hbox.pack_start(label, False, True, 0)
hbox.set_spacing(4)
self.clear_btn.add(hbox)
self.clear_btn.connect('clicked', self.clear)
hbox = Gtk.HButtonBox()
hbox.set_layout(Gtk.ButtonBoxStyle.START)
hbox.set_spacing(6)
hbox.set_border_width(12)
hbox.add(self.apply_btn)
hbox.add(self.clear_btn)
hbox.show()
self.vbox.pack_start(hbox, False, False, 0)
self.vbox.show()
def get_widget(self):
return self.vbox
def create_widget(self):
pass
def clear(self, obj):
pass
def clicked(self, obj):
self.uistate.set_busy_cursor(True)
self.clicked_func()
self.uistate.set_busy_cursor(False)
def clicked_func(self):
pass
def get_filter(self):
pass
def add_regex_entry(self, widget):
hbox = Gtk.HBox()
hbox.pack_start(widget, False, False, 12)
self.vbox.pack_start(hbox, False, False, 0)
def add_text_entry(self, name, widget, tooltip=None):
self.add_entry(name, widget)
widget.connect('key-press-event', self.key_press)
if tooltip:
widget.set_tooltip_text(tooltip)
def key_press(self, obj, event):
if not (event.get_state() & Gdk.ModifierType.CONTROL_MASK):
if event.keyval in (_RETURN, _KP_ENTER):
self.clicked(obj)
return False
def add_entry(self, name, widget):
if name:
self.table.attach(widgets.BasicLabel(name),
1, 2, self.position, self.position+1,
xoptions=Gtk.AttachOptions.FILL, yoptions=0)
self.table.attach(widget, 2, 4, self.position, self.position+1,
xoptions=Gtk.AttachOptions.FILL|Gtk.AttachOptions.EXPAND, yoptions=0)
self.position += 1
def on_filters_changed(self, namespace):
"""
Called when filters are changed.
"""
pass
def _db_changed(self, db):
"""
Called when the database is changed.
"""
self._change_db(db)
self.on_db_changed(db)
self._tag_rebuild()
def on_db_changed(self, db):
"""
Called when the database is changed.
"""
pass
def _connect_db_signals(self):
"""
Connect database signals defined in the signal map.
"""
for sig in self.signal_map:
self.callman.add_db_signal(sig, self.signal_map[sig])
def _tag_add(self, handle_list):
"""
Called when tags are added.
"""
for handle in handle_list:
tag = self.dbstate.db.get_tag_from_handle(handle)
insort_left(self.__tag_list, (tag.get_name(), handle))
self.on_tags_changed([item[0] for item in self.__tag_list])
def _tag_update(self, handle_list):
"""
Called when tags are updated.
"""
for handle in handle_list:
item = [item for item in self.__tag_list if item[1] == handle][0]
self.__tag_list.remove(item)
tag = self.dbstate.db.get_tag_from_handle(handle)
insort_left(self.__tag_list, (tag.get_name(), handle))
self.on_tags_changed([item[0] for item in self.__tag_list])
def _tag_delete(self, handle_list):
"""
Called when tags are deleted.
"""
self.__tag_list = [item for item in self.__tag_list
if item[1] not in handle_list]
self.on_tags_changed([item[0] for item in self.__tag_list])
def _tag_rebuild(self):
"""
Called when the tag list needs to be rebuilt.
"""
self.__tag_list = []
for handle in self.dbstate.db.get_tag_handles(sort_handles=True):
tag = self.dbstate.db.get_tag_from_handle(handle)
self.__tag_list.append((tag.get_name(), handle))
self.on_tags_changed([item[0] for item in self.__tag_list])
def on_tags_changed(self, tag_list):
"""
Called when tags are changed.
"""
pass
def add_filter_entry(self, text, widget):
"""
Adds the text and widget to GUI, with an Edit button.
"""
hbox = Gtk.HBox()
hbox.pack_start(widget, True, True, 0)
hbox.pack_start(widgets.SimpleButton(Gtk.STOCK_EDIT, self.edit_filter),
False, False, 0)
self.add_entry(text, hbox)
def edit_filter(self, obj):
"""
        Callback which invokes the EditFilter dialog. Creates a new
        filter if none is selected.
"""
from ...editors import EditFilter
from gramps.gen.filters import FilterList, GenericFilterFactory
from gramps.gen.const import CUSTOM_FILTERS
the_filter = None
filterdb = FilterList(CUSTOM_FILTERS)
filterdb.load()
if self.generic.get_active() != 0:
model = self.generic.get_model()
node = self.generic.get_active_iter()
if node:
sel_filter = model.get_value(node, 1)
# the_filter needs to be a particular object for editor
for filt in filterdb.get_filters(self.namespace):
if filt.get_name() == sel_filter.get_name():
the_filter = filt
else:
the_filter = GenericFilterFactory(self.namespace)()
if the_filter:
EditFilter(self.namespace, self.dbstate, self.uistate, [],
the_filter, filterdb,
selection_callback=self.edit_filter_save)
def edit_filter_save(self, filterdb, filter_name):
"""
If a filter changed, save them all. Reloads, and sets name.
Takes the filter database, and the filter name edited.
"""
from gramps.gen.filters import reload_custom_filters
filterdb.save()
reload_custom_filters()
self.on_filters_changed(self.namespace)
self.set_filters_to_name(filter_name)
def set_filters_to_name(self, filter_name):
"""
Resets the Filter combobox to the edited/saved filter.
"""
liststore = self.generic.get_model()
iter = liststore.get_iter_first()
while iter:
filter = liststore.get_value(iter, 1)
if filter and filter.name == filter_name:
self.generic.set_active_iter(iter)
break
iter = liststore.iter_next(iter)
| gpl-2.0 | 512,867,680,985,231,900 | 33.423358 | 95 | 0.586514 | false |
Bioto/Huuey-python | huuey/hue/lights/light.py | 1 | 2617 | from huuey.hue.state import State
from huuey.paths import Paths
class Light:
"""
Description:
        Holds data for a single Light from the Hue API
Attrs:
state: Holds instance of State()
name: Name of the group
modelid: Type of Light
swversion: Software Version
uniqueid: Machine id for light
_id: ID of light
_controller: Reference to main huuey object
_newname: Holds string for updating the name of the group
"""
state = None
name = None
modelid = None
swversion = None
uniqueid = None
_id = None
_controller = None
_newname = None
def __init__(self, obj, controller):
self._controller = controller
self._map(obj)
def _map(self, obj):
"""
Description:
Maps the passed in data to the current object
"""
for key in obj:
if key == "state":
self.state = State(obj[key], self)
else:
setattr(self, key, obj[key])
def getid(self):
return self._id
def delete(self):
"""
Description:
Deletes the Light from the bridge
"""
return self._controller.request(Paths.LightDEL, additional={
'<id>': self._id
})
def setstate(self, obj):
"""
Description:
Updates the state object to prepare for actual request
"""
if 'rename' in obj:
self._newname = obj['rename']
del obj['rename']
self.state.update(obj)
return self
def update(self):
"""
Description:
            Sends the request to the endpoint, then pulls updated data
            directly from the API.
            If _newname is set, the rename request is sent first,
            then the main state request is triggered.
"""
if self._newname:
self._controller.request(Paths.LightPUT, {
'name': self._newname
}, additional={
'<id>': self._id
})
self._newname = None
self._controller.request(Paths.LightState, self.state.object(),
additional={
'<id>': self._id
})
self.grab()
def grab(self):
"""
Description:
Pulls fresh data from the API
"""
light = self._controller.request(Paths.LightGET, additional={
'<id>': self._id
})
self._map(light)
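# Minimal usage sketch for the Light class above. The lookup and the state keys
# ('on', 'bri') are assumptions about the controller and the Hue API, not taken
# from this file; Light objects are normally created by the main huuey object.
#
#   light = controller.getlight('1')                # hypothetical accessor
#   light.setstate({'on': True, 'bri': 200,
#                   'rename': 'Kitchen'}).update()  # queue state + rename, then push both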
| mit | -8,591,549,799,090,077,000 | 24.163462 | 71 | 0.504776 | false |
eviljeff/olympia | src/olympia/files/tests/test_admin.py | 1 | 3053 | from django.utils.encoding import force_text
from olympia.amo.tests import TestCase, addon_factory, user_factory
from olympia.amo.urlresolvers import reverse
class TestFileAdmin(TestCase):
def setUp(self):
self.list_url = reverse('admin:files_file_changelist')
def test_can_list_files_with_admin_advanced_permission(self):
addon = addon_factory()
file_ = addon.current_version.all_files[0]
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Admin:Advanced')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 200
assert str(file_.pk) in force_text(response.content)
def test_can_edit_with_admin_advanced_permission(self):
addon = addon_factory()
file_ = addon.current_version.all_files[0]
detail_url = reverse(
'admin:files_file_change', args=(file_.pk,)
)
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Admin:Advanced')
self.client.login(email=user.email)
response = self.client.get(detail_url, follow=True)
assert response.status_code == 200
assert str(file_.id) in force_text(response.content)
assert not file_.is_webextension
post_data = {
'version': file_.version.pk,
'platform': file_.platform,
'filename': file_.filename,
'size': file_.size,
'hash': 'xxx',
'original_hash': 'xxx',
'status': file_.status,
'original_status': file_.original_status,
}
post_data['is_webextension'] = 'on'
response = self.client.post(detail_url, post_data, follow=True)
assert response.status_code == 200
file_.refresh_from_db()
assert file_.is_webextension
def test_can_not_list_without_admin_advanced_permission(self):
user = user_factory(email='someone@mozilla.com')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 403
# Just checking that simply changing the permission resolves
# as wanted
self.grant_permission(user, 'Admin:Advanced')
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 200
def test_detail_view_has_download_link(self):
addon = addon_factory()
file_ = addon.current_version.all_files[0]
detail_url = reverse(
'admin:files_file_change', args=(file_.pk,)
)
user = user_factory(email='someone@mozilla.com')
self.grant_permission(user, 'Admin:Advanced')
self.client.login(email=user.email)
response = self.client.get(detail_url, follow=True)
assert response.status_code == 200
expected_url = file_.get_absolute_url(attachment=True)
assert expected_url in force_text(response.content)
| bsd-3-clause | 1,612,477,954,414,876,000 | 38.649351 | 71 | 0.63151 | false |
sergesyrota/Splunk-license-watchdog | splunk-license-watchdog.py | 1 | 11265 | #!/usr/bin/env python
##################
#
# DEPENDENCIES
#
# Python 2.6+
# Python packages: sys, getopt, requests, time
# Splunk: 4.2+
#
##################
from __future__ import print_function
#
# CONFIGURATION
#
# Authentication information for your Splunk setup
_splunkUser = "user"
_splunkPass = "pass"
# host and port for Splunk server that has license pool info
_licensingServer = "https://splunk.example.com:8089"
# List of inputs that can be disabled or enabled
# You can get a list by a helpful --discover-inputs=<host> flag
# Update inputList by creating a list with all inputs that should be toggled
# Note that you can include multiple hosts, if you have multiple indexing heads in the same cluster
# Example: inputList = ['https://example.com:8089/servicesNS/nobody/launcher/data/inputs/tcp/cooked/9997',
# 'https://node2.example.com:8089/servicesNS/nobody/system/data/inputs/monitor/%24SPLUNK_HOME%252Fetc%252Fsplunk.version']
_inputList = []
# Action threshold. When current usage crosses _disableThreshold, listed inputs will be disabled.
# When today's usage is under _enableThreshold, we assume a new day has started, and inputs will be enabled
# Consider that 1% is ~15 minutes. Set threshold and schedules accordingly.
# Also make sure that the script runs before the time you might run out of quota
_disableThreshold = 90
_enableThreshold = 30
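# Rough sketch of how the thresholds interact with the run schedule (assumed numbers,
# not measurements): at ~15 minutes per 1% of quota, a script scheduled every 30 minutes
# can overshoot by roughly 2% between runs, so for example:
#   _disableThreshold = 90   # leaves ~1.5 hours of headroom before hitting 100%
#   _enableThreshold = 30    # comfortably below realistic usage on the first run of a new day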
#
# END CONFIGURATION
#
# If you change anything below, make sure you know what you're doing :)
#
# Default debug level
# 0 = Fatal errors (stderr) and action information (-q)
# 1 = Informational messages on steps and statuses
# 2 = Verbose output, with splunk responses (-v)
_debugLevel = 1
licensePoolQuery = '| rest /services/licenser/pools | rename title AS Pool | search [rest /services/licenser/groups | search is_active=1 | eval stack_id=stack_ids | fields stack_id] | eval quota=if(isnull(effective_quota),quota,effective_quota) | eval "Used"=round(used_bytes/1024/1024/1024, 3) | eval "Quota"=round(quota/1024/1024/1024, 3) | fields Pool "Used" "Quota"'
import sys
import getopt
import time
import requests
# Suppressing "InsecureRequestWarning" due to self-signed certificate on Splunk servers
requests.packages.urllib3.disable_warnings()
def main(argv):
# at a minimum, auth token should be set, so let's check it right away
if _splunkUser == "user" and _splunkPass == "pass":
debugPrint("Please update user and password to access your Splunk instance and run this script", 0)
showHelp()
sys.exit(1)
try:
opts, args = getopt.getopt(argv, "hc:d:vqED", ["help", "check-license=", "discover-inputs=", "enable-all", "disable-all"])
except getopt.GetoptError:
showHelp()
sys.exit(2)
# First go through non-action arguments and adjust environment variables, before performing actions that will lead to exit.
global _debugLevel
for opt, arg in opts:
if opt in ('-v'):
_debugLevel = 2
if opt in ('-q'):
_debugLevel = 0
# Separate loop for actions that result in exit
for opt, arg in opts:
if opt in ("-h", "--help"):
showHelp()
sys.exit(0)
if opt in ("-c", "--check-license"):
checkLicense(arg)
sys.exit(0)
if opt in ("-d", "--discover-inputs"):
discoverInputs(arg)
sys.exit(0)
if opt in ("-E", "--enable-all"):
enableInputs()
sys.exit(0)
if opt in ("-D", "--disable-all"):
disableInputs()
sys.exit(0)
# Validate that we have needed configuration
if len(_inputList) == 0:
exit("Please adjust the script with your configuration first. Input list is missing.")
# High level sequence:
# Get license details
# If we're over our license quota - enable all inputs. Might as well catch up today, since we'll have a warning anyways.
# If usage is under "enable" threshold: enable all disabled inputs
# If usage is over "disable" threshold: disable all enabled inputs
usage = getLicenseData(_licensingServer)
debugPrint("Quota: %0.3f; Used: %0.3f (%0.1f%%)" % (usage['Quota'], usage['Used'], usage['PercentUsed']), 1)
if usage['PercentUsed'] > 100:
debugPrint("We're over the quota for today! Enabling all disabled inputs to catch up as much as we can:", 1)
enableInputs()
elif usage['PercentUsed'] < _enableThreshold:
debugPrint("Usage is under threshold; Enabling all disabled inputs:", 1)
enableInputs()
elif usage['PercentUsed'] > _disableThreshold:
debugPrint("Usage is over threshold; Disabling all enabled inputs:", 1)
disableInputs()
sys.exit(0)
def disableInputs():
toggleInputs(False)
def enableInputs():
toggleInputs(True)
def toggleInputs(enable):
# Set variables so that we can use unified piece of code to toggle inputs
if enable:
commandSuffix = '/enable'
messageText = 'enabled'
disabledFlag = False
else:
commandSuffix = '/disable'
messageText = 'disabled'
disabledFlag = True
# Take care of all inputs, and make sure they are not in desired state before requesting a change (and also checking that inputs actually exist)
try:
for inputUrl in _inputList:
inputData = splunkRestRequest(inputUrl + '?output_mode=json')
if inputData['entry'][0]['content']['disabled'] == disabledFlag:
debugPrint("Already %s: %s" % (messageText, inputUrl), 2)
else:
# Changing status requires POST
r = splunkRestRequest(inputUrl + commandSuffix, {'output_mode': 'json'})
# Messages = possible problems. Need to verify
for message in r['messages']:
if message['type'] == 'ERROR':
exit("Error toggling input state: " + message['text'])
# Verify that status is correct now:
if r['entry'][0]['content']['disabled'] != disabledFlag:
exit("Error toggling input: %s; Request OK, but input not %s." % (inputUrl, messageText))
debugPrint("%s: %s" % (messageText, inputUrl), 1)
except IndexError as e:
exit("ERROR wotking with Splunk input toggles; unexpected data: %s" % str(e))
except KeyError as e:
exit("ERROR wotking with Splunk input toggles; unexpected data; key %s does not exist " % str(e))
# Helper function to use during setup. Just displays aggregated license quota and today's usage
def checkLicense(host):
debugPrint("Checking license info on " + host, 0)
data = getLicenseData(host)
debugPrint("Licensing quota: %0.3f GiB" % data['Quota'], 0)
debugPrint("Used today: %0.3f GiB (%0.1f%%)" % (data['Used'], data['PercentUsed']), 0)
# Helper function to use during setup. Just shows all inputs found on Splunk host (indexing head)
def discoverInputs(host):
debugPrint("Discovering inputs at " + host, 0)
data = splunkRestRequest(host + '/servicesNS/' + _splunkUser + '/launcher/data/inputs/all?output_mode=json')
for entry in data['entry']:
# entry will have links. We're interested in seeing ones we can disable and enable, so those are the links we're checking to validate (and skipping the rest)
# then grab entry link itself from "alternate" (so that we can add /disable or /enable later)
if 'enable' in entry['links'] or 'disable' in entry['links']:
status = "Unknown: "
if entry['content']['disabled']:
status = "Disabled: "
else:
status = "Enabled: "
debugPrint(status + host + entry['links']['alternate'], 0)
debugPrint("""
Review links above. Identify which ones you want to disable when you are approaching
license limit, then update top of the file by copying them in there.
Generally, you don't want to disable any internal indexing. You also need to consider if
    data loss is what you can tolerate or want to achieve (e.g. disabling file input past its
rotation schedule will lead to loss of data between disabling and enabling). If you're
using Splunk forwarders, though, they have their own cache, so disabling tcp input they
pipe to should be safe.""", 0)
# Runs Splunk query to get license pool information, and aggregate results, presenting only usage/quota information
def getLicenseData(host):
data = splunkQuery(host, licensePoolQuery)
try:
used = float(data['result']['Used'])
quota = float(data['result']['Quota'])
if used < 0 or quota <= 0:
exit("Error getting license data. Invalid response received: %s" % data)
return {'Quota': quota, 'Used': used, 'PercentUsed': 100*used/quota}
except KeyError:
exit("Error getting license data. Invalid response received: %s" % data)
# Generic function to run splunk query on a given node, and parse our JSON response
def splunkQuery(host, query):
debugPrint("Running Splunk query: '%s' on host '%s'" % (query, host), 2)
payload = {'search': query, 'output_mode': 'json', 'exec_mode': 'oneshot'}
return splunkRestRequest(host + '/servicesNS/' + _splunkUser + '/search/search/jobs/export/', payload)
# Data format is always expected to be JSON, so need to make sure it's either in URL explicitly, or in post data when this function is used
def splunkRestRequest(url, postData=None):
try:
# No post means we're making a GET request
if postData is None:
r = requests.get(url, auth=(_splunkUser, _splunkPass), verify=False)
debugPrint(r.text, 2)
return r.json()
else:
r = requests.post(url, postData, auth=(_splunkUser, _splunkPass), verify=False)
debugPrint(r.text, 2)
return r.json()
except requests.exceptions.RequestException as e:
exit("ERROR communicating with Splunk server (%s): %s", (url, str(e)))
def showHelp():
print("""
    USAGE: splunk-license-watchdog.py [options...]
Running without arguments would execute logic. Helper commands can help with config, but require
authentication variables to be set in the file.
-c/--check-license <url> Attempts to retrieve license information from provided
            Splunk node (Requires auth info) protocol://host:port required, e.g.:
https://your.server.com:8089
-d/--discover-inputs <url> Discovers all inputs and current states
from provided Splunk node (requires auth parameters to be configured)
            protocol://host:port required, e.g.:
https://your.server.com:8089
-D/--disable-all Disable all inputs that have been configured
-E/--enable-all Enable all inputs that have been configured
-h/--help This help text
-q Quiet mode (errors only)
    -v Verbose output (including Splunk queries)
""")
def debugPrint(message, level):
if _debugLevel >= level:
print("%s - %s" % (time.strftime("%Y-%m-%d %H:%M:%S"), message))
def exit(message, retval=1):
print(message, file=sys.stderr)
sys.exit(retval)
main(sys.argv[1:])
| mit | -5,825,861,814,609,172,000 | 44.06 | 370 | 0.653262 | false |
calico/basenji | bin/basenji_fetch_app2.py | 1 | 10874 | #!/usr/bin/env python
from optparse import OptionParser
import collections
import functools
import os
import pdb
import sys
import numpy as np
import pandas as pd
import h5py
from google.cloud import bigquery
import dash
import dash_table as dt
import dash.dependencies as dd
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from basenji.sad5 import ChrSAD5
'''
basenji_fetch_app2.py
Run a Dash app to enable SAD queries.
'''
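# Example launch (the HDF5 path is hypothetical; the positional argument is the SAD HDF5
# produced upstream, and -c is only needed when the files are split by chromosome):
#   ./basenji_fetch_app2.py -c sad/sad.h5
# The Dash interface is then served on port 8787 (see app.run_server at the bottom of main).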
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <sad_hdf5_path>'
parser = OptionParser(usage)
parser.add_option('-c', dest='chrom_hdf5',
default=False, action='store_true',
help='HDF5 files split by chromosome [Default: %default]')
(options,args) = parser.parse_args()
if len(args) != 1:
parser.error('Must provide SAD HDF5')
else:
sad_h5_path = args[0]
#############################################
# precursors
print('Preparing data...', end='', flush=True)
sad5 = ChrSAD5(sad_h5_path, index_chr=True)
print('DONE.', flush=True)
#############################################
# layout
column_widths = [('SNP',150), ('Association',125),
('Score',125), ('ScoreQ',125), ('R',125),
('Experiment',125), ('Description',200)]
scc = [{'if': {'column_id': cw[0]}, 'width':cw[1]} for cw in column_widths]
app = dash.Dash(__name__)
app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
app.layout = html.Div([
html.Div([
html.H1('Basenji SNP activity difference'),
dcc.Markdown('Instructions...'),
html.Div([
html.Label('Datasets'),
dcc.Dropdown(
id='dataset',
options=[
{'label':'CAGE', 'value':'CAGE'},
{'label':'DNase', 'value':'DNASE'},
{'label':'H3K4me3', 'value':'CHIP:H3K4me3'},
{'label':'All', 'value':'All'}
],
value='CAGE'
)
], style={'width': '250', 'display': 'inline-block'}),
html.Div([
html.Label('Population'),
dcc.Dropdown(
id='population',
options=[
{'label':'-', 'value':'-'},
{'label':'1kG African', 'value':'AFR'},
{'label':'1kG American', 'value':'AMR'},
{'label':'1kG East Asian', 'value':'EAS'},
{'label':'1kG European', 'value':'EUR'},
{'label':'1kG South Asian', 'value':'SAS'}
],
value='-'
)
], style={'width': '250', 'display': 'inline-block'}),
html.Div([
html.Label('SNP ID'),
dcc.Input(id='snp_id', value='rs6656401', type='text'),
html.Button(id='snp_submit', n_clicks=0, children='Submit')
], style={'display': 'inline-block', 'float': 'right'})
], style={
'borderBottom': 'thin lightgrey solid',
'backgroundColor': 'rgb(250, 250, 250)',
'padding': '10px 5px'
}),
dcc.Graph(id='assoc_plot'),
html.Div([
dt.DataTable(
id='table',
data=[],
columns=[{'id':cw[0],'name':cw[0]} for cw in column_widths],
style_cell_conditional=scc,
editable=False,
filtering=True,
sorting=True,
n_fixed_rows=20
)
])
])
# html.Div([
# dt.DataTable(
# id='table',
# data=[],
# columns=[cw[0] for cw in column_widths],
# style_cell_conditional=scc,
# editable=False,
# filtering=True,
# sorting=True,
# n_fixed_rows=20
# )
#############################################
# callback helpers
@memoized
def query_ld(population, snp_id):
try:
sad5.set_population(population)
except ValueError:
print('Population unavailable.', file=sys.stderr)
return pd.DataFrame()
chrm, snp_i = sad5.snp_chr_index(snp_id)
    if chrm is None:
      return pd.DataFrame()
    else:
      pos = sad5.snp_pos(snp_i, chrm)
      return sad5.emerald_vcf.query_ld(snp_id, chrm, pos, ld_threshold=0.8)
@memoized
def read_sad(chrm, snp_i, verbose=True):
"""Read SAD scores from HDF5 for the given SNP index."""
if verbose:
print('Reading SAD!', file=sys.stderr)
# read SAD
snp_sad = sad5.chr_sad5[chrm][snp_i].astype('float64')
# read percentiles
snp_pct = sad5.chr_sad5[chrm].sad_pct(snp_sad)
return snp_sad, snp_pct
def snp_rows(snp_id, dataset, ld_r2=1., verbose=True):
"""Construct table rows for the given SNP id and its LD set
in the given dataset."""
rows = []
# search for SNP
# chrom, snp_i = snp_indexes.get(snp_id, (None,None))
chrm, snp_i = sad5.snp_chr_index(snp_id)
if chrm is not None:
# SAD
snp_sad, snp_pct = read_sad(chrm, snp_i)
# round floats
snp_sad = np.around(snp_sad,4)
snp_assoc = np.around(snp_sad*ld_r2, 4)
ld_r2_round = np.around(ld_r2, 4)
# extract target scores and info
for ti, tid in enumerate(sad5.target_ids):
if dataset == 'All' or sad5.target_labels[ti].startswith(dataset):
rows.append({
'SNP': snp_id,
'Association': snp_assoc[ti],
'Score': snp_sad[ti],
'ScoreQ': snp_pct[ti],
'R': ld_r2_round,
'Experiment': tid,
'Description': sad5.target_labels[ti]})
elif verbose:
print('Cannot find %s in snp_indexes.' % snp_id)
return rows
def make_data_mask(dataset):
"""Make a mask across targets for the given dataset."""
dataset_mask = []
for ti, tid in enumerate(sad5.target_ids):
if dataset == 'All':
dataset_mask.append(True)
else:
dataset_mask.append(sad5.target_labels[ti].startswith(dataset))
return np.array(dataset_mask, dtype='bool')
def snp_scores(snp_id, dataset, ld_r2=1.):
"""Compute an array of scores for this SNP
in the specified dataset."""
dataset_mask = make_data_mask(dataset)
scores = np.zeros(dataset_mask.sum(), dtype='float64')
# search for SNP
chrm, snp_i = sad5.snp_chr_index(snp_id)
if snp_i is not None:
# read SAD
snp_sad, _ = read_sad(chrm, snp_i)
# filter datasets
snp_sad = snp_sad[dataset_mask]
# add
scores += snp_sad*ld_r2
return scores
#############################################
# callbacks
@app.callback(
dd.Output('table', 'data'),
[dd.Input('snp_submit', 'n_clicks')],
[
dd.State('snp_id','value'),
dd.State('dataset','value'),
dd.State('population','value')
]
)
def update_table(n_clicks, snp_id, dataset, population, verbose=True):
"""Update the table with a new parameter set."""
if verbose:
print('Tabling')
# add snp_id rows
rows = snp_rows(snp_id, dataset)
if population != '-':
df_ld = query_ld(population, snp_id)
for i, v in df_ld.iterrows():
rows += snp_rows(v.snp, dataset, v.r)
return rows
@app.callback(
dd.Output('assoc_plot', 'figure'),
[dd.Input('snp_submit', 'n_clicks')],
[
dd.State('snp_id','value'),
dd.State('dataset','value'),
dd.State('population','value')
]
)
def update_plot(n_clicks, snp_id, dataset, population, verbose=True):
if verbose:
print('Plotting')
target_mask = make_data_mask(dataset)
# add snp_id rows
query_scores = snp_scores(snp_id, dataset)
if population != '-':
df_ld = query_ld(population, snp_id)
for i, v in df_ld.iterrows():
query_scores += snp_scores(v.snp, dataset, v.r)
# sort
sorted_indexes = np.argsort(query_scores)
# range
ymax = np.abs(query_scores).max()
ymax *= 1.2
return {
'data': [go.Scatter(
x=np.arange(len(query_scores)),
y=query_scores[sorted_indexes],
text=sad5.target_ids[target_mask][sorted_indexes],
mode='markers'
)],
'layout': {
'height': 400,
'margin': {'l': 20, 'b': 30, 'r': 10, 't': 10},
'yaxis': {'range': [-ymax,ymax]},
'xaxis': {'range': [-1,1+len(query_scores)]}
}
}
#############################################
# run
app.scripts.config.serve_locally = True
app.run_server(debug=False, port=8787)
class memoized(object):
'''Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
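# Usage sketch for the decorator above (hypothetical function). Arguments must be
# hashable; unhashable arguments fall through uncached, as handled in __call__:
#   @memoized
#   def slow_lookup(snp_id):
#       ...  # expensive work runs only once per distinct snp_id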
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| apache-2.0 | 4,630,189,006,834,960,000 | 30.068571 | 86 | 0.469101 | false |
OpenTTD-Ladder/ladder-web | ladder/matchmaking/admin.py | 1 | 1149 | from django.contrib import admin
from translations.admin import TranslationInline
from .models import Ladder, LadderTranslation
class LadderTranslationAdmin(TranslationInline):
model = LadderTranslation
class LadderAdmin(admin.ModelAdmin):
inlines = [LadderTranslationAdmin]
fieldsets = (
(None, {'fields': [('max_slots', 'signup_confirm'),]}),
('Dates', {'fields': [('ladder_start', 'ladder_ends'),]}),
('Signup', {'fields': [('signup_start', 'signup_ends'),]}),
('Rating', {'fields': [('default_mu', 'default_draw'),
('default_sigma', 'default_beta'),
'default_tau']})
)
list_display = [
'translation',
'max_slots',
'is_active',
'is_signup',
'signup_confirm',
]
def get_readonly_fields(self, request, obj=None):
if obj is not None and obj.pk:
return self.readonly_fields + ('default_mu', 'default_sigma',
'default_beta', 'default_tau', 'default_draw',)
return self.readonly_fields
admin.site.register(Ladder, LadderAdmin) | gpl-2.0 | -7,043,679,489,068,449,000 | 31.857143 | 74 | 0.575283 | false |
JshWright/home-assistant | homeassistant/components/notify/telegram.py | 2 | 6078 | """
Telegram platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.telegram/
"""
import io
import logging
import urllib
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_DATA, PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import (
CONF_API_KEY, ATTR_LOCATION, ATTR_LATITUDE, ATTR_LONGITUDE)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['python-telegram-bot==5.3.1']
ATTR_PHOTO = 'photo'
ATTR_KEYBOARD = 'keyboard'
ATTR_DOCUMENT = 'document'
ATTR_CAPTION = 'caption'
ATTR_URL = 'url'
ATTR_FILE = 'file'
ATTR_USERNAME = 'username'
ATTR_PASSWORD = 'password'
CONF_CHAT_ID = 'chat_id'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_CHAT_ID): cv.string,
})
def get_service(hass, config, discovery_info=None):
"""Get the Telegram notification service."""
import telegram
try:
chat_id = config.get(CONF_CHAT_ID)
api_key = config.get(CONF_API_KEY)
bot = telegram.Bot(token=api_key)
username = bot.getMe()['username']
_LOGGER.debug("Telegram bot is '%s'", username)
except urllib.error.HTTPError:
_LOGGER.error("Please check your access token")
return None
return TelegramNotificationService(api_key, chat_id)
def load_data(url=None, file=None, username=None, password=None):
"""Load photo/document into ByteIO/File container from a source."""
try:
if url is not None:
# Load photo from URL
if username is not None and password is not None:
req = requests.get(url, auth=(username, password), timeout=15)
else:
req = requests.get(url, timeout=15)
return io.BytesIO(req.content)
elif file is not None:
# Load photo from file
return open(file, "rb")
else:
_LOGGER.warning("Can't load photo no photo found in params!")
except OSError as error:
_LOGGER.error("Can't load photo into ByteIO: %s", error)
return None
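# Sketch only: example use of load_data() with placeholder sources. The URL,
# credentials and file path below are hypothetical; load_data() returns a binary
# file-like object suitable for sendPhoto()/sendDocument(), or None on failure.
def _example_load_photo():
    """Load a photo from a placeholder URL, falling back to a local file."""
    photo = load_data(url='https://example.com/snapshot.jpg',
                      username='user', password='secret')
    if photo is None:
        photo = load_data(file='/tmp/snapshot.jpg')
    return photo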
class TelegramNotificationService(BaseNotificationService):
"""Implement the notification service for Telegram."""
def __init__(self, api_key, chat_id):
"""Initialize the service."""
import telegram
self._api_key = api_key
self._chat_id = chat_id
self.bot = telegram.Bot(token=self._api_key)
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
import telegram
title = kwargs.get(ATTR_TITLE)
data = kwargs.get(ATTR_DATA)
        # Check whether data exists for sending a photo/location
if data is not None and ATTR_PHOTO in data:
photos = data.get(ATTR_PHOTO, None)
photos = photos if isinstance(photos, list) else [photos]
for photo_data in photos:
self.send_photo(photo_data)
return
elif data is not None and ATTR_LOCATION in data:
return self.send_location(data.get(ATTR_LOCATION))
elif data is not None and ATTR_DOCUMENT in data:
return self.send_document(data.get(ATTR_DOCUMENT))
elif data is not None and ATTR_KEYBOARD in data:
keys = data.get(ATTR_KEYBOARD)
keys = keys if isinstance(keys, list) else [keys]
return self.send_keyboard(message, keys)
if title:
text = '{} {}'.format(title, message)
else:
text = message
parse_mode = telegram.parsemode.ParseMode.MARKDOWN
# Send message
try:
self.bot.sendMessage(
chat_id=self._chat_id, text=text, parse_mode=parse_mode)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending message")
def send_keyboard(self, message, keys):
"""Display keyboard."""
import telegram
keyboard = telegram.ReplyKeyboardMarkup([
[key.strip() for key in row.split(",")] for row in keys])
try:
self.bot.sendMessage(
chat_id=self._chat_id, text=message, reply_markup=keyboard)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending message")
def send_photo(self, data):
"""Send a photo."""
import telegram
caption = data.get(ATTR_CAPTION)
# Send photo
try:
photo = load_data(
url=data.get(ATTR_URL),
file=data.get(ATTR_FILE),
username=data.get(ATTR_USERNAME),
password=data.get(ATTR_PASSWORD),
)
self.bot.sendPhoto(
chat_id=self._chat_id, photo=photo, caption=caption)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending photo")
def send_document(self, data):
"""Send a document."""
import telegram
caption = data.get(ATTR_CAPTION)
        # Send document
try:
document = load_data(
url=data.get(ATTR_URL),
file=data.get(ATTR_FILE),
username=data.get(ATTR_USERNAME),
password=data.get(ATTR_PASSWORD),
)
self.bot.sendDocument(
chat_id=self._chat_id, document=document, caption=caption)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending document")
def send_location(self, gps):
"""Send a location."""
import telegram
latitude = float(gps.get(ATTR_LATITUDE, 0.0))
longitude = float(gps.get(ATTR_LONGITUDE, 0.0))
# Send location
try:
self.bot.sendLocation(
chat_id=self._chat_id, latitude=latitude, longitude=longitude)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending location")
| apache-2.0 | -8,644,028,738,815,384,000 | 31.15873 | 78 | 0.607108 | false |
wangjeaf/CSSCheckStyle | setup.py | 1 | 2417 | #!/usr/bin/env python
#
# Copyright 2012 The CSSCheckStyle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
try:
from setuptools import setup
except ImportError:
    print '[CKstyle] ERROR: please install "easy_install" first'
sys.exit(0)
from distutils.command.install_data import install_data
import os
cmdclasses = {'install_data': install_data}
def fullsplit(path, result=None):
    """Split a pathname into a list of its components (the opposite of os.path.join)."""
    if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
ckstyle_dir = 'ckstyle'
for dirpath, dirnames, filenames in os.walk(ckstyle_dir):
for i, dirname in enumerate(dirnames):
        if dirname.startswith('.') or dirname.startswith('_'):
            del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(
name = "CSSCheckStyle",
version = '1.0.0',
url = 'https://github.com/wangjeaf/CSSCheckStyle',
author = 'wangjeaf',
author_email = 'wangjeaf@gmail.com',
description = 'Check Code Style and more, for CSS.',
download_url = 'https://github.com/wangjeaf/CSSCheckStyle/archive/master.tar.gz',
install_requires=['python-gflags'],
packages = packages,
cmdclass = cmdclasses,
data_files = data_files,
entry_points = {
'console_scripts': [
'ckstyle = ckstyle.command.index:ckstyle'
]
},
classifiers = ['Intended Audience :: Developers',
'Programming Language :: Python',
'Topic :: Software Development :: CSS'
],
)
| bsd-3-clause | -7,363,632,864,279,290,000 | 31.226667 | 85 | 0.65784 | false |
joshmoore/zeroc-ice | java/test/Freeze/fileLock/run.py | 1 | 1622 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise "can't find toplevel directory!"
sys.path.append(os.path.join(path[0]))
from scripts import *
dbdir = os.path.join(os.getcwd(), "db")
TestUtil.cleanDbDir(dbdir)
print "testing Freeze file lock...",
sys.stdout.flush()
client = os.path.join("test.Freeze.fileLock.Client")
clientFail = os.path.join("test.Freeze.fileLock.ClientFail")
clientExe = TestUtil.startClient(client, "", None, None, False)
clientExe.expect('File lock acquired.\.*')
clientFailExe = TestUtil.startClient(clientFail, "", None, None, False)
clientFailExe.expect('File lock not acquired.')
clientFailExe.wait()
# send some output to client to terminate it.
clientExe.sendline('go')
clientExe.expect('File lock released.')
clientExe.wait()
# The lock is gone try to acquire it again.
clientExe = TestUtil.startClient(client, "", None, None, False)
clientExe.expect('File lock acquired.\.*')
clientExe.sendline('go')
clientExe.expect('File lock released.')
clientExe.wait()
print "ok"
| gpl-2.0 | -2,917,443,800,651,901,400 | 31.44 | 103 | 0.634402 | false |
ibackus/diskpy | diskpy/ICgen/sigma_profile.py | 1 | 9111 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 23 10:17:53 2014
@author: ibackus
"""
# External modules
import numpy as np
import pynbody
SimArray = pynbody.array.SimArray
# diskpy modules
from diskpy.pdmath import smoothstep
from diskpy.utils import match_units
def make_profile(ICobj):
"""
A wrapper for generating surface density profiles according to the IC object.
Settings for the profile are defined in ICobj.settings. Which profile gets
used is defined by ICobj.settings.sigma.kind
Currently available kinds are:
viscous
powerlaw
MQWS
**RETURNS**
r : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
kind = ICobj.settings.sigma.kind
if kind == 'powerlaw':
r, sigma = powerlaw(ICobj.settings, ICobj.T)
elif (kind == 'mqws') | (kind == 'MQWS'):
r, sigma = MQWS(ICobj.settings, ICobj.T)
elif (kind == 'viscous'):
r, sigma = viscous(ICobj.settings)
elif (kind == 'gaussring'):
r, sigma = gaussian_ring(ICobj.settings)
else:
        raise TypeError('Could not make profile for kind {0}'.format(kind))
if hasattr(ICobj.settings.sigma, 'innercut'):
sigma = _applycut(r, sigma, ICobj.settings.sigma.innercut, False)
if hasattr(ICobj.settings.sigma, 'outercut'):
sigma = _applycut(r, sigma, ICobj.settings.sigma.outercut, True)
return r, sigma
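# Sketch of typical use, assuming a configured ICgen IC object is available (the
# name ICobj below is a placeholder; see ICgen_settings.py for the settings that
# select and parameterise the profile kind). Illustration only.
def _plot_profile_example(ICobj):
    """
    Plot the surface density profile generated for ICobj (illustration only)
    """
    import matplotlib.pyplot as plt
    r, sigma = make_profile(ICobj)
    plt.plot(r, sigma)
    plt.xlabel('R')
    plt.ylabel('Sigma')
    plt.show()
    return r, sigma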
def _applycut(r, sigma, rcut, outer=True):
"""
Applies a hard cut to a surface density profile (sigma). If outer=True,
sigma = 0 at r > rcut. Otherwise, sigma = 0 at r < rcut. If rcut is
    None, inf, or nan, no cut is performed.
"""
if rcut is None:
return sigma
elif np.isnan(rcut) or np.isinf(rcut):
return sigma
if outer:
mask = r > rcut
else:
mask = r < rcut
if np.any(mask):
sigma[mask] = 0
return sigma
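# Minimal illustration of _applycut on plain numpy arrays (arbitrary values): an
# outer cut zeroes sigma beyond rcut, an inner cut zeroes it inside rcut, and a
# cut radius of None/inf/nan leaves sigma unchanged.
def _applycut_example():
    """
    Demonstrate the three behaviours of _applycut (illustration only)
    """
    r = np.linspace(0., 10., 11)
    sigma = np.ones_like(r)
    outer = _applycut(r, sigma.copy(), 5.0, outer=True)   # sigma = 0 for r > 5
    inner = _applycut(r, sigma.copy(), 5.0, outer=False)  # sigma = 0 for r < 5
    nocut = _applycut(r, sigma.copy(), None)              # sigma unchanged
    return outer, inner, nocut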
def gaussian_ring(settings):
"""
Generates a gaussian ring surface density profile according to:
    .. math:: \\Sigma = \\Sigma_0 \\exp(-(R-R_d)^2/(2a^2))
    .. math:: \\Sigma_0 = M_d/((2\\pi)^{3/2} a R_d)
    Here `a` is called the ringwidth.
The max radius is determined automatically
Parameters
----------
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
Returns
-------
R : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
Rd = settings.sigma.Rd
ringwidth = settings.sigma.ringwidth
n_points = settings.sigma.n_points
m_disk = settings.sigma.m_disk
Rmax = (Rd + 5*ringwidth).in_units(Rd.units)
Rmax = max(Rmax, Rd*2.0)
R = SimArray(np.linspace(0, Rmax, n_points), Rd.units)
sigma0 = m_disk / (ringwidth * Rd)
sigma0 *= (2*np.pi)**-1.5
expArg = -(R-Rd)**2 / (2*ringwidth**2)
expArg.convert_units('1')
sigma = sigma0 * np.exp(expArg)
return R, sigma
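# Quick numerical check of the normalisation used in gaussian_ring, written with
# plain numpy and arbitrary numbers rather than an IC settings object: integrating
# 2*pi*R*Sigma(R) should recover m_disk (to quadrature accuracy) when Rd is several
# ringwidths, as assumed by the formula above.
def _gaussian_ring_mass_check(m_disk=1.0, Rd=5.0, ringwidth=0.5):
    """
    Return the numerically integrated ring mass; should be close to m_disk
    """
    R = np.linspace(0., Rd + 10.*ringwidth, 20000)
    sigma0 = m_disk / ((2*np.pi)**1.5 * ringwidth * Rd)
    sigma = sigma0 * np.exp(-(R - Rd)**2 / (2*ringwidth**2))
    return np.trapz(2*np.pi*R*sigma, R)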
def viscous(settings):
"""
Generates a surface density profile derived from a self-similarity solution
for a viscous disk, according to:
sigma ~ r^-gamma exp(-r^(2-gamma))
Where r is a dimensionless radius and gamma is a constant less than 2.
Rd (disk radius) is defined as the radius containing 95% of the disk mass
**ARGUMENTS**
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
**RETURNS**
R : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
Rd = settings.sigma.Rd
rin = settings.sigma.rin
rmax = settings.sigma.rmax
n_points = settings.sigma.n_points
gamma = settings.sigma.gamma
m_disk = settings.sigma.m_disk
# Define the fraction of mass contained within Rd
A = 0.95
# Normalization for r
R1 = Rd / (np.log(1/(1-A))**(1/(2-gamma)))
Rmax = rmax * Rd
Rin = rin * Rd
R = np.linspace(0, Rmax, n_points)
r = (R/R1).in_units('1')
sigma = (r**-gamma) * np.exp(-r**(2-gamma)) * (m_disk/(2*np.pi*R1*R1)) * (2-gamma)
# Deal with infinities at the origin with a hard cut off
sigma[0] = sigma[1]
# Apply interior cutoff
cut_mask = R < Rin
if np.any(cut_mask):
sigma[cut_mask] *= smoothstep(r[cut_mask],degree=21,rescale=True)
return R, sigma
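# Numerical check of the disk-radius convention used in viscous, with plain numpy
# and arbitrary parameters (gamma < 2): defining R1 = Rd/ln(1/(1-A))**(1/(2-gamma))
# with A = 0.95 should place ~95% of the disk mass inside Rd.
def _viscous_mass_fraction_check(m_disk=1.0, Rd=10.0, gamma=1.0, A=0.95):
    """
    Return the mass fraction enclosed within Rd; should be close to A
    """
    R1 = Rd / (np.log(1/(1 - A))**(1/(2 - gamma)))
    R = np.linspace(1e-6, 20*Rd, 200000)
    r = R/R1
    sigma = (r**-gamma) * np.exp(-r**(2 - gamma))
    sigma *= (m_disk/(2*np.pi*R1*R1)) * (2 - gamma)
    dm = 2*np.pi*R*sigma
    return np.trapz(dm[R <= Rd], R[R <= Rd]) / np.trapz(dm, R)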
def powerlaw(settings, T = None):
"""
Generates a surface density profile according to a powerlaw sigma ~ r^p
with a smooth interior cutoff and smooth exterior exponential cutoff.
**ARGUMENTS**
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
T : callable function
Function that returns temperature of the disk as a function of radius
        If None, a powerlaw temperature is assumed
**RETURNS**
R : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
# Parse settings
Rd = settings.sigma.Rd
rin = settings.sigma.rin
rmax = settings.sigma.rmax
cutlength = settings.sigma.cutlength
Mstar = settings.physical.M
Qmin = settings.sigma.Qmin
n_points = settings.sigma.n_points
m = settings.physical.m
power = settings.sigma.power
gamma = settings.physical.gamma_cs()
if T is None:
# If no callable object to calculate Temperature(R) is provided,
# default to a powerlaw T ~ R^-q
T0 = SimArray([129.0],'K') # Temperature at 1 AU
R0 = SimArray([1.0],'au')
q = 0.59
def T(x):
return T0 * np.power((x/R0).in_units('1'),-q)
Rd = match_units(pynbody.units.au, Rd)[1]
Mstar = match_units(pynbody.units.Msol, Mstar)[1]
# Molecular weight
m = match_units(m, pynbody.units.m_p)[0]
# Maximum R to calculate sigma at (needed for the exponential cutoff region)
Rmax = rmax*Rd
# Q calculation parameters:
G = SimArray([1.0],'G')
kB = SimArray([1.0],'k')
# Initialize stuff
A = SimArray(1.0,'Msol')/(2*np.pi*np.power(Rd,2))
# dflemin3 Nov. 4, 2015
# Made units more explicit via SimArrays
r_units = Rd.units
R = SimArray(np.linspace(0,Rmax,n_points),r_units)
r = R/Rd
# Calculate sigma
# Powerlaw
#dflemin3 edit 06/10/2015: Try powerlaw of the form sigma ~ r^power
sigma = A*np.power(r,power)
sigma[0] = 0.0
# Exterior cutoff
sigma[r>1] *= np.exp(-(r[r>1] - 1)**2 / (2*cutlength**2))
# Interior cutoff
sigma[r<rin] *= smoothstep(r[r<rin],degree=21,rescale=True)
# Calculate Q
Q = np.sqrt(Mstar*gamma*kB*T(R)/(G*m*R**3))/(np.pi*sigma)
Q.convert_units('1')
# Rescale sigma to meet the minimum Q requirement
sigma *= Q.min()/Qmin
# Calculate Q
Q = np.sqrt(Mstar*gamma*kB*T(R)/(G*m*R**3))/(np.pi*sigma)
Q.convert_units('1')
return R, sigma
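# Standalone sketch of the Toomre Q rescaling applied above (and in MQWS below),
# using plain numpy and hypothetical cs/omega profiles in consistent units: after
# scaling sigma by Q.min()/Qmin, the minimum Toomre Q of the disk equals Qmin.
def _toomre_rescale_example(Qmin=1.5):
    """
    Rescale a trial surface density so its minimum Toomre Q equals Qmin
    (illustration only; the profiles below are hypothetical)
    """
    R = np.linspace(0.1, 10., 100)
    sigma = R**-1.0                     # trial surface density
    cs = 0.05 * R**-0.25                # sound speed profile
    omega = R**-1.5                     # Keplerian angular frequency (GM = 1)
    Q = cs*omega / (np.pi*sigma)        # Toomre Q for a gas disk
    sigma *= Q.min()/Qmin
    return sigma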
def MQWS(settings, T):
"""
    Generates a surface density profile as per the method used in Mayer, Quinn,
Wadsley, and Stadel 2004
** ARGUMENTS **
NOTE: if units are not supplied, assumed units are AU, Msol
settings : IC settings
settings like those contained in an IC object (see ICgen_settings.py)
T : callable
A function to calculate temperature as a function of radius
** RETURNS **
r : SimArray
Radii at which sigma is calculated
sigma : SimArray
Surface density profile as a function of R
"""
# Q calculation parameters:
G = SimArray([1.0],'G')
kB = SimArray([1.0],'k')
# Load in settings
n_points = settings.sigma.n_points
rin = settings.sigma.rin
rout = settings.sigma.rout
rmax = settings.sigma.rmax
Qmin = settings.sigma.Qmin
m = settings.physical.m
Mstar = settings.physical.M
#m_disk = settings.sigma.m_disk
rin = match_units(pynbody.units.au, rin)[1]
rout = match_units(pynbody.units.au, rout)[1]
#m_disk = match_units(pynbody.units.Msol, m_disk)[1]
if rmax is None:
rmax = 2.5 * rout
else:
rmax = match_units(pynbody.units.au, rmax)[1]
r = np.linspace(0, rmax, n_points)
a = (rin/r).in_units('1')
b = (r/rout).in_units('1')
sigma = (np.exp(-a**2 - b**2)/r) * Mstar.units/r.units
# Calculate Q
Q = np.sqrt(Mstar*kB*T(r)/(G*m*r**3))/(np.pi*sigma)
Q.convert_units('1')
sigma *= np.nanmin(Q)/Qmin
# Remove all nans
sigma[np.isnan(sigma)] = 0.0
return r, sigma
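# Shape-only comparison of the three analytic profiles defined in this module,
# evaluated on a plain numpy grid with hypothetical parameters (no IC settings
# object or temperature function required). Normalisations are arbitrary.
def _profile_shapes_example():
    """
    Return un-normalised gaussian ring, viscous and MQWS profile shapes
    """
    R = np.linspace(0.01, 20., 500)
    ring = np.exp(-(R - 5.)**2 / (2*0.5**2))         # gaussian_ring: Rd=5, a=0.5
    visc = (R/5.)**-1.0 * np.exp(-(R/5.))            # viscous: gamma=1, R1=5
    mqws = np.exp(-(0.5/R)**2 - (R/5.)**2) / R       # MQWS: rin=0.5, rout=5
    return R, ring, visc, mqws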
| mit | -394,328,247,553,542,200 | 25.955621 | 89 | 0.584897 | false |
TheArtling/django-active-users-stats | active_users/tests/test_settings.py | 1 | 1637 | """Settings that need to be set in order to run the tests."""
import os
DEBUG = True
SITE_ID = 1
APP_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
ROOT_URLCONF = 'active_users.tests.urls'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(APP_ROOT, '../app_static')
MEDIA_ROOT = os.path.join(APP_ROOT, '../app_media')
STATICFILES_DIRS = (
os.path.join(APP_ROOT, 'static'),
)
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(APP_ROOT, 'tests/test_app/templates')],
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
)
}
}]
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
]
INTERNAL_APPS = [
'active_users',
'active_users.tests.test_app',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
SECRET_KEY = 'foobar'
| mit | 5,655,990,248,203,060,000 | 23.432836 | 65 | 0.654856 | false |
erigones/esdc-ce | api/dc/template/api_views.py | 1 | 3081 | from django.utils.translation import ugettext_noop as _
from api import status
from api.api_views import APIView
from api.exceptions import PreconditionRequired, ObjectAlreadyExists
from api.task.response import SuccessTaskResponse
from api.utils.db import get_object
from api.dc.utils import remove_dc_binding_virt_object
from api.dc.template.serializers import TemplateSerializer
from api.dc.messages import LOG_TEMPLATE_ATTACH, LOG_TEMPLATE_DETACH
from api.template.messages import LOG_TEMPLATE_UPDATE
from vms.models import VmTemplate
class DcTemplateView(APIView):
serializer = TemplateSerializer
order_by_default = order_by_fields = ('name',)
def __init__(self, request, name, data):
super(DcTemplateView, self).__init__(request)
self.data = data
self.name = name
if name:
attrs = {'name': name}
if request.method != 'POST':
attrs['dc'] = request.dc
self.vmt = get_object(request, VmTemplate, attrs, sr=('owner', 'dc_bound'), exists_ok=True,
noexists_fail=True)
else:
self.vmt = VmTemplate.objects.select_related('owner', 'dc_bound').filter(dc=request.dc)\
.exclude(access__in=VmTemplate.INVISIBLE)\
.order_by(*self.order_by)
def get(self, many=False):
if many or not self.name:
if self.full:
if self.vmt:
res = self.serializer(self.request, self.vmt, many=True).data
else:
res = []
else:
res = list(self.vmt.values_list('name', flat=True))
else:
res = self.serializer(self.request, self.vmt).data
return SuccessTaskResponse(self.request, res)
def _remove_dc_binding(self, res):
if self.vmt.dc_bound:
remove_dc_binding_virt_object(res.data.get('task_id'), LOG_TEMPLATE_UPDATE, self.vmt,
user=self.request.user)
def post(self):
dc, vmt = self.request.dc, self.vmt
if vmt.dc.filter(id=dc.id).exists():
raise ObjectAlreadyExists(model=VmTemplate)
ser = self.serializer(self.request, vmt)
vmt.dc.add(dc)
res = SuccessTaskResponse(self.request, ser.data, obj=vmt, status=status.HTTP_201_CREATED,
detail_dict=ser.detail_dict(), msg=LOG_TEMPLATE_ATTACH)
self._remove_dc_binding(res)
return res
def delete(self):
dc, vmt = self.request.dc, self.vmt
if dc.vm_set.filter(template=vmt).exists():
raise PreconditionRequired(_('Template is used by some VMs'))
ser = self.serializer(self.request, vmt)
vmt.dc.remove(dc)
res = SuccessTaskResponse(self.request, None, obj=vmt, detail_dict=ser.detail_dict(), msg=LOG_TEMPLATE_DETACH)
self._remove_dc_binding(res)
return res
| apache-2.0 | 5,641,864,116,140,040,000 | 37.037037 | 119 | 0.586173 | false |
isralopez/geonode | setup.py | 1 | 5140 | #########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
def fullsplit(path, result=None):
"""
Split a pathname into components (the opposite of os.path.join) in a
platform-neutral way.
"""
if result is None:
result = []
head, tail = os.path.split(path)
if head == '':
return [tail] + result
if head == path:
return result
return fullsplit(head, [tail] + result)
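# For example (illustrative only), fullsplit('geonode/layers/templates') returns
# ['geonode', 'layers', 'templates'], which is joined with '.' below to form the
# dotted package name 'geonode.layers.templates'.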
# Tell distutils not to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
geonode_dir = 'geonode'
for dirpath, dirnames, filenames in os.walk(geonode_dir):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
        if dirname.startswith('.'):
            del dirnames[i]
if '__init__.py' in filenames:
packages.append('.'.join(fullsplit(dirpath)))
elif filenames:
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(name='GeoNode',
version=__import__('geonode').get_version(),
description="Application for serving and sharing geospatial data",
long_description=open('README').read(),
classifiers=[
"Development Status :: 4 - Beta"],
keywords='',
author='GeoNode Developers',
author_email='dev@geonode.org',
url='http://geonode.org',
license='GPL',
packages=packages,
data_files=data_files,
install_requires=[
## The commented name next to the package
## is the Ubuntu 14.04 package that provides it.
## Apps with official Ubuntu 14.04 packages
# native dependencies
"pillow", # python-pillow
"lxml", # python-lxml
# "psycopg2==2.4.5", # python-psycopg2
"Django >=1.6.1, <=1.6.5", # python-django
# Other
"beautifulsoup4==4.2.1", # python-bs4
"MultipartPostHandler==0.1.0", # python-multipartposthandler
"httplib2==0.8", # python-httplib2
"transifex-client==0.10", # transifex-client
"Paver==1.2.1", # python-paver
"nose <=1.0, <=1.3.1", # python-nose
"django-nose==1.2", # python-django-nose
# Django Apps
"django-pagination >=1.0.5, <=1.0.7", # python-django-pagination
"django-jsonfield==0.9.12", # python-django-jsonfield
"django-extensions==1.2.5", # python-django-extensions
"django-taggit==0.12", # python-django-taggit
"django-mptt==0.6.1", # django-mptt
"django-guardian==1.2.0", #django-guardian
# "django-admin-bootstrapped==1.6.5", #django-admin-bootstrapped
## Apps with packages provided in GeoNode's PPA on Launchpad.
"pinax-theme-bootstrap==3.0a11",
"pinax-theme-bootstrap-account==1.0b2",
"django-forms-bootstrap==2.0.3.post1",
"django-friendly-tag-loader==1.1",
"django-taggit-templatetags==0.4.6dev",
"django-activity-stream==0.4.5beta1",
"django-downloadview==1.2",
"django-tastypie==0.11.0",
"django-polymorphic==0.5.3",
"django-leaflet==0.13.2",
"django-autocomplete-light==1.4.13",
"django-modeltranslation",
# GeoNode org maintained apps.
"django-geoexplorer==4.0.4",
"geonode-user-messages==0.1.1",
"geonode-avatar==2.1.3",
"geonode-announcements==1.0.3",
"geonode-agon-ratings==0.3.1",
"geonode-user-accounts==1.0.3",
"geonode-arcrest==10.2",
"geonode-notification==1.1.1",
"geonode-dialogos==0.4",
"gsconfig==0.6.10",
"gsimporter==0.1",
"gisdata==0.5.4",
# GeoPython dependencies
"OWSLib >=0.7.2, <=0.8.7",
"pycsw >=1.6.4, <=1.8.2",
# haystack/elasticsearch, uncomment to use
"django-haystack==2.1.0",
"pyelasticsearch==0.6.1"
],
zip_safe=False,
)
| gpl-3.0 | -7,154,889,025,022,198,000 | 35.197183 | 104 | 0.609728 | false |
hradec/cortex | test/IECoreHoudini/FromHoudiniPointsConverter.py | 1 | 49213 | ##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import hou
import IECore
import IECoreHoudini
import unittest
import os
import math
class TestFromHoudiniPointsConverter( IECoreHoudini.TestCase ) :
def createBox( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
box = geo.createNode( "box" )
return box
def createTorus( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
torus.parm( "rows" ).set( 10 )
torus.parm( "cols" ).set( 10 )
return torus
def createPoints( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
box = geo.createNode( "box" )
facet = geo.createNode( "facet" )
facet.parm("postnml").set(True)
points = geo.createNode( "scatter" )
points.parm( "npts" ).set( 5000 )
facet.setInput( 0, box )
points.setInput( 0, facet )
return points
# creates a converter
def testCreateConverter( self ) :
box = self.createBox()
converter = IECoreHoudini.FromHoudiniPointsConverter( box )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
return converter
# creates a converter
def testFactory( self ) :
box = self.createBox()
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPolygonsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.PointsPrimitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( box, resultType = IECore.TypeId.Parameter )
self.assertEqual( converter, None )
self.failUnless( IECore.TypeId.PointsPrimitive in IECoreHoudini.FromHoudiniGeometryConverter.supportedTypes() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( IECore.TypeId.PointsPrimitive )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
converter = IECoreHoudini.FromHoudiniGeometryConverter.createDummy( [ IECore.TypeId.PointsPrimitive ] )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
# performs geometry conversion
def testDoConversion( self ) :
converter = self.testCreateConverter()
result = converter.convert()
self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
def testConvertFromHOMGeo( self ) :
geo = self.createPoints().geometry()
converter = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo )
self.failUnless( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.failUnless( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
converter2 = IECoreHoudini.FromHoudiniGeometryConverter.createFromGeo( geo, IECore.TypeId.PointsPrimitive )
self.failUnless( converter2.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
# convert a mesh
def testConvertMesh( self ) :
torus = self.createTorus()
converter = IECoreHoudini.FromHoudiniPointsConverter( torus )
result = converter.convert()
self.assertEqual( result.typeId(), IECore.PointsPrimitive.staticTypeId() )
bbox = result.bound()
self.assertEqual( bbox.min.x, -2.0 )
self.assertEqual( bbox.max.x, 2.0 )
self.assertEqual( result.numPoints, 100 )
for i in range( result.numPoints ) :
self.assert_( result["P"].data[i].x >= bbox.min.x )
self.assert_( result["P"].data[i].x <= bbox.max.x )
# test prim/vertex attributes
def testConvertPrimVertAttributes( self ) :
torus = self.createTorus()
geo = torus.parent()
# add vertex normals
facet = geo.createNode( "facet", node_name = "add_point_normals" )
facet.parm("postnml").set(True)
facet.setInput( 0, torus )
# add a primitive colour attributes
primcol = geo.createNode( "primitive", node_name = "prim_colour" )
primcol.parm("doclr").set(1)
primcol.parm("diffr").setExpression("rand($PR)")
primcol.parm("diffg").setExpression("rand($PR+1)")
primcol.parm("diffb").setExpression("rand($PR+2)")
primcol.setInput( 0, facet )
# add a load of different vertex attributes
vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True )
vert_f1.parm("name").set("vert_f1")
vert_f1.parm("class").set(3)
vert_f1.parm("value1").setExpression("$VTX*0.1")
vert_f1.setInput( 0, primcol )
vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True )
vert_f2.parm("name").set("vert_f2")
vert_f2.parm("class").set(3)
vert_f2.parm("size").set(2)
vert_f2.parm("value1").setExpression("$VTX*0.1")
vert_f2.parm("value2").setExpression("$VTX*0.1")
vert_f2.setInput( 0, vert_f1 )
vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True )
vert_f3.parm("name").set("vert_f3")
vert_f3.parm("class").set(3)
vert_f3.parm("size").set(3)
vert_f3.parm("value1").setExpression("$VTX*0.1")
vert_f3.parm("value2").setExpression("$VTX*0.1")
vert_f3.parm("value3").setExpression("$VTX*0.1")
vert_f3.setInput( 0, vert_f2 )
vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True )
vert_i1.parm("name").set("vert_i1")
vert_i1.parm("class").set(3)
vert_i1.parm("type").set(1)
vert_i1.parm("value1").setExpression("$VTX*0.1")
vert_i1.setInput( 0, vert_f3 )
vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True )
vert_i2.parm("name").set("vert_i2")
vert_i2.parm("class").set(3)
vert_i2.parm("type").set(1)
vert_i2.parm("size").set(2)
vert_i2.parm("value1").setExpression("$VTX*0.1")
vert_i2.parm("value2").setExpression("$VTX*0.1")
vert_i2.setInput( 0, vert_i1 )
vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True )
vert_i3.parm("name").set("vert_i3")
vert_i3.parm("class").set(3)
vert_i3.parm("type").set(1)
vert_i3.parm("size").set(3)
vert_i3.parm("value1").setExpression("$VTX*0.1")
vert_i3.parm("value2").setExpression("$VTX*0.1")
vert_i3.parm("value3").setExpression("$VTX*0.1")
vert_i3.setInput( 0, vert_i2 )
vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True )
vert_v3f.parm("name").set("vert_v3f")
vert_v3f.parm("class").set(3)
vert_v3f.parm("type").set(2)
vert_v3f.parm("value1").setExpression("$VTX*0.1")
vert_v3f.parm("value2").setExpression("$VTX*0.1")
vert_v3f.parm("value3").setExpression("$VTX*0.1")
vert_v3f.setInput( 0, vert_i3 )
detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True )
detail_i3.parm("name").set("detail_i3")
detail_i3.parm("class").set(0)
detail_i3.parm("type").set(1)
detail_i3.parm("size").set(3)
detail_i3.parm("value1").set(123)
detail_i3.parm("value2").set(456.789) # can we catch it out with a float?
detail_i3.parm("value3").set(789)
detail_i3.setInput( 0, vert_v3f )
out = geo.createNode( "null", node_name="OUT" )
out.setInput( 0, detail_i3 )
# convert it all
converter = IECoreHoudini.FromHoudiniPointsConverter( out )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
bbox = result.bound()
self.assertEqual( bbox.min.x, -2.0 )
self.assertEqual( bbox.max.x, 2.0 )
self.assertEqual( result.numPoints, 100 )
for i in range( result.numPoints ) :
self.assert_( result["P"].data[i].x >= bbox.min.x )
self.assert_( result["P"].data[i].x <= bbox.max.x )
# test point attributes
self.assert_( "P" in result )
self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['P'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assert_( "N" in result )
self.assertEqual( result['N'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['N'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['N'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
# test detail attributes
self.assert_( "detail_i3" in result )
self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData )
self.assertEqual( result['detail_i3'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result['detail_i3'].data.value.x, 123 )
self.assertEqual( result['detail_i3'].data.value.y, 456 )
self.assertEqual( result['detail_i3'].data.value.z, 789 )
# test primitive attributes
self.assert_( "Cd" not in result )
# test vertex attributes
attrs = [ "vert_f1", "vert_f2", "vert_f3", "vert_i1", "vert_i2", "vert_i3", "vert_v3f" ]
for a in attrs :
self.assert_( a not in result )
self.assert_( result.arePrimitiveVariablesValid() )
# test prim/vertex attributes on a single primitive (mesh)
def testConvertMeshPrimVertAttributes( self ) :
torus = self.createTorus()
torus.parm( "type" ).set( 1 )
geo = torus.parent()
# add vertex normals
facet = geo.createNode( "facet", node_name = "add_point_normals" )
facet.parm("postnml").set(True)
facet.setInput( 0, torus )
# add a primitive colour attributes
primcol = geo.createNode( "primitive", node_name = "prim_colour" )
primcol.parm("doclr").set(1)
primcol.parm("diffr").setExpression("rand($PR)")
primcol.parm("diffg").setExpression("rand($PR+1)")
primcol.parm("diffb").setExpression("rand($PR+2)")
primcol.setInput( 0, facet )
# add a load of different vertex attributes
vert_f1 = geo.createNode( "attribcreate", node_name = "vert_f1", exact_type_name=True )
vert_f1.parm("name").set("vert_f1")
vert_f1.parm("class").set(3)
vert_f1.parm("value1").setExpression("$VTX*0.1")
vert_f1.setInput( 0, primcol )
vert_f2 = geo.createNode( "attribcreate", node_name = "vert_f2", exact_type_name=True )
vert_f2.parm("name").set("vert_f2")
vert_f2.parm("class").set(3)
vert_f2.parm("size").set(2)
vert_f2.parm("value1").setExpression("$VTX*0.1")
vert_f2.parm("value2").setExpression("$VTX*0.1")
vert_f2.setInput( 0, vert_f1 )
vert_f3 = geo.createNode( "attribcreate", node_name = "vert_f3", exact_type_name=True )
vert_f3.parm("name").set("vert_f3")
vert_f3.parm("class").set(3)
vert_f3.parm("size").set(3)
vert_f3.parm("value1").setExpression("$VTX*0.1")
vert_f3.parm("value2").setExpression("$VTX*0.1")
vert_f3.parm("value3").setExpression("$VTX*0.1")
vert_f3.setInput( 0, vert_f2 )
vert_quat = geo.createNode( "attribcreate", node_name = "vert_quat", exact_type_name=True )
vert_quat.parm("name").set("orient")
vert_quat.parm("class").set(3)
vert_quat.parm("size").set(4)
vert_quat.parm("value1").setExpression("$VTX*0.1")
vert_quat.parm("value2").setExpression("$VTX*0.2")
vert_quat.parm("value3").setExpression("$VTX*0.3")
vert_quat.parm("value4").setExpression("$VTX*0.4")
vert_quat.setInput( 0, vert_f3 )
vert_quat2 = geo.createNode( "attribcreate", node_name = "vert_quat2", exact_type_name=True )
vert_quat2.parm("name").set("quat_2")
vert_quat2.parm("class").set(3)
vert_quat2.parm("size").set(4)
vert_quat2.parm("typeinfo").set(6) # set type info to quaternion
vert_quat2.parm("value1").setExpression("$VTX*0.2")
vert_quat2.parm("value2").setExpression("$VTX*0.4")
vert_quat2.parm("value3").setExpression("$VTX*0.6")
vert_quat2.parm("value4").setExpression("$VTX*0.8")
vert_quat2.setInput( 0, vert_quat )
vert_m44create = geo.createNode( "attribcreate", node_name = "vert_m44create", exact_type_name=True )
vert_m44create.parm("name").set("m44")
vert_m44create.parm("class").set(3)
vert_m44create.parm("size").set(16)
vert_m44create.parm("typeinfo").set(7) # set type info to transformation matrix
vert_m44create.setInput( 0, vert_quat2 )
vert_m44 = geo.createNode( "attribwrangle", node_name = "vert_m44", exact_type_name=True )
vert_m44.parm("snippet").set("4@m44 = maketransform(0,0,{ 10, 20, 30 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 });")
vert_m44.parm("class").set(3)
vert_m44.setInput( 0, vert_m44create )
vert_m33create = geo.createNode( "attribcreate", node_name = "vert_m33create", exact_type_name=True )
vert_m33create.parm("name").set("m33")
vert_m33create.parm("class").set(3)
vert_m33create.parm("size").set(9)
vert_m33create.setInput( 0, vert_m44 )
vert_m33 = geo.createNode( "attribwrangle", node_name = "vert_m33", exact_type_name=True )
vert_m33.parm("snippet").set("3@m33 = matrix3(maketransform(0,0,{ 0, 0, 0 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 }));")
vert_m33.parm("class").set(3)
vert_m33.setInput( 0, vert_m33create )
vert_i1 = geo.createNode( "attribcreate", node_name = "vert_i1", exact_type_name=True )
vert_i1.parm("name").set("vert_i1")
vert_i1.parm("class").set(3)
vert_i1.parm("type").set(1)
vert_i1.parm("value1").setExpression("$VTX*0.1")
vert_i1.setInput( 0, vert_m33 )
vert_i2 = geo.createNode( "attribcreate", node_name = "vert_i2", exact_type_name=True )
vert_i2.parm("name").set("vert_i2")
vert_i2.parm("class").set(3)
vert_i2.parm("type").set(1)
vert_i2.parm("size").set(2)
vert_i2.parm("value1").setExpression("$VTX*0.1")
vert_i2.parm("value2").setExpression("$VTX*0.1")
vert_i2.setInput( 0, vert_i1 )
vert_i3 = geo.createNode( "attribcreate", node_name = "vert_i3", exact_type_name=True )
vert_i3.parm("name").set("vert_i3")
vert_i3.parm("class").set(3)
vert_i3.parm("type").set(1)
vert_i3.parm("size").set(3)
vert_i3.parm("value1").setExpression("$VTX*0.1")
vert_i3.parm("value2").setExpression("$VTX*0.1")
vert_i3.parm("value3").setExpression("$VTX*0.1")
vert_i3.setInput( 0, vert_i2 )
vert_v3f = geo.createNode( "attribcreate", node_name = "vert_v3f", exact_type_name=True )
vert_v3f.parm("name").set("vert_v3f")
vert_v3f.parm("class").set(3)
vert_v3f.parm("type").set(2)
vert_v3f.parm("value1").setExpression("$VTX*0.1")
vert_v3f.parm("value2").setExpression("$VTX*0.1")
vert_v3f.parm("value3").setExpression("$VTX*0.1")
vert_v3f.setInput( 0, vert_i3 )
vertString = geo.createNode( "attribcreate", node_name = "vertString", exact_type_name=True )
vertString.parm("name").set("vertString")
vertString.parm("class").set(3)
vertString.parm("type").set(3)
vertString.parm("string").setExpression("'string %06d!' % pwd().curPoint().number()", hou.exprLanguage.Python)
vertString.setInput( 0, vert_v3f )
vertString2 = geo.createNode( "attribcreate", node_name = "vertString2", exact_type_name=True )
vertString2.parm("name").set("vertString2")
vertString2.parm("class").set(3)
vertString2.parm("type").set(3)
vertString2.parm("string").setExpression("vals = [ 'd','c','e','a','g','f','b' ]\nreturn vals[ pwd().curPoint().number() % 7 ]", hou.exprLanguage.Python)
vertString2.setInput( 0, vertString )
vert_iList = geo.createNode( "attribwrangle", node_name = "vert_iList", exact_type_name=True )
vert_iList.parm("snippet").set("int i[];\ni[]@vert_iList = i;")
vert_iList.parm("class").set(3)
vert_iList.setInput( 0, vertString2 )
vert_fList = geo.createNode( "attribwrangle", node_name = "vert_fList", exact_type_name=True )
vert_fList.parm("snippet").set("float f[];\nf[]@vert_fList = f;")
vert_fList.parm("class").set(3)
vert_fList.setInput( 0, vert_iList )
detail_i3 = geo.createNode( "attribcreate", node_name = "detail_i3", exact_type_name=True )
detail_i3.parm("name").set("detail_i3")
detail_i3.parm("class").set(0)
detail_i3.parm("type").set(1)
detail_i3.parm("size").set(3)
detail_i3.parm("value1").set(123)
detail_i3.parm("value2").set(456.789) # can we catch it out with a float?
detail_i3.parm("value3").set(789)
detail_i3.setInput( 0, vert_fList )
detail_m33create = geo.createNode( "attribcreate", node_name = "detail_m33create", exact_type_name=True )
detail_m33create.parm("name").set("detail_m33")
detail_m33create.parm("class").set(0)
detail_m33create.parm("size").set(9)
detail_m33create.setInput( 0, detail_i3 )
detail_m33 = geo.createNode( "attribwrangle", node_name = "detail_m33", exact_type_name=True )
detail_m33.parm("snippet").set("3@detail_m33 = matrix3( maketransform(0,0,{ 10, 20, 30 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 }) );")
detail_m33.parm("class").set(0)
detail_m33.setInput( 0, detail_m33create )
detail_m44create = geo.createNode( "attribcreate", node_name = "detail_m44create", exact_type_name=True )
detail_m44create.parm("name").set("detail_m44")
detail_m44create.parm("class").set(0)
detail_m44create.parm("size").set(16)
detail_m44create.setInput( 0, detail_m33 )
detail_m44 = geo.createNode( "attribwrangle", node_name = "detail_m44", exact_type_name=True )
detail_m44.parm("snippet").set("4@detail_m44 = maketransform(0,0,{ 10, 20, 30 },{ 30, 45, 60},{ 3, 4, 5 },{ 0, 0, 0 });")
detail_m44.parm("class").set(0)
detail_m44.setInput( 0, detail_m44create )
detail_iList = geo.createNode( "attribwrangle", node_name = "detail_iList", exact_type_name=True )
detail_iList.parm("snippet").set("int i[];\ni[]@detail_iList = i;")
detail_iList.parm("class").set(0)
detail_iList.setInput( 0, detail_m44 )
detail_fList = geo.createNode( "attribwrangle", node_name = "detail_fList", exact_type_name=True )
detail_fList.parm("snippet").set("float f[];\nf[]@detail_fList = f;")
detail_fList.parm("class").set(0)
detail_fList.setInput( 0, detail_iList )
out = geo.createNode( "null", node_name="OUT" )
out.setInput( 0, detail_fList )
# convert it all
converter = IECoreHoudini.FromHoudiniPointsConverter( out )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
self.assert_( result.isInstanceOf( IECore.TypeId.PointsPrimitive ) )
bbox = result.bound()
self.assertEqual( bbox.min.x, -2.0 )
self.assertEqual( bbox.max.x, 2.0 )
self.assertEqual( result.numPoints, 100 )
for i in range( result.numPoints ) :
self.assert_( result["P"].data[i].x >= bbox.min.x )
self.assert_( result["P"].data[i].x <= bbox.max.x )
# integer and float list attributes are not currently supported, so should not appear in the primitive variable lists:
self.assertTrue( "vert_iList" not in result.keys() )
self.assertTrue( "vert_fList" not in result.keys() )
self.assertTrue( "detail_iList" not in result.keys() )
self.assertTrue( "detail_fList" not in result.keys() )
# test point attributes
self.assert_( "P" in result )
self.assertEqual( result['P'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['P'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['P'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assert_( "N" in result )
self.assertEqual( result['N'].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result['N'].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result['N'].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
# test detail attributes
self.assert_( "detail_i3" in result )
self.assertEqual( result['detail_i3'].data.typeId(), IECore.TypeId.V3iData )
self.assertEqual( result['detail_i3'].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result['detail_i3'].data.value.x, 123 )
self.assertEqual( result['detail_i3'].data.value.y, 456 )
self.assertEqual( result['detail_i3'].data.value.z, 789 )
# test primitive attributes
self.assert_( "Cs" in result )
self.assertEqual( result["Cs"].data.typeId(), IECore.TypeId.Color3fVectorData )
self.assertEqual( result["Cs"].interpolation, IECore.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( result["Cs"].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ) ) :
for j in range( 0, 3 ) :
self.assert_( result["Cs"].data[i][j] >= 0.0 )
self.assert_( result["Cs"].data[i][j] <= 1.0 )
# test vertex attributes
attrs = [ "vert_f1", "vert_f2", "vert_f3", "orient", "quat_2", "vert_i1", "vert_i2", "vert_i3", "vert_v3f", "vertStringIndices" ]
for a in attrs :
self.assert_( a in result )
self.assertEqual( result[a].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result[a].data.size(), result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( result["vert_f1"].data.typeId(), IECore.FloatVectorData.staticTypeId() )
self.assertEqual( result["vert_f2"].data.typeId(), IECore.V2fVectorData.staticTypeId() )
self.assertEqual( result["vert_f3"].data.typeId(), IECore.V3fVectorData.staticTypeId() )
self.assertEqual( result["orient"].data.typeId(), IECore.QuatfVectorData.staticTypeId() )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
for j in range( 0, 3 ) :
self.assert_( result["vert_f3"].data[i][j] >= 0.0 )
self.assert_( result["vert_f3"].data[i][j] < 400.1 )
self.assertAlmostEqual( result["orient"].data[i][0], i * 0.4,5 )
self.assertAlmostEqual( result["orient"].data[i][1], i * 0.1,5 )
self.assertAlmostEqual( result["orient"].data[i][2], i * 0.2,5 )
self.assertAlmostEqual( result["orient"].data[i][3], i * 0.3,5 )
self.assertAlmostEqual( result["quat_2"].data[i][0], i * 0.8,5 )
self.assertAlmostEqual( result["quat_2"].data[i][1], i * 0.2,5 )
self.assertAlmostEqual( result["quat_2"].data[i][2], i * 0.4,5 )
self.assertAlmostEqual( result["quat_2"].data[i][3], i * 0.6,5 )
self.assertEqual( result["vert_i1"].data.typeId(), IECore.IntVectorData.staticTypeId() )
self.assertEqual( result["vert_i2"].data.typeId(), IECore.V2iVectorData.staticTypeId() )
self.assertEqual( result["vert_i3"].data.typeId(), IECore.V3iVectorData.staticTypeId() )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
for j in range( 0, 3 ) :
self.assert_( result["vert_i3"].data[i][j] < 10 )
self.assertEqual( result["vert_v3f"].data.typeId(), IECore.V3fVectorData.staticTypeId() )
self.assertEqual( result["vertString"].data.typeId(), IECore.TypeId.StringVectorData )
self.assertEqual( result["vertString"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result["vertStringIndices"].data.typeId(), IECore.TypeId.IntVectorData )
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
self.assertEqual( result["vertString"].data[i], "string %06d!" % i )
self.assertEqual( result["vertStringIndices"].data[i], i )
# make sure the string tables are alphabetically sorted:
self.assertEqual( result["vertString2"].data, IECore.StringVectorData( ['a','b','c','d','e','f','g'] ) )
stringVals = [ 'd','c','e','a','g','f','b' ]
for i in range( 0, result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ) ) :
self.assertEqual( result["vertString2"].data[ result["vertString2Indices"].data[i] ], stringVals[ i % 7 ] )
self.assertEqual( result["m44"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result["m44"].data.typeId(), IECore.M44fVectorData.staticTypeId() )
matrixScale = IECore.M44f.extractSHRT( result["m44"].data[0] )[0]
matrixRot = IECore.M44f.extractSHRT( result["m44"].data[0] )[2]
matrixTranslation = IECore.M44f.extractSHRT( result["m44"].data[0] )[3]
self.assertEqual( matrixTranslation, IECore.V3f( 10,20,30 ) )
self.assertTrue( matrixRot.equalWithRelError( IECore.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
self.assertTrue( matrixScale.equalWithRelError( IECore.V3f( 3, 4, 5 ), 1.e-5 ) )
self.assertEqual( result["detail_m44"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result["detail_m44"].data.typeId(), IECore.M44fData.staticTypeId() )
matrixScale = IECore.M44f.extractSHRT( result["detail_m44"].data.value )[0]
matrixRot = IECore.M44f.extractSHRT( result["detail_m44"].data.value )[2]
matrixTranslation = IECore.M44f.extractSHRT( result["detail_m44"].data.value )[3]
self.assertEqual( matrixTranslation, IECore.V3f( 10,20,30 ) )
self.assertTrue( matrixRot.equalWithRelError( IECore.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
self.assertTrue( matrixScale.equalWithRelError( IECore.V3f( 3, 4, 5 ), 1.e-5 ) )
self.assertEqual( result["m33"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( result["m33"].data.typeId(), IECore.M33fVectorData.staticTypeId() )
m3 = result["m33"].data[0]
m4 = IECore.M44f(
m3[(0,0)], m3[(0,1)], m3[(0,2)], 0.0,
m3[(1,0)], m3[(1,1)], m3[(1,2)], 0.0,
m3[(2,0)], m3[(2,1)], m3[(2,2)], 0.0,
0.0, 0.0, 0.0, 1.0
)
matrixScale = IECore.M44f.extractSHRT( m4 )[0]
matrixRot = IECore.M44f.extractSHRT( m4 )[2]
self.assertTrue( matrixRot.equalWithRelError( IECore.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
self.assertTrue( matrixScale.equalWithRelError( IECore.V3f( 3, 4, 5 ), 1.e-5 ) )
self.assertEqual( result["detail_m33"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result["detail_m33"].data.typeId(), IECore.M33fData.staticTypeId() )
m3 = result["detail_m33"].data.value
m4 = IECore.M44f(
m3[(0,0)], m3[(0,1)], m3[(0,2)], 0.0,
m3[(1,0)], m3[(1,1)], m3[(1,2)], 0.0,
m3[(2,0)], m3[(2,1)], m3[(2,2)], 0.0,
0.0, 0.0, 0.0, 1.0
)
matrixScale = IECore.M44f.extractSHRT( m4 )[0]
matrixRot = IECore.M44f.extractSHRT( m4 )[2]
self.assertTrue( matrixRot.equalWithRelError( IECore.V3f( math.pi / 6, math.pi / 4, math.pi / 3 ), 1.e-5 ) )
self.assertTrue( matrixScale.equalWithRelError( IECore.V3f( 3, 4, 5 ), 1.e-5 ) )
self.assert_( result.arePrimitiveVariablesValid() )
# convert some points
def testConvertPoints( self ) :
points = self.createPoints()
converter = IECoreHoudini.FromHoudiniPointsConverter( points )
result = converter.convert()
self.assertEqual( result.typeId(), IECore.PointsPrimitive.staticTypeId() )
self.assertEqual( points.parm('npts').eval(), result.numPoints )
self.assert_( "P" in result.keys() )
self.assert_( "N" in result.keys() )
self.assert_( result.arePrimitiveVariablesValid() )
# simple attribute conversion
def testSetupAttributes( self ) :
points = self.createPoints()
geo = points.parent()
attr = geo.createNode( "attribcreate", exact_type_name=True )
attr.setInput( 0, points )
attr.parm("name").set( "test_attribute" )
attr.parm("type").set(0) # float
attr.parm("size").set(1) # 1 element
attr.parm("value1").set(123.456)
attr.parm("value2").set(654.321)
converter = IECoreHoudini.FromHoudiniPointsConverter( attr )
result = converter.convert()
self.assert_( "test_attribute" in result.keys() )
self.assertEqual( result["test_attribute"].data.size(), points.parm('npts').eval() )
self.assert_( result.arePrimitiveVariablesValid() )
return attr
# testing point attributes and types
def testPointAttributes( self ) :
attr = self.testSetupAttributes()
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatVectorData )
self.assert_( result["test_attribute"].data[0] > 123.0 )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # integer
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntVectorData )
self.assertEqual( result["test_attribute"].data[0], 123 )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(2) # 2 elementS
attr.parm("value2").set(456.789)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fVectorData )
self.assertEqual( result["test_attribute"].data[0], IECore.V2f( 123.456, 456.789 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iVectorData )
self.assertEqual( result["test_attribute"].data[0], IECore.V2i( 123, 456 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(3) # 3 elements
attr.parm("value3").set(999.999)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fVectorData )
self.assertEqual( result["test_attribute"].data[0],IECore.V3f( 123.456, 456.789, 999.999 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iVectorData )
self.assertEqual( result["test_attribute"].data[0], IECore.V3i( 123, 456, 999 ) )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set( 3 ) # string
attr.parm( "string" ).setExpression("'string %06d!' % pwd().curPoint().number()", hou.exprLanguage.Python)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringVectorData )
self.assertEqual( result["test_attribute"].data[10], "string 000010!" )
self.assertEqual( result["test_attribute"].data.size(), 5000 )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( result["test_attributeIndices"].data.typeId(), IECore.TypeId.IntVectorData )
self.assertEqual( result["test_attributeIndices"].data[10], 10 )
self.assertEqual( result["test_attributeIndices"].data.size(), 5000 )
self.assertEqual( result["test_attributeIndices"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assert_( result.arePrimitiveVariablesValid() )
# testing detail attributes and types
def testDetailAttributes( self ) :
attr = self.testSetupAttributes()
attr.parm("class").set(0) # detail attribute
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
attr.parm("value1").set(123.456)
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.FloatData )
self.assert_( result["test_attribute"].data > IECore.FloatData( 123.0 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # integer
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.IntData )
self.assertEqual( result["test_attribute"].data, IECore.IntData( 123 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
		attr.parm("size").set(2) # 2 elements
attr.parm("value2").set(456.789)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2fData )
self.assertEqual( result["test_attribute"].data.value, IECore.V2f( 123.456, 456.789 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V2iData )
self.assertEqual( result["test_attribute"].data.value, IECore.V2i( 123, 456 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(0) # float
attr.parm("size").set(3) # 3 elements
attr.parm("value3").set(999.999)
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3fData )
self.assertEqual( result["test_attribute"].data.value, IECore.V3f( 123.456, 456.789, 999.999 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set(1) # int
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.V3iData )
self.assertEqual( result["test_attribute"].data.value, IECore.V3i( 123, 456, 999 ) )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
attr.parm("type").set( 3 ) # string
attr.parm( "string" ).set( "string!" )
result = IECoreHoudini.FromHoudiniPointsConverter( attr ).convert()
self.assertEqual( result["test_attribute"].data.typeId(), IECore.TypeId.StringData )
self.assertEqual( result["test_attribute"].data.value, "string!" )
self.assertEqual( result["test_attribute"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assert_( result.arePrimitiveVariablesValid() )
# testing that float[4] doesn't work!
def testFloat4attr( self ) : # we can't deal with float 4's right now
attr = self.testSetupAttributes()
attr.parm("name").set( "test_attribute" )
attr.parm("size").set(4) # 4 elements per point-attribute
converter = IECoreHoudini.FromHoudiniPointsConverter( attr )
result = converter.convert()
self.assert_( "test_attribute" not in result.keys() ) # invalid due to being float[4]
self.assert_( result.arePrimitiveVariablesValid() )
# testing conversion of animating geometry
def testAnimatingGeometry( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
facet = geo.createNode( "facet" )
facet.parm("postnml").set(True)
mountain = geo.createNode( "mountain" )
mountain.parm("offset1").setExpression( "$FF" )
points = geo.createNode( "scatter" )
facet.setInput( 0, torus )
mountain.setInput( 0, facet )
points.setInput( 0, mountain )
converter = IECoreHoudini.FromHoudiniPointsConverter( points )
hou.setFrame(1)
points_1 = converter.convert()
hou.setFrame(2)
converter = IECoreHoudini.FromHoudiniPointsConverter( points )
points_2 = converter.convert()
self.assertNotEqual( points_1["P"].data, points_2["P"].data )
# testing we can handle an object being deleted
def testObjectWasDeleted( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
converter = IECoreHoudini.FromHoudiniPointsConverter( torus )
g1 = converter.convert()
torus.destroy()
g2 = converter.convert()
self.assertEqual( g2, g1 )
self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniPointsConverter, torus ) )
# testing we can handle an object being deleted
def testObjectWasDeletedFactory( self ) :
obj = hou.node("/obj")
geo = obj.createNode("geo", run_init_scripts=False)
torus = geo.createNode( "torus" )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( torus )
g1 = converter.convert()
torus.destroy()
g2 = converter.convert()
self.assertEqual( g2, g1 )
self.assertRaises( RuntimeError, IECore.curry( IECoreHoudini.FromHoudiniGeometryConverter.create, torus ) )
# testing converting a Houdini particle primitive with detail and point attribs
def testParticlePrimitive( self ) :
obj = hou.node("/obj")
geo = obj.createNode( "geo", run_init_scripts=False )
popnet = geo.createNode( "popnet" )
location = popnet.createNode( "location" )
detailAttr = popnet.createOutputNode( "attribcreate", exact_type_name=True )
detailAttr.parm("name").set( "float3detail" )
detailAttr.parm("class").set( 0 ) # detail
detailAttr.parm("type").set( 0 ) # float
detailAttr.parm("size").set( 3 ) # 3 elements
detailAttr.parm("value1").set( 1 )
detailAttr.parm("value2").set( 2 )
detailAttr.parm("value3").set( 3 )
pointAttr = detailAttr.createOutputNode( "attribcreate", exact_type_name=True )
pointAttr.parm("name").set( "float3point" )
pointAttr.parm("class").set( 2 ) # point
pointAttr.parm("type").set( 0 ) # float
pointAttr.parm("size").set( 3 ) # 3 elements
pointAttr.parm("value1").set( 1 )
pointAttr.parm("value2").set( 2 )
pointAttr.parm("value3").set( 3 )
hou.setFrame( 5 )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( pointAttr )
self.assert_( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
points = converter.convert()
self.assertEqual( type(points), IECore.PointsPrimitive )
self.assertEqual( points.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 21 )
self.assertEqual( points["float3detail"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( type(points["float3detail"].data), IECore.V3fData )
self.assert_( points["float3detail"].data.value.equalWithRelError( IECore.V3f( 1, 2, 3 ), 1e-10 ) )
self.assertEqual( type(points["float3point"].data), IECore.V3fVectorData )
self.assertEqual( points["float3point"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
for p in points["float3point"].data :
self.assert_( p.equalWithRelError( IECore.V3f( 1, 2, 3 ), 1e-10 ) )
self.assert_( points.arePrimitiveVariablesValid() )
add = pointAttr.createOutputNode( "add" )
add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points
converter = IECoreHoudini.FromHoudiniPointsConverter( add )
points2 = converter.convert()
del points['generator']
del points['generatorIndices']
del points['born']
del points['source']
self.assertEqual( points2, points )
def testMultipleParticlePrimitives( self ) :
obj = hou.node("/obj")
geo = obj.createNode( "geo", run_init_scripts=False )
popnet = geo.createNode( "popnet" )
fireworks = popnet.createNode( "fireworks" )
hou.setFrame( 15 )
converter = IECoreHoudini.FromHoudiniPointsConverter( popnet )
points = converter.convert()
self.assertEqual( type(points), IECore.PointsPrimitive )
self.assertEqual( points.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 24 )
self.assertEqual( points["accel"].interpolation, IECore.PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( type(points["accel"].data), IECore.V3fVectorData )
self.assertEqual( points["accel"].data.getInterpretation(), IECore.GeometricData.Interpretation.Vector )
self.assertEqual( points["nextid"].interpolation, IECore.PrimitiveVariable.Interpolation.Constant )
self.assertEqual( points["nextid"].data, IECore.IntData( 25 ) )
self.assertTrue( points.arePrimitiveVariablesValid() )
add = popnet.createOutputNode( "add" )
add.parm( "keep" ).set( 1 ) # deletes primitive and leaves points
converter = IECoreHoudini.FromHoudiniPointsConverter( add )
points2 = converter.convert()
# showing that prim attribs don't get converted because the interpolation size doesn't match
self.assertEqual( points2, points )
def testName( self ) :
points = self.createPoints()
particles = points.createOutputNode( "add" )
particles.parm( "addparticlesystem" ).set( True )
name = particles.createOutputNode( "name" )
name.parm( "name1" ).set( "points" )
box = points.parent().createNode( "box" )
name2 = box.createOutputNode( "name" )
name2.parm( "name1" ).set( "box" )
merge = name.createOutputNode( "merge" )
merge.setInput( 1, name2 )
converter = IECoreHoudini.FromHoudiniPointsConverter( merge )
result = converter.convert()
# names are not stored on the object at all
self.assertEqual( result.blindData(), IECore.CompoundData() )
self.assertFalse( "name" in result )
self.assertFalse( "nameIndices" in result )
# both shapes were converted as one PointsPrimitive
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 5008 )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 1 )
self.assertTrue( result.arePrimitiveVariablesValid() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "points" )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
# names are not stored on the object at all
self.assertEqual( result.blindData(), IECore.CompoundData() )
self.assertFalse( "name" in result )
self.assertFalse( "nameIndices" in result )
# only the named points were converted
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 5000 )
self.assertTrue( result.arePrimitiveVariablesValid() )
converter = IECoreHoudini.FromHoudiniGeometryConverter.create( merge, "box", IECore.TypeId.PointsPrimitive )
self.assertTrue( converter.isInstanceOf( IECore.TypeId( IECoreHoudini.TypeId.FromHoudiniPointsConverter ) ) )
result = converter.convert()
# names are not stored on the object at all
self.assertEqual( result.blindData(), IECore.CompoundData() )
self.assertFalse( "name" in result )
self.assertFalse( "nameIndices" in result )
# only the named points were converted
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), 8 )
self.assertEqual( result.variableSize( IECore.PrimitiveVariable.Interpolation.Uniform ), 1 )
self.assertTrue( result.arePrimitiveVariablesValid() )
def testAttributeFilter( self ) :
points = self.createPoints()
particles = points.createOutputNode( "add" )
particles.parm( "addparticlesystem" ).set( True )
# add vertex normals
facet = particles.createOutputNode( "facet", node_name = "add_point_normals" )
facet.parm("postnml").set(True)
# add a primitive colour attributes
primcol = facet.createOutputNode( "primitive", node_name = "prim_colour" )
primcol.parm("doclr").set(1)
primcol.parm("diffr").setExpression("rand($PR)")
primcol.parm("diffg").setExpression("rand($PR+1)")
primcol.parm("diffb").setExpression("rand($PR+2)")
detail = primcol.createOutputNode( "attribcreate", node_name = "detail", exact_type_name=True )
detail.parm("name").set("detailAttr")
detail.parm("class").set(0)
detail.parm("type").set(1)
detail.parm("size").set(3)
detail.parm("value1").set(123)
detail.parm("value2").set(456.789) # can we catch it out with a float?
detail.parm("value3").set(789)
converter = IECoreHoudini.FromHoudiniPointsConverter( detail )
self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "N", "P", "detailAttr", "varmap" ] )
converter.parameters()["attributeFilter"].setTypedValue( "P" )
self.assertEqual( sorted(converter.convert().keys()), [ "P" ] )
converter.parameters()["attributeFilter"].setTypedValue( "* ^N ^varmap" )
self.assertEqual( sorted(converter.convert().keys()), [ "Cs", "P", "detailAttr" ] )
# P must be converted
converter.parameters()["attributeFilter"].setTypedValue( "* ^P" )
self.assertTrue( "P" in converter.convert().keys() )
def testStandardAttributeConversion( self ) :
points = self.createPoints()
color = points.createOutputNode( "color" )
color.parm( "colortype" ).set( 2 )
rest = color.createOutputNode( "rest" )
scale = rest.createOutputNode( "attribcreate" )
scale.parm( "name1" ).set( "pscale" )
scale.parm( "value1v1" ).setExpression( "$PT" )
converter = IECoreHoudini.FromHoudiniPointsConverter( scale )
result = converter.convert()
if hou.applicationVersion()[0] >= 15 :
self.assertEqual( result.keys(), [ "Cs", "N", "P", "Pref", "width" ] )
else :
self.assertEqual( result.keys(), [ "Cs", "N", "P", "Pref", "varmap", "width" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["Pref"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
converter["convertStandardAttributes"].setTypedValue( False )
result = converter.convert()
if hou.applicationVersion()[0] >= 15 :
self.assertEqual( result.keys(), [ "Cd", "N", "P", "pscale", "rest" ] )
else :
self.assertEqual( result.keys(), [ "Cd", "N", "P", "pscale", "rest", "varmap" ] )
self.assertTrue( result.arePrimitiveVariablesValid() )
self.assertEqual( result["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["rest"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( result["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 3,866,630,561,449,466,000 | 46.919182 | 155 | 0.708268 | false |
SimpleGeometry/bisv-ml | kaggle-titanic/model.py | 1 | 3219 | import pandas as pd
from sklearn.tree import DecisionTreeClassifier #not Regressor
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process.kernels import RBF
from sklearn.preprocessing import Imputer #needed to fill empty values
import numpy as np
#new imports:
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier)
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
# feature engineering
train['Sex'] = train['Sex'].map({'male': 0, 'female': 1}).astype(int)
test['Sex'] = test['Sex'].map({'male': 0, 'female': 1}).astype(int)
train['Name_length'] = train['Name'].apply(len)
test['Name_length'] = test['Name'].apply(len)
#train.to_csv('revised_input.csv', index=False)
imputer = Imputer()
columns = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'Sex', 'Name_length']
train_X = imputer.fit_transform(train[columns])
test_X = imputer.fit_transform(test[columns])
train_y = train.Survived
model1 = GradientBoostingClassifier(n_estimators=500,
                                    #max_features=0.2,
                                    max_depth=5,
                                    min_samples_leaf=2,
                                    verbose=0
                                    )
model1.fit(train_X, train_y)
model2 = AdaBoostClassifier(n_estimators= 500,
learning_rate=0.75)
model2.fit(train_X, train_y)
model3 = RandomForestClassifier(n_jobs=-1,
n_estimators=500,
warm_start=True,
#'max_features': 0.2,
max_depth=6,
min_samples_leaf=2,
max_features='sqrt',
verbose=0
)
model3.fit(train_X, train_y)
model4 = ExtraTreesClassifier(n_jobs=-1,
n_estimators=500,
#max_features=0.5,
max_depth=8,
min_samples_leaf=2,
verbose=0)
model4.fit(train_X, train_y)
model5 = SVC(kernel='linear',
C=0.025)
model5.fit(train_X, train_y)
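# Stacking step (level-2 ensemble): each of the five base models fitted above
# contributes one prediction column for the training and test sets; those
# columns are stacked into new feature matrices and fed to a second-stage
# GradientBoostingClassifier below, which learns how to combine the base
# predictions.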
train_X1 = model1.predict(train_X)
train_X2 = model2.predict(train_X)
train_X3 = model3.predict(train_X)
train_X4 = model4.predict(train_X)
train_X5 = model5.predict(train_X)
train_X1 = train_X1[:, np.newaxis]
train_X2 = train_X2[:, np.newaxis]
train_X3 = train_X3[:, np.newaxis]
train_X4 = train_X4[:, np.newaxis]
train_X5 = train_X5[:, np.newaxis]
test_X1 = model1.predict(test_X)
test_X2 = model2.predict(test_X)
test_X3 = model3.predict(test_X)
test_X4 = model4.predict(test_X)
test_X5 = model5.predict(test_X)
test_X1 = test_X1[:, np.newaxis]
test_X2 = test_X2[:, np.newaxis]
test_X3 = test_X3[:, np.newaxis]
test_X4 = test_X4[:, np.newaxis]
test_X5 = test_X5[:, np.newaxis]
#print(train_X1.shape)
new_train_X = np.concatenate((train_X1, train_X2, train_X3, train_X4, train_X5), axis=1)
new_test_X = np.concatenate((test_X1, test_X2, test_X3, test_X4, test_X5), axis=1)
final_model = GradientBoostingClassifier(n_estimators= 500,
#max_features=0.2,
max_depth=5,
min_samples_leaf=2,
verbose=0)
final_model.fit(new_train_X, train_y)
test_y = final_model.predict(new_test_X)
test_y = pd.DataFrame(test_y, columns=['Survived'])
test_ids = test.PassengerId
prediction = pd.DataFrame(pd.concat([test_ids, test_y], axis=1), columns=['PassengerId', 'Survived'])
prediction.to_csv('prediction.csv', index=False)
| mit | 4,619,664,483,775,947,000 | 29.367925 | 123 | 0.706431 | false |
bung87/django-moe-auth | setup.py | 1 | 1126 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# import codecs
# import os
from distutils.core import setup
from setuptools import find_packages
# version_tuple = __import__('django_js_reverse').VERSION
# version = '.'.join([str(v) for v in version_tuple])
setup(
name='django-moe-auth',
version='0.0.1',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: PyPy',
'Framework :: Django',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
],
license='MIT',
    description='Authentication app for Django backed by MongoEngine and django-allauth.',
# long_description=read('README.rst'),
author='Bung',
author_email='zh.bung@gmail.com',
url='https://github.com/bung87/django-moe-auth',
download_url='https://github.com/bung87/django-moe-auth',
packages=find_packages(),
install_requires=[
'Django>=1.5',
'mongoengine==0.8.8',
'djangorestframework==3.0.5',
'django-allauth>=0.19.1'
]
)
| mit | -3,784,006,114,095,165,400 | 29.432432 | 73 | 0.618117 | false |
revdotcom/babelsubs | babelsubs/utils.py | 2 | 7601 | import re
import bleach
import htmllib
import htmlentitydefs
import formatter
from itertools import chain
from xmlconst import *
DEFAULT_ALLOWED_TAGS = ['i', 'b', 'u']
MULTIPLE_SPACES = re.compile('\s{2,}')
BLANK_CHARS = re.compile('[\n\t\r]*')
# We support unsynced subs, meaning there is no timing data for them,
# in which case we flag them with the largest possible time value
UNSYNCED_TIME_FULL = (60 * 60 * 100 * 1000) - 1
# some formats limit hours to 1 digit, so the max available time must
# be adjusted
UNSYNCED_TIME_ONE_HOUR_DIGIT = (60 * 60 * 10 * 1000) - 1000
def unescape_html(s):
p = htmllib.HTMLParser(formatter.NullFormatter() )
# we need to preserve line breaks, nofill makes sure we don't
# loose them
p.nofill = True
p.save_bgn()
p.feed(s)
return p.save_end().strip()
LANG_DIALECT_RE = re.compile(r'(?P<lang_code>[\w]{2,13})(?P<dialect>-[\w]{2,8})?(?P<rest>-[\w]*)?')
def to_bcp47(code):
"""
This is an ugly hack. I should be ashamed, but I'm not.
Implementing BCP47 will be much more work.
The idea is to translate from a lang code unilangs supports
    into the bcp47 format. There are cases where this might fail
    (e.g. if the dialect code is not recognized by bcp47). For most cases this should be ok.
    Simple sanity check:
    assert (unilangs.to_bcp47("en-us"), unilangs.to_bcp47('en'), unilangs.to_bcp47('ug_Arab-cn')) == ('en-US', 'en', 'ug_Arab-CN')
"""
if not code:
raise ValueError("No language was passed")
match = LANG_DIALECT_RE.match(code)
if not match:
raise ValueError("%s code does not seem to be a valid language code.")
match_dict = match.groupdict()
return "%s%s%s" % (match_dict['lang_code'],
(match_dict.get('dialect', "") or "").upper(),
match_dict.get('rest', '') or "")
def generate_style_map(dom):
'''
Parse the head.styling node on the xml and generate a hash -> list
of styles that require our supported formatting optins (bold and
italic for now).
eg.
style_map = {
'italic': ['speaker', 'importante'],
'bold': [],
}
This will be used when parsing each text node to make sure
we can convert to our own styling markers.
'''
style_map = {
'italic': [],
'bold': [],
}
styling_nodes = dom.getElementsByTagName("styling")
style_nodes = chain.from_iterable([x.getElementsByTagName('style') for x in styling_nodes])
for style_node in style_nodes:
style_id = style_node.getAttribute('xml:id')
for key in style_node.attributes.keys():
value = style_node.attributes[key].value
if key == 'tts:fontWeight' and value == 'bold':
style_map['bold'].append(style_id)
elif key == 'tts:fontStyle' and value == 'italic':
style_map['italic'].append(style_id)
return style_map
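# Illustrative example: a TTML head containing
#   <styling><style xml:id="speaker" tts:fontStyle="italic"/></styling>
# would yield style_map == {'italic': ['speaker'], 'bold': []}.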
def strip_tags(text, tags=None):
"""
Returns text with the tags stripped.
By default we allow the standard formatting tags
to pass (i,b,u).
Any other tag's content will be present, but with tags removed.
"""
if tags is None:
tags = DEFAULT_ALLOWED_TAGS
return bleach.clean(text, tags=tags, strip=True)
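# Example (illustrative): strip_tags('<i>Hello</i> <font>world</font>') keeps
# the allowed <i> markup but strips the <font> tags while preserving their
# text, returning '<i>Hello</i> world'.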
def escape_ampersands(text):
"""Take a string of chars and replace ampersands with &"""
return text.replace('&', '&')
def entities_to_chars(text):
"""Removes HTML or XML character references and entities from a text string.
http://effbot.org/zone/re-sub.htm#unescape-html
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
def from_xmlish_text(input_str):
"""
Parses text content from xml based formats.
<br> tags are transformed into newlines, tab and multiple spaces
collapsed. e.g. turns:
"\n\r foo <br/> bar foorer \t " -> "foo bar\nfoorer"
"""
if not input_str:
return u""
# remove new lines and tabs
input_str = BLANK_CHARS.sub(u"", input_str)
# do convert <br> to new lines
input_str = input_str.replace("<br/>", "\n")
# collapse whitespace on each new line
return "\n".join( MULTIPLE_SPACES.sub(u" ", x).strip() for x in input_str.split('\n'))
def unsynced_time_components(one_hour_digit=False, uses_centiseconds=False):
return {
'hours': 9 if one_hour_digit else 99,
'minutes': 59,
'seconds': 59,
'milliseconds': 99 if uses_centiseconds else 999,
'centiseconds': 99,
}
def milliseconds_to_time_clock_components(milliseconds,
unsynced_val=UNSYNCED_TIME_FULL,
use_centiseconds=False):
"""Convert milliseconds to a dict of hours, minutes, seconds, milliseconds.
Milliseconds should be given as an integer, or None. None will be converted
to all zeros.
If use_centiseconds is True, the resulting dict will have a centiseconds
entry instead of a milliseconds one.
"""
components = dict(hours=0, minutes=0, seconds=0, milliseconds=0)
if milliseconds is not None:
components['seconds'], components['milliseconds'] = divmod(int(milliseconds), 1000)
components['minutes'], components['seconds'] = divmod(components['seconds'], 60 )
components['hours'], components['minutes'] = divmod(components['minutes'], 60 )
if use_centiseconds:
ms = components.pop('milliseconds')
components['centiseconds'] = round(ms / 10.0)
return components
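# Example (illustrative):
#   milliseconds_to_time_clock_components(3723004)
#   -> {'hours': 1, 'minutes': 2, 'seconds': 3, 'milliseconds': 4}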
def fraction_to_milliseconds(str_milli):
"""
Converts milliseonds as an integer string to a 3 padded string, e.g.
1 -> 001
10 -> 010
100 -> 100
"""
if not str_milli:
return 0
return int(str_milli.ljust(3, '0')) % 1000
def centiseconds_to_milliseconds(centi):
return int(centi) * 10 if centi else 0
def indent_ttml(tt_elt, indent_width=4):
"""Indent TTML tree
This function walks the XML tree and adjusts the text and tail attributes
so that the output will be nicely indented. It skips <p> elements and
their children, since whitespace is significant there.
Also, we will add a newline after the closing tag for the TT element.
:param tt_elt: etree TT root element.
"""
_do_indent_ttml(tt_elt, " " * indent_width, 0)
tt_elt.tail = "\n"
def _do_indent_ttml(elt, indent, indent_level):
if elt.tag == TTML + 'p' or len(elt) == 0:
return
children = list(elt)
# before a child element, we want to start a new line, then indent enough
# to move them to the next indentation level
pre_child_indent = "\n" + indent * (indent_level + 1)
elt.text = pre_child_indent
for child in children[:-1]:
child.tail = pre_child_indent
# after the last child, we need to position our closing tag. This means
# indenting enough to move it to our indentation level.
children[-1].tail = "\n" + indent * indent_level
for child in children:
_do_indent_ttml(child, indent, indent_level + 1)
| bsd-3-clause | 4,482,841,142,669,328,000 | 33.393665 | 129 | 0.616629 | false |
markfasheh/ocfs2-tools | ocfs2console/ocfs2interface/mount.py | 1 | 5115 | # OCFS2Console - GUI frontend for OCFS2 management and debugging
# Copyright (C) 2002, 2005 Oracle. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import gtk
import ocfs2
from guiutil import set_props, error_box
from fstab import FSTab
from process import Process
def mount(parent, device):
mountpoint, options = query_mount(parent, device)
if not mountpoint:
return None
command = ('mount', '-t', 'ocfs2', device, mountpoint)
if options:
command = list(command)
command[1:1] = ('-o', options)
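        # Resulting argv, illustratively: ['mount', '-o', 'noatime', '-t',
        # 'ocfs2', '/dev/sdb1', '/mnt'] -- the '-o <options>' pair is spliced
        # in right after 'mount'.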
p = Process(command, 'Mount', 'Mounting...', parent, spin_now=True)
success, output, killed = p.reap()
if not success:
if killed:
error_box(parent, 'mount died unexpectedly! Your system is '
'probably in an inconsistent state. You '
'should reboot at the earliest opportunity')
else:
error_box(parent, '%s: Could not mount %s' % (output, device))
return None
else:
return mountpoint
def unmount(parent, device, mountpoint):
command = ('umount', mountpoint)
p = Process(command, 'Unmount', 'Unmounting...', parent)
success, output, killed = p.reap()
if not success:
if killed:
error_box(parent, 'umount died unexpectedly! Your system is '
'probably in an inconsistent state. You '
'should reboot at the earliest opportunity')
else:
error_box(parent, '%s: Could not unmount %s mounted on %s' %
(output, device, mountpoint))
def query_mount(parent, device):
default_mountpoint, default_options = get_defaults(device)
dialog = gtk.Dialog(parent=parent,
flags=gtk.DIALOG_DESTROY_WITH_PARENT,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
table = gtk.Table(rows=2, columns=2)
set_props(table, row_spacing=6,
column_spacing=6,
border_width=6,
parent=dialog.vbox)
def text_changed(entry):
text = entry.get_text()
valid = len(text) > 1 and text.startswith('/')
dialog.set_response_sensitive(gtk.RESPONSE_OK, valid)
mountpoint = gtk.Entry()
mountpoint.connect('changed', text_changed)
mountpoint.set_text(default_mountpoint)
text_changed(mountpoint)
options = gtk.Entry()
options.set_text(default_options)
row = 0
for prompt, entry in (('_Mountpoint', mountpoint),
('O_ptions', options)):
label = gtk.Label()
label.set_text_with_mnemonic(prompt + ':')
set_props(label, xalign=0.0)
table.attach(label, 0, 1, row, row + 1)
entry.set_activates_default(True)
label.set_mnemonic_widget(entry)
table.attach(entry, 1, 2, row, row + 1)
row = row + 1
dialog.show_all()
if dialog.run() == gtk.RESPONSE_OK:
mount_params = mountpoint.get_text(), options.get_text()
else:
mount_params = None, None
dialog.destroy()
return mount_params
def get_defaults(device):
label, uuid = get_ocfs2_id(device)
fstab = FSTab()
entry = fstab.get(device=device, label=label, uuid=uuid)
if entry and entry.vfstype == 'ocfs2':
return entry.mountpoint, entry.options
else:
return '', ''
def get_ocfs2_id(device):
try:
fs = ocfs2.Filesystem(device)
super = fs.fs_super
label = super.s_label
uuid = super.uuid_unparsed
except ocfs2.error:
label = uuid = None
return (label, uuid)
def main():
import sys
device = sys.argv[1]
def dummy(*args):
gtk.main_quit()
window = gtk.Window()
window.connect('delete-event', dummy)
vbbox = gtk.VButtonBox()
window.add(vbbox)
window.mountpoint = None
def test_mount(b):
window.mountpoint = mount(window, device)
button = gtk.Button('Mount')
button.connect('clicked', test_mount)
vbbox.add(button)
def test_unmount(b):
unmount(window, device, window.mountpoint)
button = gtk.Button('Unmount')
button.connect('clicked', test_unmount)
vbbox.add(button)
window.show_all()
gtk.main()
if __name__ == '__main__':
main()
| gpl-2.0 | 519,026,439,820,794,300 | 27.416667 | 76 | 0.608798 | false |
ProjectQ-Framework/FermiLib | src/fermilib/utils/_trotter_error_test.py | 1 | 6490 | # Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for _trotter_error.py."""
from future.utils import iteritems
from math import sqrt
import numpy
from scipy.linalg import expm
import unittest
from fermilib.config import *
from fermilib.ops import normal_ordered
from fermilib.transforms import get_sparse_operator
from fermilib.utils import MolecularData
from fermilib.utils._trotter_error import *
from projectq.ops import QubitOperator
class CommutatorTest(unittest.TestCase):
def test_commutator_commutes(self):
zero = QubitOperator()
self.assertTrue(commutator(QubitOperator(()),
QubitOperator('X3')).isclose(zero))
def test_commutator_single_pauli(self):
com = commutator(QubitOperator('X3'),
QubitOperator('Y3'))
expected = 2j * QubitOperator('Z3')
self.assertTrue(expected.isclose(com))
def test_commutator_multi_pauli(self):
com = commutator(QubitOperator('Z1 X2 Y4'),
QubitOperator('X1 Z2 X4'))
expected = -2j * QubitOperator('Y1 Y2 Z4')
self.assertTrue(expected.isclose(com))
class TriviallyCommutesTest(unittest.TestCase):
def test_trivially_commutes_id_id(self):
self.assertTrue(trivially_commutes(
QubitOperator(()), 3 * QubitOperator(())))
def test_trivially_commutes_id_x(self):
self.assertTrue(trivially_commutes(
QubitOperator(()), QubitOperator('X1')))
def test_trivially_commutes_id_xx(self):
self.assertTrue(trivially_commutes(
QubitOperator(()), QubitOperator('X1 X3')))
def test_trivially_commutes_nonid_with_id(self):
self.assertTrue(trivially_commutes(
QubitOperator('X1 Z5 Y9 Z11'), QubitOperator(())))
def test_trivially_commutes_no_intersect(self):
self.assertTrue(trivially_commutes(
QubitOperator('X1 Y3 Z6'), QubitOperator('Z0 Z2 X4 Y5')))
def test_trivially_commutes_allsame_oddintersect(self):
self.assertTrue(trivially_commutes(
QubitOperator('X1 X3 X4 Z6 X8'), QubitOperator('X1 X3 X4 Z7 Y9')))
def test_trivially_commutes_even_anti(self):
self.assertTrue(trivially_commutes(
QubitOperator('X1 Z2 Z3 X10'), QubitOperator('Y1 X2 Z3 Y9')))
def test_no_trivial_commute_odd_anti(self):
self.assertFalse(trivially_commutes(
QubitOperator('X1'), QubitOperator('Y1')))
def test_no_trivial_commute_triple_anti_intersect(self):
self.assertFalse(trivially_commutes(
QubitOperator('X0 Z2 Z4 Z9 Y17'),
QubitOperator('Y0 X2 Y4 Z9 Z16')))
def test_no_trivial_commute_mostly_commuting(self):
self.assertFalse(trivially_commutes(
QubitOperator('X0 Y1 Z2 X4 Y5 Y6'),
QubitOperator('X0 Y1 Z2 X4 Z5 Y6')))
class TriviallyDoubleCommutesTest(unittest.TestCase):
def test_trivial_double_commute_no_intersect(self):
self.assertTrue(trivially_double_commutes(
QubitOperator('X1 Z2 Y4'), QubitOperator('Y0 X3 Z6'),
QubitOperator('Y5')))
def test_trivial_double_commute_no_intersect_a_bc(self):
self.assertTrue(trivially_double_commutes(
QubitOperator('X1 Z2 Y4'), QubitOperator('Y0 X3 Z6'),
QubitOperator('Z3 Y5')))
def test_trivial_double_commute_bc_intersect_commute(self):
self.assertTrue(trivially_double_commutes(
QubitOperator('X1 Z2 Y4'), QubitOperator('X0 Z3'),
QubitOperator('Y0 X3')))
class ErrorOperatorTest(unittest.TestCase):
def test_error_operator_bad_order(self):
with self.assertRaises(NotImplementedError):
error_operator([QubitOperator], 1)
def test_error_operator_all_diagonal(self):
terms = [QubitOperator(()), QubitOperator('Z0 Z1 Z2'),
QubitOperator('Z0 Z3'), QubitOperator('Z0 Z1 Z2 Z3')]
zero = QubitOperator()
self.assertTrue(zero.isclose(error_operator(terms)))
class ErrorBoundTest(unittest.TestCase):
def test_error_bound_xyz_tight(self):
terms = [QubitOperator('X1'), QubitOperator('Y1'), QubitOperator('Z1')]
expected = sqrt(7. / 12) # 2-norm of [[-2/3, 1/3+i/6], [1/3-i/6, 2/3]]
self.assertLess(expected, error_bound(terms, tight=True))
def test_error_bound_xyz_loose(self):
terms = [QubitOperator('X1'), QubitOperator('Y1'), QubitOperator('Z1')]
self.assertTrue(numpy.isclose(
error_bound(terms, tight=False), 4. * (2 ** 2 + 1 ** 2)))
def test_error_operator_xyz(self):
terms = [QubitOperator('X1'), QubitOperator('Y1'), QubitOperator('Z1')]
expected = numpy.array([[-2./3, 1./3 + 1.j/6, 0., 0.],
[1./3 - 1.j/6, 2./3, 0., 0.],
[0., 0., -2./3, 1./3 + 1.j/6],
[0., 0., 1./3 - 1.j/6, 2./3]])
sparse_op = get_sparse_operator(error_operator(terms))
matrix = sparse_op.todense()
self.assertTrue(numpy.allclose(matrix, expected),
("Got " + str(matrix)))
def test_error_bound_qubit_tight_less_than_loose_integration(self):
terms = [QubitOperator('X1'), QubitOperator('Y1'), QubitOperator('Z1')]
self.assertLess(error_bound(terms, tight=True),
error_bound(terms, tight=False))
class TrotterStepsRequiredTest(unittest.TestCase):
def test_trotter_steps_required(self):
self.assertEqual(trotter_steps_required(
trotter_error_bound=0.3, time=2.5, energy_precision=0.04), 7)
def test_trotter_steps_required_negative_time(self):
self.assertEqual(trotter_steps_required(
trotter_error_bound=0.1, time=3.3, energy_precision=0.11), 4)
def test_return_type(self):
self.assertIsInstance(trotter_steps_required(0.1, 0.1, 0.1), int)
| apache-2.0 | 837,469,697,044,621,600 | 38.333333 | 79 | 0.642527 | false |
tadamic/sokoenginepy | src/sokoenginepy/tessellation/hexoban_tessellation/hexoban_tessellation.py | 1 | 3295 | from ...utilities import COLUMN, ROW, index_1d, inverted, is_on_board_2d
from ..direction import Direction, UnknownDirectionError
from ..tessellation_base import TessellationBase, TessellationBaseInheritableDocstrings
class HexobanTessellation(
TessellationBase, metaclass=TessellationBaseInheritableDocstrings
):
_LEGAL_DIRECTIONS = (
Direction.LEFT,
Direction.RIGHT,
Direction.NORTH_EAST,
Direction.NORTH_WEST,
Direction.SOUTH_EAST,
Direction.SOUTH_WEST,
)
_CHR_TO_ATOMIC_MOVE = None
_ATOMIC_MOVE_TO_CHR = None
@property
@copy_ancestor_docstring
def legal_directions(self):
return self._LEGAL_DIRECTIONS
@property
@copy_ancestor_docstring
def graph_type(self):
from ...graph import GraphType
return GraphType.DIRECTED
@copy_ancestor_docstring
def neighbor_position(self, position, direction, board_width, board_height):
# if not is_on_board_1d(position, board_width, board_height):
# return None
row = ROW(position, board_width)
column = COLUMN(position, board_width)
if direction == Direction.LEFT:
column -= 1
elif direction == Direction.RIGHT:
column += 1
elif direction == Direction.NORTH_EAST:
column += row % 2
row -= 1
elif direction == Direction.NORTH_WEST:
column -= (row + 1) % 2
row -= 1
elif direction == Direction.SOUTH_EAST:
column += row % 2
row += 1
elif direction == Direction.SOUTH_WEST:
column -= (row + 1) % 2
row += 1
else:
raise UnknownDirectionError(direction)
if is_on_board_2d(column, row, board_width, board_height):
return index_1d(column, row, board_width)
return None
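    # Illustrative example (assuming index_1d(col, row, width) == row * width + col):
    # on a board 4 cells wide, position 5 sits at (row=1, col=1); its
    # Direction.NORTH_EAST neighbor is (row=0, col=2), i.e. position 2, since
    # moving "north" out of an odd row shifts the column by +1.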
@property
def _char_to_atomic_move_dict(self):
if not self.__class__._CHR_TO_ATOMIC_MOVE:
from ...snapshot import AtomicMoveCharacters
self.__class__._CHR_TO_ATOMIC_MOVE = {
AtomicMoveCharacters.l: (Direction.LEFT, False),
AtomicMoveCharacters.L: (Direction.LEFT, True),
AtomicMoveCharacters.r: (Direction.RIGHT, False),
AtomicMoveCharacters.R: (Direction.RIGHT, True),
AtomicMoveCharacters.u: (Direction.NORTH_WEST, False),
AtomicMoveCharacters.U: (Direction.NORTH_WEST, True),
AtomicMoveCharacters.d: (Direction.SOUTH_EAST, False),
AtomicMoveCharacters.D: (Direction.SOUTH_EAST, True),
AtomicMoveCharacters.n: (Direction.NORTH_EAST, False),
AtomicMoveCharacters.N: (Direction.NORTH_EAST, True),
AtomicMoveCharacters.s: (Direction.SOUTH_WEST, False),
AtomicMoveCharacters.S: (Direction.SOUTH_WEST, True),
}
return self._CHR_TO_ATOMIC_MOVE
@property
def _atomic_move_to_char_dict(self):
if not self.__class__._ATOMIC_MOVE_TO_CHR:
self.__class__._ATOMIC_MOVE_TO_CHR = inverted(
self._char_to_atomic_move_dict
)
return self._ATOMIC_MOVE_TO_CHR
def __str__(self):
return "hexoban"
| gpl-3.0 | -4,547,235,167,960,441,300 | 34.053191 | 87 | 0.598786 | false |
tolteck/stripe_mock_server | localstripe/errors.py | 1 | 1326 | # -*- coding: utf-8 -*-
# Copyright 2017 Adrien Vergé
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from aiohttp import web
def json_response(*args, **kwargs):
return web.json_response(
*args,
dumps=lambda x: json.dumps(x, indent=2, sort_keys=True) + '\n',
**kwargs)
class UserError(Exception):
def __init__(self, code, message=None, contents=None):
Exception.__init__(self)
self.code = code
self.body = {'error': contents or {}}
self.body['error']['type'] = 'invalid_request_error'
if message is not None:
self.body['error']['message'] = message
def to_response(self):
return json_response(self.body, status=self.code)
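# Illustrative usage: a request handler can raise
#   UserError(404, 'No such customer: cus_123')
# ('cus_123' is a made-up id) and return err.to_response(), yielding a 404
# JSON response whose body is
#   {"error": {"message": "No such customer: cus_123", "type": "invalid_request_error"}}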
| gpl-3.0 | 9,209,162,390,930,675,000 | 32.974359 | 71 | 0.678491 | false |
lanhel/pyzombie | setup/lib/distutils_local/build_docutils.py | 1 | 4094 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#-------------------------------------------------------------------------------
"""test
Implements a Distutils 'test' command."""
__author__ = ('Lance Finn Helsten',)
__version__ = '1.0.1'
__copyright__ = """Copyright 2009 Lance Finn Helsten (helsten@acm.org)"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ['build_docutils']
import sys
if sys.version_info < (3, 0):
    raise Exception("pyzombie requires Python 3.0 or higher.")
import os
import errno
import shutil
import itertools
import subprocess
from distutils.core import Command
def doc_paths(packages):
"""Given a list of package names find all the reStructured text files
with a '.rst' extension."""
dirs = [p.replace('.', os.sep) for p in packages]
dirs = [os.path.abspath(p) for p in dirs]
files = [[os.path.join(p, f) for f in os.listdir(p)] for p in dirs]
files = [f for f in itertools.chain(*files) if os.path.splitext(f)[1] == '.rst']
files = [os.path.relpath(f) for f in files]
return files
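def doc_dirs(packages):
    """Return the directories of the given packages as relative paths.
    Note: inferred helper -- ``build_docutils.run`` below references
    ``doc_dirs`` although the original module never defined it; this
    implementation mirrors ``doc_paths``.
    """
    dirs = [p.replace('.', os.sep) for p in packages]
    return [os.path.relpath(os.path.abspath(p)) for p in dirs]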
class build_docutils(Command):
description = "Build documentation with Docutils."
user_options = [
('build-base=', 'b', "base directory for build library"),
('build-lib=', None, "build directory for all distribution"),
('force', 'f', 'Build documentation ignoring timestamps.')
]
def has_docs(self):
return len(doc_paths(self.distribution.packages)) > 0
def initialize_options(self):
self.build_base = 'build'
self.build_lib = None
self.force = False
def finalize_options(self):
if self.build_lib is None:
self.build_lib = os.path.join(self.build_base, 'lib')
def run(self):
args = ["rst2html.py",
"--stylesheet", "help.css",
"--link-stylesheet",
"--traceback",
"SRC_PATH_ARG_2",
"DST_PATH_ARG_3"]
#Process the reStructuredText files.
try:
for f in doc_paths(self.distribution.packages):
src = os.path.abspath(f)
dst = os.path.abspath(
os.path.join(self.build_lib, os.path.splitext(f)[0] + ".html"))
if not os.path.isdir(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
if self.force or not os.path.isfile(dst) or os.path.getmtime(src) > os.path.getmtime(dst):
print("Docutils", f)
args[-2] = os.path.abspath(src)
args[-1] = os.path.abspath(dst)
ret = subprocess.call(args)
except OSError as err:
if err.errno == errno.ENOENT:
print("error: Docutils missing.", file=sys.stderr)
raise err
#Copy CSS files
for p in doc_dirs(self.distribution.packages):
src = '/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/docutils/writers/html4css1/html4css1.css'
dst = os.path.join(self.build_lib, p, 'html4css1.css')
print("Copy", dst)
shutil.copyfile(src, dst)
files = [[os.path.join(p, f) for f in os.listdir(p)]
for p in doc_dirs(self.distribution.packages)]
files = [f for f in itertools.chain(*files)]
files = [f for f in files if os.path.splitext(f)[1] not in [".py", ".rst"]]
for f in files:
src = os.path.abspath(f)
dst = os.path.abspath(os.path.join(self.build_lib, f))
shutil.copyfile(src, dst)
| apache-2.0 | -7,969,874,219,492,791,000 | 37.990476 | 148 | 0.586957 | false |
yxdong/ybk | ybk/lighttrade/sysframe/client.py | 1 | 7588 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import random
import logging
from concurrent.futures import ThreadPoolExecutor
import requests
from requests.packages.urllib3.util import is_connection_dropped
import xmltodict
from .protocol import (UserProtocol, TradeProtocol,
MoneyProtocol, OfferProtocol)
requests.packages.urllib3.disable_warnings()
log = logging.getLogger('sysframe')
class Client(UserProtocol, TradeProtocol, MoneyProtocol, OfferProtocol):
def __init__(self,
front_url,
tradeweb_url):
"""
:param front_url: http://HOST:PORT
:param tradeweb_url: [http://HOST:PORT/issue_tradeweb/httpXmlServlet]
"""
self.front_url = front_url or ''
self.tradeweb_urls = tradeweb_url
self.tradeweb_url = random.choice(tradeweb_url)
for url in tradeweb_url:
if url.startswith(self.front_url):
self.front_url = self.tradeweb_url.rsplit('/', 2)[0]
break
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=10,
pool_maxsize=10)
self.session.mount('http://', adapter)
self.session.headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
self.executor = ThreadPoolExecutor(2)
self.executor.submit(self.warmup, 1)
self._reset()
def _reset(self):
self.cid = None # customer_id
self.uid = None # user_id
self.sid = None # session_id
self.mid = '99' # market_id
self.jsid = None # cookie
self.username = None
self.password = None
self.latency = None
self.time_offset = None
self.last_error = ''
def error(self, msg):
self.last_error = msg
log.error(msg)
@property
def is_logged_in(self):
return self.sid is not None
def request_tradeweb(self, protocol, params):
return self.request_xml(protocol, params, mode='tradeweb')
def request_front(self, protocol, params):
return self.request_xml(protocol, params, mode='front')
def request_xml(self, protocol, params, mode='tradeweb', headers={},
to=1):
""" 发送交易指令
- 拼接请求成xml
- 发送
- 解析返回的请求
"""
if mode == 'tradeweb':
url = self.tradeweb_url
elif mode == 'front':
url = self.front_url + \
'/common_front/checkneedless/user/logon/logon.action'
xml = self._create_xml(protocol, params)
        log.debug('Sending request {}: {}'.format(url, xml))
try:
r = self.session.post(
url, headers=headers, data=xml, verify=False,
timeout=(to, to))
except requests.exceptions.RequestException:
self.tradeweb_url = random.choice(self.tradeweb_urls)
if to <= 32:
to *= 2
else:
                raise ValueError('Connection timed out')
return self.request_xml(protocol, params, mode, headers, to=to)
result = r.content.decode('gb18030', 'ignore')
        log.debug('Received response {}'.format(result))
if len(result) > 0:
return xmltodict.parse(result)
else:
            raise ValueError('Request failed, please check the request format / network connection')
def warmup(self, size=5):
""" Warmup Connection Pools """
t0 = time.time()
url = self.tradeweb_url
a = self.session.get_adapter(url)
p = a.get_connection(url)
count = 0
conns = [p._get_conn() for _ in range(size)]
for c in conns:
if is_connection_dropped(c):
count += 1
c.connect()
p._put_conn(c)
p.pool.queue = list(reversed(p.pool.queue))
if count > 0:
            log.info('Re-established {} connections, took {} seconds'
                     ''.format(count, time.time() - t0))
def clear_connections(self):
url = self.tradeweb_url
a = self.session.get_adapter(url)
p = a.get_connection(url)
p.pool.queue = []
def request_ff(self, requests, interval=0.001, repeat=1, response=False):
""" Fire and Forget Requests in Batch
:param requests: [(protocol, params), ...]
"""
if len(requests) * repeat > 90:
repeat = 90 // len(requests)
            log.warning('Too many batched requests, automatically reducing repeat count to {}'.format(repeat))
if repeat < 1:
            raise ValueError('Too many requests in a single batch, please keep it under 90')
xmls = [self._create_xml(protocol, params)
for protocol, params in requests]
bxmls = [xml.encode('utf-8') for xml in xmls]
url = self.tradeweb_url
a = self.session.get_adapter(url)
p = a.get_connection(url)
c = p._get_conn()
if is_connection_dropped(c):
c.connect()
hu = url[url.find('//') + 2:]
host, uri = hu.split('/', 1)
def build_request(bxml):
data = 'POST /{} HTTP/1.1\r\n'.format(uri) + \
'HOST: {}\r\n'.format(host) + \
'COOKIE: JSESSIONID={}\r\n'.format(self.jsid) + \
'Connection: Keep-Alive\r\n' + \
'Content-Length: {}\r\n'.format(len(bxml)) + \
'\r\n'
data = data.encode('gb18030') + bxml
return data
begin = time.time()
sleep_overhead = 0.0002
for _ in range(repeat):
for bxml in bxmls:
t0 = time.time()
data = build_request(bxml)
c.sock.sendall(data)
used = time.time() - t0
if used < interval - sleep_overhead:
time.sleep(interval - used - sleep_overhead)
end = time.time()
        log.info('Finished sending batch: {} requests in {} seconds'
                 ''.format(len(bxmls) * repeat, end - begin))
# Parsing Results
if response:
results = []
count = len(xmls) * repeat
f = c.sock.makefile('rb')
while count > 0:
count -= 1
length = 0
line = f.readline().strip()
if not line.startswith(b'HTTP/1.1'):
break
while True:
line = f.readline().strip()
if not line:
break
key, value = line.split(b': ')
if key == b'Content-Length':
length = int(value)
content = f.read(length)
text = content.decode('gb18030', 'ignore')
results.append(xmltodict.parse(text))
p._put_conn(c)
return results
else:
# we are closing one connection, for performance consideration
# let's open another connection (if necessory) in background
self.executor.submit(self.warmup, 3)
c.close()
def _create_xml(self, protocol, params):
header = '<?xml version="1.0" encoding="gb2312"?>'
reqs = []
for key, value in params.items():
reqs.append('<{}>{}</{}>'.format(key, value, key))
req = ''.join(reqs)
body = '<GNNT><REQ name="{}">{}</REQ></GNNT>'.format(protocol, req)
return header + body
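    # Illustrative example ('login_req' and 'USER_ID' are made-up names; element
    # order follows the ordering of the ``params`` dict):
    #   _create_xml('login_req', {'USER_ID': 'u1'}) ->
    #   '<?xml version="1.0" encoding="gb2312"?>'
    #   '<GNNT><REQ name="login_req"><USER_ID>u1</USER_ID></REQ></GNNT>'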
| mit | 2,448,941,336,220,080,600 | 32.707763 | 77 | 0.518288 | false |
tensorflow/tpu | models/experimental/show_and_tell/show_and_tell_model.py | 1 | 13116 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image-to-text implementation based on http://arxiv.org/abs/1411.4555.
"Show and Tell: A Neural Image Caption Generator"
Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
import tensorflow.compat.v1 as tf
import image_embedding
import image_processing
import inputs as input_ops
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import rnn as contrib_rnn
class ShowAndTellModel(object):
"""Image-to-text implementation based on http://arxiv.org/abs/1411.4555.
"Show and Tell: A Neural Image Caption Generator"
Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan
"""
def __init__(self, config, mode, train_inception=False):
"""Basic setup.
Args:
config: Object containing configuration parameters.
mode: "train", "eval" or "inference".
train_inception: Whether the inception submodel variables are trainable.
"""
assert mode in ["train", "eval", "inference"]
self.config = config
self.mode = mode
self.train_inception = train_inception
# To match the "Show and Tell" paper we initialize all variables with a
# random uniform initializer.
self.initializer = tf.random_uniform_initializer(
minval=-self.config.initializer_scale,
maxval=self.config.initializer_scale)
# A float32 Tensor with shape [batch_size, height, width, channels].
self.images = None
# An int32 Tensor with shape [batch_size, padded_length].
self.input_seqs = None
# An int32 Tensor with shape [batch_size, padded_length].
self.target_seqs = None
# An int32 0/1 Tensor with shape [batch_size, padded_length].
self.input_mask = None
# A float32 Tensor with shape [batch_size, embedding_size].
self.image_embeddings = None
# A float32 Tensor with shape [batch_size, padded_length, embedding_size].
self.seq_embeddings = None
# A float32 scalar Tensor; the total loss for the trainer to optimize.
self.total_loss = None
# A float32 Tensor with shape [batch_size * padded_length].
self.target_cross_entropy_losses = None
# A float32 Tensor with shape [batch_size * padded_length].
self.target_cross_entropy_loss_weights = None
# Collection of variables from the inception submodel.
self.inception_variables = []
# Function to restore the inception submodel from checkpoint.
self.init_fn = None
# Global step Tensor.
self.global_step = None
def is_training(self):
"""Returns true if the model is built for training mode."""
return self.mode == "train"
def load_image(self, encoded_image, thread_id=0):
"""Decodes and processes an image string.
Args:
encoded_image: A scalar string Tensor; the encoded image.
thread_id: Preprocessing thread id used to select the ordering of color
distortions.
Returns:
A float32 Tensor of shape [height, width, 3]; the processed image.
"""
return image_processing.process_image(
encoded_image,
is_training=self.is_training(),
height=self.config.image_height,
width=self.config.image_width,
thread_id=thread_id,
image_format=self.config.image_format)
def distort_images(self, images, seed):
"""Distort a batch of images.
(Processing a batch allows us to easily switch between TPU and CPU
execution).
"""
if self.mode == "train":
images = image_processing.distort_image(images, seed)
# Rescale to [-1,1] instead of [0, 1]
images = tf.subtract(images, 0.5)
images = tf.multiply(images, 2.0)
return images
def build_inputs(self):
"""Input prefetching, preprocessing and batching.
Outputs:
self.images
self.input_seqs
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
"""
if self.mode == "inference":
# In inference mode, images and inputs are fed via placeholders.
image_feed = tf.placeholder(dtype=tf.string, shape=[], name="image_feed")
input_feed = tf.placeholder(
dtype=tf.int64,
shape=[None], # batch_size
name="input_feed")
# Process image and insert batch dimensions.
images = tf.expand_dims(self.load_image(image_feed), 0)
input_seqs = tf.expand_dims(input_feed, 1)
# No target sequences or input mask in inference mode.
target_seqs = None
input_mask = None
else:
def _load_example(serialized_example):
encoded_image, caption = input_ops.parse_example(
serialized_example,
image_feature=self.config.image_feature_name,
caption_feature=self.config.caption_feature_name)
image = self.load_image(encoded_image)
# strings.split expects a batch
input_seqs, target_seqs, input_mask = input_ops.pad_caption_to_input(
caption)
return image, input_seqs, target_seqs, input_mask
def _load_dataset(filename):
return tf.data.TFRecordDataset(filename, buffer_size=16 * 1024 * 1024)
df = tf.data.Dataset.list_files(
self.config.input_file_pattern, shuffle=self.mode == "train")
df = df.apply(
tf.data.experimental.parallel_interleave(
_load_dataset, cycle_length=64, sloppy=True))
if self.mode == "train":
df = df.repeat()
df = df.shuffle(1024)
df = df.apply(
tf.data.experimental.map_and_batch(
_load_example,
self.config.batch_size,
num_parallel_batches=8,
drop_remainder=True))
df = df.prefetch(8)
images, input_seqs, target_seqs, input_mask = df.make_one_shot_iterator(
).get_next()
self.images = images
self.input_seqs = input_seqs
self.target_seqs = target_seqs
self.input_mask = input_mask
def build_image_embeddings(self, images):
"""Builds the image model subgraph and generates image embeddings."""
images = self.distort_images(images, tf.train.get_or_create_global_step())
inception_output = image_embedding.inception_v3(
images,
trainable=self.train_inception,
is_training=self.is_training(),
add_summaries=False)
self.inception_variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")
# Map inception output into embedding space.
with tf.variable_scope("image_embedding") as scope:
image_embeddings = contrib_layers.fully_connected(
inputs=inception_output,
num_outputs=self.config.embedding_size,
activation_fn=None,
weights_initializer=self.initializer,
biases_initializer=None,
scope=scope)
# Save the embedding size in the graph.
tf.constant(self.config.embedding_size, name="embedding_size")
return image_embeddings
def build_seq_embeddings(self, input_seqs):
"""Builds the input sequence embeddings.
Inputs:
input_seqs
Outputs:
self.seq_embeddings
"""
with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
embedding_map = tf.get_variable(
name="map",
shape=[self.config.vocab_size, self.config.embedding_size],
initializer=self.initializer)
seq_embeddings = tf.nn.embedding_lookup(embedding_map, input_seqs)
return seq_embeddings
def build_model(self):
"""Builds the model.
Inputs:
self.image_embeddings
self.seq_embeddings
self.target_seqs (training and eval only)
self.input_mask (training and eval only)
Outputs:
self.total_loss (training and eval only)
self.target_cross_entropy_losses (training and eval only)
self.target_cross_entropy_loss_weights (training and eval only)
"""
# This LSTM cell has biases and outputs tanh(new_c) * sigmoid(o), but the
# modified LSTM in the "Show and Tell" paper has no biases and outputs
# new_c * sigmoid(o).
lstm_cell = contrib_rnn.BasicLSTMCell(
num_units=self.config.num_lstm_units, state_is_tuple=True)
if self.mode == "train":
lstm_cell = contrib_rnn.DropoutWrapper(
lstm_cell,
input_keep_prob=self.config.lstm_dropout_keep_prob,
output_keep_prob=self.config.lstm_dropout_keep_prob)
with tf.variable_scope("lstm", initializer=self.initializer) as lstm_scope:
# Feed the image embeddings to set the initial LSTM state.
zero_state = lstm_cell.zero_state(
batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32)
_, initial_state = lstm_cell(self.image_embeddings, zero_state)
# Allow the LSTM variables to be reused.
lstm_scope.reuse_variables()
if self.mode == "inference":
# In inference mode, use concatenated states for convenient feeding and
# fetching.
tf.concat(initial_state, 1, name="initial_state")
# Placeholder for feeding a batch of concatenated states.
state_feed = tf.placeholder(
dtype=tf.float32,
shape=[None, sum(lstm_cell.state_size)],
name="state_feed")
state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
# Run a single LSTM step.
lstm_outputs, state_tuple = lstm_cell(
inputs=tf.squeeze(self.seq_embeddings, squeeze_dims=[1]),
state=state_tuple)
        # Concatenate the resulting state.
tf.concat(state_tuple, 1, name="state")
else:
# Run the batch of sequence embeddings through the LSTM.
sequence_length = tf.reduce_sum(self.input_mask, 1)
lstm_outputs, _ = tf.nn.dynamic_rnn(
cell=lstm_cell,
inputs=self.seq_embeddings,
sequence_length=sequence_length,
initial_state=initial_state,
dtype=tf.float32,
scope=lstm_scope)
# Stack batches vertically.
lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size])
with tf.variable_scope("logits") as logits_scope:
logits = contrib_layers.fully_connected(
inputs=lstm_outputs,
num_outputs=self.config.vocab_size,
activation_fn=None,
weights_initializer=self.initializer,
scope=logits_scope)
if self.mode == "inference":
tf.nn.softmax(logits, name="softmax")
else:
targets = tf.reshape(self.target_seqs, [-1])
weights = tf.to_float(tf.reshape(self.input_mask, [-1]))
# Compute losses.
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
batch_loss = tf.div(
tf.reduce_sum(tf.multiply(losses, weights)),
tf.reduce_sum(weights),
name="batch_loss")
tf.losses.add_loss(batch_loss)
total_loss = tf.losses.get_total_loss()
self.total_loss = total_loss
self.target_cross_entropy_losses = losses # Used in evaluation.
self.target_cross_entropy_loss_weights = weights # Used in evaluation.
def setup_inception_initializer(self):
"""Sets up the function to restore inception variables from checkpoint."""
if self.mode != "inference":
# Restore inception variables only.
saver = tf.train.Saver(self.inception_variables)
def restore_fn(sess):
tf.logging.info("Restoring Inception variables from checkpoint file %s",
self.config.inception_checkpoint_file)
saver.restore(sess, self.config.inception_checkpoint_file)
self.init_fn = restore_fn
def setup_global_step(self):
"""Sets up the global step Tensor."""
self.global_step = tf.train.get_or_create_global_step()
def build_model_for_tpu(self, images, input_seqs, target_seqs, input_mask):
self.image_embeddings = self.build_image_embeddings(images)
self.seq_embeddings = self.build_seq_embeddings(target_seqs)
self.target_seqs = target_seqs
self.input_mask = input_mask
self.build_model()
def build(self):
"""Creates all ops for training and evaluation."""
self.build_inputs()
self.image_embeddings = self.build_image_embeddings(self.images)
self.seq_embeddings = self.build_seq_embeddings(self.input_seqs)
self.build_model()
self.setup_inception_initializer()
self.setup_global_step()
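  # Rough usage sketch (illustrative only; the constructor signature is
  # inferred from the attributes referenced above and is not shown in this
  # excerpt):
  #   model = <ModelClass>(config, mode="train", train_inception=False)
  #   model.build()
  #   # model.total_loss can then be handed to the optimizer / training loop.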
| apache-2.0 | 4,280,079,833,549,058,000 | 34.448649 | 80 | 0.658432 | false |
mganeva/mantid | scripts/HFIR_4Circle_Reduction/optimizelatticewindow.py | 1 | 3900 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=C0103
from __future__ import (absolute_import, division, print_function)
from qtpy.QtWidgets import (QMainWindow)
from qtpy.QtCore import Signal as pyqtSignal
from mantid.kernel import Logger
try:
from mantidqt.utils.qt import load_ui
except ImportError:
Logger("HFIR_4Circle_Reduction").information('Using legacy ui importer')
from mantidplot import load_ui
class OptimizeLatticeWindow(QMainWindow):
"""
Main window widget to set up parameters to optimize
"""
# establish signal for communicating from App2 to App1 - must be defined before the constructor
mySignal = pyqtSignal(int)
def __init__(self, parent=None):
"""
Initialization
:param parent:
:return:
"""
# init
QMainWindow.__init__(self, parent)
ui_path = "OptimizeLattice.ui"
self.ui = load_ui(__file__, ui_path, baseinstance=self)
# initialize widgets
self.ui.comboBox_unitCellTypes.addItems(['Cubic',
'Tetragonal',
'Orthorhombic',
'Hexagonal',
'Rhombohedral',
'Monoclinic',
'Triclinic'])
self.ui.comboBox_ubSource.addItems(['Tab - Calculate UB Matrix', 'Tab - Accepted UB Matrix'])
self.ui.lineEdit_tolerance.setText('0.12')
# define event handling
self.ui.pushButton_Ok.clicked.connect(self.do_ok)
self.ui.pushButton_cancel.clicked.connect(self.do_quit)
if parent is not None:
# connect to the method to refine UB matrix by constraining lattice parameters
self.mySignal.connect(parent.refine_ub_lattice)
# flag to trace back its previous step
self._prevIndexByFFT = False
return
def do_ok(self):
"""
        User decides to continue and then sends a signal to the parent
:return:
"""
tolerance = self.get_tolerance()
if tolerance is None:
raise RuntimeError('Tolerance cannot be left blank!')
# set up a hand-shaking signal
signal_value = 1000
self.mySignal.emit(signal_value)
# quit
self.do_quit()
return
def do_quit(self):
"""
Quit the window
:return:
"""
self.close()
return
def get_unit_cell_type(self):
"""
        Get the unit cell type
:return:
"""
unit_cell_type = str(self.ui.comboBox_unitCellTypes.currentText())
return unit_cell_type
def get_tolerance(self):
"""
Get the tolerance for refining UB matrix with unit cell type.
:return:
"""
tol_str = str(self.ui.lineEdit_tolerance.text()).strip()
if len(tol_str) == 0:
# blank: return None
tol = None
else:
tol = float(tol_str)
return tol
def get_ub_source(self):
"""
Get the index of the tab where the UB matrix comes from
:return:
"""
source = str(self.ui.comboBox_ubSource.currentText())
if source == 'Tab - Calculate UB Matrix':
tab_index = 3
else:
tab_index = 4
return tab_index
def set_prev_ub_refine_method(self, use_fft=False):
"""
:param use_fft:
:return:
"""
self._prevIndexByFFT = use_fft
return
| gpl-3.0 | -8,642,162,336,785,655,000 | 27.057554 | 101 | 0.554103 | false |
alexandregz/simian | src/simian/util/compile_js.py | 1 | 1961 | #!/usr/bin/env python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Uses the Closure Compiler Service API to compile JavaScript files.
# ./compile_js.py <path_to_output_js_file>
# (the input files are fetched remotely from the CODE_URLS list below)
import httplib
import urllib
import re
import sys
CLOSURE_SERVICE_DOMAIN = 'closure-compiler.appspot.com'
BASE_URL = 'https://raw.githubusercontent.com/google/simian/master/src/simian/mac/admin/js/'
JS_FILES = ['main.js', 'forms.js', 'menu.js', 'net.js', 'tags.js']
CODE_URLS = [BASE_URL + f for f in JS_FILES]
output_js_file = sys.argv[1]
# Param docs: https://developers.google.com/closure/compiler/docs/api-ref
params = [
('compilation_level', 'ADVANCED_OPTIMIZATIONS'),
('output_format', 'text'),
('output_info', 'compiled_code'),
('use_closure_library', True),
]
for url in CODE_URLS:
params.append(('code_url', url))
params = urllib.urlencode(params)
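# After urlencode the POST body is a plain query string with repeated keys,
# e.g. "compilation_level=ADVANCED_OPTIMIZATIONS&...&code_url=<url1>&code_url=<url2>".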
# Always use the following value for the Content-type header.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection(CLOSURE_SERVICE_DOMAIN)
conn.request('POST', '/compile', params, headers)
response = conn.getresponse()
response_text = response.read()
conn.close()
if response.status != 200 or response_text.startswith('Error'):
print >>sys.stderr, 'JS compilation failed: %s' % response_text
sys.exit(1)
f = open(output_js_file, 'w')
f.write(response_text)
f.close()
| apache-2.0 | 2,229,931,755,863,811,600 | 31.147541 | 92 | 0.721061 | false |
gonicus/gosa | doc/sphinx-cindex/setup.py | 1 | 1188 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
long_desc = '''
This package contains the cindex Sphinx extension.
Allows declaring cindex specs wherever in the documentation (for instance,
in docstrings of UnitTest.test_* methods) and displaying them as a single
list.
'''
requires = ['Sphinx>=0.6']
setup(
name='sphinxcontrib-cindex',
version='0.1',
license='GPL',
author='Fabian Hickert',
author_email='hickert@gonicus.de',
description='Sphinx "cindex" extension',
long_description=long_desc,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(),
include_package_data=True,
install_requires=requires,
namespace_packages=['sphinxcontrib'],
package_data={'sphinxcontrib': ['cindex.css']},
)
| lgpl-2.1 | -4,032,579,850,245,826,600 | 27.285714 | 74 | 0.648148 | false |
npawelek/rpc-maas | playbooks/files/rax-maas/plugins/neutron_api_local_check.py | 1 | 3562 | #!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ipaddr
from maas_common import generate_local_endpoint
from maas_common import get_openstack_client
from maas_common import metric
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
from requests import exceptions as exc
def check(args):
neutron = get_openstack_client('network')
try:
neutron_local_endpoint = generate_local_endpoint(
str(neutron.get_endpoint()), args.ip, args.port,
args.protocol, '/agents'
)
resp = neutron.session.get(neutron_local_endpoint, timeout=180)
except (exc.ConnectionError, exc.HTTPError, exc.Timeout):
is_up = False
metric_bool('client_success', False, m_name='maas_neutron')
# Any other exception presumably isn't an API error
except Exception as e:
metric_bool('client_success', False, m_name='maas_neutron')
status_err(str(e), m_name='maas_neutron')
else:
is_up = True
milliseconds = resp.elapsed.total_seconds() * 1000
metric_bool('client_success', True, m_name='maas_neutron')
# Gather a few metrics
agents = len(resp.json()['agents'])
networks = len([i for i in neutron.networks()])
routers = len([i for i in neutron.routers()])
subnets = len([i for i in neutron.subnets()])
status_ok(m_name='maas_neutron')
metric_bool('neutron_api_local_status', is_up, m_name='maas_neutron')
# Only send metrics if the API is up
if is_up:
metric('neutron_api_local_response_time',
'double',
'%.3f' % milliseconds,
'ms')
metric('neutron_agents', 'uint32', agents, 'agents')
metric('neutron_networks', 'uint32', networks, 'networks')
        metric('neutron_routers', 'uint32', routers, 'routers')
metric('neutron_subnets', 'uint32', subnets, 'subnets')
def main(args):
check(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Check Neutron API against local or remote address')
parser.add_argument('ip', nargs='?',
type=ipaddr.IPv4Address,
help='Optional Neutron API server address')
parser.add_argument('--telegraf-output',
action='store_true',
default=False,
help='Set the output format to telegraf')
parser.add_argument('--port',
action='store',
default='9696',
help='Port for neutron API service')
parser.add_argument('--protocol',
action='store',
default='http',
help='Protocol for the neutron API service')
args = parser.parse_args()
with print_output(print_telegraf=args.telegraf_output):
main(args)
| apache-2.0 | -165,083,766,257,173,760 | 36.104167 | 74 | 0.623526 | false |
billzorn/msp-pymodel | lib/msp_assem.py | 1 | 7687 | import msp_fr5969_model as model
from msp_isa import isa
# low level wrappers for isa methods
def _as(fmt, name, smode, dmode, fields):
ins = isa.modes_to_instr(fmt, name, smode, dmode)
#print('{:s} {:s} {:s} {:s}'.format(name, smode, dmode, repr(fields)))
words = isa.inhabitant(ins, fields)
return words
def assemble(name, smode, dmode, fields):
fmt = isa.name_to_fmt[name]
return _as(fmt, name, smode, dmode, fields)
# We record used registers as sets: this could be very compactly represented
# with machine integer backed bit sets, but whatever.
# We distinguish between two different ways to "use" a register: a "use" depends
# on the data in it, so other instructions are not free to overwrite it. A
# "clobber" puts unknown data into the register (due to expected differences
# between the hardware and the simulator) and needs to be cleaned up at some
# point.
class Reginfo(object):
def __init__(self, uses = {}, clobbers = []):
self.uses = uses
self.clobbers = set(clobbers)
def conflict(self, reg):
if reg in self.uses:
return self.uses[reg]
elif reg in self.clobbers:
return True
return False
def add(self, uses = {}, clobbers = []):
for use in uses:
if use in self.uses:
raise ValueError('conflict: already using {:s}'.format(repr(use)))
self.uses[use] = uses[use]
for clobber in clobbers:
self.clobbers.add(clobber)
# returns the value if already set, and the check passes. if the check fails, raises
    # an exception. returns false (and doesn't check!) if not already set.
def check_or_set_use(self, rn, pred, default):
if rn in self.uses:
if not pred(self.uses[rn]):
raise ValueError('conflict: predicate {:s} failed for {:x}: {:s}'
.format(repr(pred), rn, repr(self.uses[rn])))
return self.uses[rn]
else:
if not pred(default):
raise ValueError('conflict: predicate {:s} failed for {:x}: {:s}'
.format(repr(pred), rn, repr(default)))
self.uses[rn] = default
return False
def overwrite_or_set_use(self, rn, x):
if rn in self.uses:
self.uses[rn] = x
# we did overwrite
return True
else:
self.uses[rn] = x
# set the value anyway
return False
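# Illustrative Reginfo usage (register numbers and values are made up):
#   info = Reginfo(uses={15: 0x1234}, clobbers=[12])
#   info.conflict(15) -> 0x1234  (another instruction depends on R15's data)
#   info.conflict(12) -> True    (R12 holds unknown, clobbered data)
#   info.conflict(4)  -> False   (R4 is free to use)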
# helpful predicates:
def has_immediate(mode):
if mode in {'X(Rn)', 'ADDR', '&ADDR', '#@N', '#N'}:
return True
elif mode in {'Rn', '#1', '@Rn', '@Rn+', 'none'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
def has_reg(mode):
if mode in {'Rn', 'X(Rn)', '@Rn', '@Rn+'}:
return True
elif mode in {'ADDR', '&ADDR', '#1', '#@N', '#N', 'none'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
# Will return None if the mode is not a cg mode. Otherwise will return
# the constant being generated, which might be 0 (which is False).
def has_cg(mode, rn):
if mode == 'Rn':
if rn == 3:
return 0 # the same as reading the register
elif mode == 'X(Rn)':
if rn == 2:
return 0 # alternative encoding of &ADDR mode
elif rn == 3:
return 1 # alternative encoding of #1 mode
elif mode == '@Rn':
if rn == 2:
return 4
elif rn == 3:
return 2
elif mode == '@Rn+':
if rn == 2:
return 8
elif rn == 3:
return -1
return None
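# Examples, read straight off the table above:
#   has_cg('Rn', 3)    -> 0     (CG2 read as a register yields zero)
#   has_cg('X(Rn)', 3) -> 1     (alternative encoding of #1)
#   has_cg('@Rn+', 2)  -> 8     (SR/CG1 generates 8 in autoincrement mode)
#   has_cg('@Rn', 5)   -> None  (ordinary register, no constant generated)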
def uses_addr(mode, rn):
if mode in {'X(Rn)', 'ADDR', '&ADDR', '@Rn', '@Rn+'}:
return not has_cg(mode, rn)
elif mode in {'Rn', '#1', '#@N', '#N', 'none'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
def uses_reg(mode, rn):
if mode in {'Rn', 'X(Rn)', '@Rn', '@Rn+'}:
return has_cg(mode, rn) is not None
elif mode in {'ADDR', '&ADDR', '#1', '#@N', '#N', 'none'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
def modifies_destination(name):
if name in {'MOV', 'ADD', 'ADDC', 'SUBC', 'SUB', 'DADD', 'BIC', 'BIS', 'XOR', 'AND',
'RRC', 'SWPB', 'RRA', 'SXT'}:
return True
else:
return False
def modifies_sr(name):
if name in {'ADD', 'ADDC', 'SUBC', 'SUB', 'CMP', 'DADD', 'BIT', 'XOR', 'AND',
'RRC', 'RRA', 'RETI', 'SXT'}:
return True
else:
return False
# assembly with dynamic computation of symbols
def assemble_sym(name, smode, dmode, symfields, pc, labels):
fields = {}
for fieldname in symfields:
sym_v = symfields[fieldname]
if isinstance(sym_v, tuple):
if sym_v[0] == 'PC_ABS':
addr = sym_v[1]
offs = pc
if fieldname in {'isrc'}:
offs += 2
elif fieldname in {'idst'}:
offs += 2
if has_immediate(smode):
offs += 2
v = (addr - offs) & 0xffff #TODO hard-coded 16-bit immediate
elif sym_v[0] == 'LABEL':
# initial implementation: immediate lookup
v = labels[sym_v[1]]
# This requires all of the addresses to be precomputed if we want to
# be able to jump to labels after this instruction.
elif sym_v[0] == 'JLABEL':
# offset to jump label
addr = labels[sym_v[1]]
offs = pc + 2
immediate = (addr - offs) & 0x7ff #TODO hard-coded 11-bit immediate
v = immediate >> 1 & 0x3ff #TODO hard-coded 9-bit immediate
elif sym_v[0] == 'JSIGN':
# sign for offset to jump label
addr = labels[sym_v[1]]
offs = pc + 2
immediate = (addr - offs) & 0x7ff #TODO hard-coded 11-bit immediate
v = immediate >> 10 & 0x1
else:
raise ValueError('unsupported assembly directive: {:s}'.format(sym_v[0]))
else:
v = sym_v
fields[fieldname] = v
return assemble(name, smode, dmode, fields)
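# Values in symfields may be plain integers or directives such as
# ('PC_ABS', addr), ('LABEL', name), ('JLABEL', name) and ('JSIGN', name);
# e.g. {'isrc': ('PC_ABS', 0xc000)} resolves to a PC-relative word at
# assembly time (the address and field name here are only illustrative).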
def assemble_symregion(instructions, base_pc, labels = {}):
# precompute addresses of labels
pc_pre = base_pc
for args in instructions:
if isinstance(args, str):
labels[args] = pc_pre
else:
name, smode, dmode, fields = args
pc_pre += 2
if has_immediate(smode):
pc_pre += 2
if has_immediate(dmode):
pc_pre += 2
# go back and generate encoding
words = []
pc = base_pc
for args in instructions:
if isinstance(args, str):
assert labels[args] == pc
else:
new_words = assemble_sym(*(args + (pc, labels)))
pc += len(new_words) * 2
words += new_words
# for label in labels:
# print('{:s} : {:s}'.format(label, hex(labels[label])))
assert pc == pc_pre
return words
def region_size(instructions):
size = 0
for args in instructions:
if isinstance(args, str):
# label, skip
continue
else:
name, smode, dmode, fields = args
size += 2
if has_immediate(smode):
size += 2
if has_immediate(dmode):
size += 2
return size
| mit | 1,911,821,831,308,556,000 | 33.164444 | 89 | 0.525433 | false |
myfavouritekk/TPN | tools/propagate/sequence_roi_propagation.py | 1 | 6286 | #!/usr/bin/env python
# --------------------------------------------------------
# Test regression propagation on ImageNet VID video
# Modified by Kai KANG (myfavouritekk@gmail.com)
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import argparse
import pprint
import time
import os
import os.path as osp
import sys
import cPickle
import numpy as np
this_dir = osp.dirname(__file__)
# add caffe-mpi path
sys.path.insert(0, osp.join(this_dir, '../../external/caffe-mpi/build/install/python'))
import caffe
# add py-faster-rcnn paths
sys.path.insert(0, osp.join(this_dir, '../../external/py-faster-rcnn/lib'))
from fast_rcnn.craft import sequence_im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
# add external libs
sys.path.insert(0, osp.join(this_dir, '../../external'))
from vdetlib.utils.protocol import proto_load, proto_dump
# add src libs
sys.path.insert(0, osp.join(this_dir, '../../src'))
from tpn.propagate import sequence_roi_propagation
from tpn.target import add_track_targets
from tpn.data_io import save_track_proto_to_zip
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('vid_file')
parser.add_argument('box_file')
parser.add_argument('save_file', help='Save zip file')
parser.add_argument('--annot_file', default=None,
help='Ground truth annotation file. [None]')
parser.add_argument('--job', dest='job_id', help='Job slot, GPU ID + 1. [1]',
default=1, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--param', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--num_per_batch', dest='boxes_num_per_batch',
help='split boxes to batches. [32]',
default=32, type=int)
parser.add_argument('--bbox_mean', dest='bbox_mean',
help='the mean of bbox',
default=None, type=str)
parser.add_argument('--bbox_std', dest='bbox_std',
help='the std of bbox',
default=None, type=str)
parser.add_argument('--bbox_pred_layer', dest='bbox_pred_layer',
help='Layer name for bbox regression layer in feature net.',
default='bbox_pred_vid', type=str)
parser.add_argument('--length', type=int, default=9,
help='Propagation length. [9]')
parser.add_argument('--sample_rate', type=int, default=1,
help='Temporal subsampling rate. [1]')
parser.add_argument('--offset', type=int, default=0,
help='Offset of sampling. [0]')
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--gpus', nargs='+', default=None, type=int, help='Available GPUs.')
parser.add_argument('--zip', action='store_true',
help='Save as zip files rather than track protocols')
parser.add_argument('--keep_feat', action='store_true',
help='Keep feature.')
parser.set_defaults(vis=False, zip=False, keep_feat=False)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print 'Called with args:'
print args
if osp.isfile(args.save_file):
print "{} already exists.".format(args.save_file)
sys.exit(1)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.job_id - 1
print 'Using config:'
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print 'Waiting for {} to exist...'.format(args.caffemodel)
time.sleep(10)
caffe.set_mode_gpu()
if args.gpus is None:
caffe.set_device(args.job_id - 1)
else:
assert args.job_id <= len(args.gpus)
caffe.set_device(args.gpus[args.job_id-1])
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
# apply bbox regression normalization on the net weights
with open(args.bbox_mean, 'rb') as f:
bbox_means = cPickle.load(f)
with open(args.bbox_std, 'rb') as f:
bbox_stds = cPickle.load(f)
net.params[args.bbox_pred_layer][0].data[...] = \
net.params[args.bbox_pred_layer][0].data * bbox_stds[:, np.newaxis]
net.params[args.bbox_pred_layer][1].data[...] = \
net.params[args.bbox_pred_layer][1].data * bbox_stds + bbox_means
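    # (Folding the training-time bbox target normalisation back into the
    # weights and biases means the net now emits unnormalised deltas directly.)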
vid_proto = proto_load(args.vid_file)
box_proto = proto_load(args.box_file)
window = net.params[args.bbox_pred_layer][0].data.shape[0] / 4 + 1
track_proto = sequence_roi_propagation(vid_proto, box_proto, net, sequence_im_detect,
window = window,
length=args.length, sample_rate=args.sample_rate,
keep_feat=args.keep_feat, batch_size=args.boxes_num_per_batch)
# add ground truth targets if annotation file is given
if args.annot_file is not None:
annot_proto = proto_load(args.annot_file)
add_track_targets(track_proto, annot_proto)
if args.zip:
save_track_proto_to_zip(track_proto, args.save_file)
else:
proto_dump(track_proto, args.save_file)
| mit | 6,455,828,710,421,682,000 | 37.802469 | 92 | 0.592587 | false |
skoolkid/pyskool | pyskool/game.py | 1 | 20282 | # -*- coding: utf-8 -*-
# Copyright 2008, 2010, 2012-2015 Richard Dymond (rjdymond@gmail.com)
#
# This file is part of Pyskool.
#
# Pyskool is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Pyskool is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pyskool. If not, see <http://www.gnu.org/licenses/>.
"""
Defines the :class:`Game` class.
"""
import sys
import gzip
import os
import pickle
import pygame
import random
import time
from .cast import Cast
from .character import Character
from .skool import Skool
from .graphics import Screen, Gallery
from .sound import Beeper
from .input import Keyboard
from .iniparser import IniParser
from . import skoolbuilder
from . import keys
from . import items
from . import debug
#: Menu operation: Resume.
RESUME = 'RESUME'
#: Menu operation: Save.
SAVE = 'SAVE'
#: Menu operation: Load.
LOAD = 'LOAD'
#: Menu operation: Quit.
QUIT = 'QUIT'
#: Menu operation: Increase scale.
SCALE_UP = 'SCALE_UP'
#: Menu operation: Decrease scale.
SCALE_DOWN = 'SCALE_DOWN'
#: Menu operation: Toggle fullscreen mode.
TOGGLE_FULLSCREEN = 'TOGGLE_FULLSCREEN'
#: Menu operation: Update the screen.
UPDATE = 'UPDATE'
class Game:
"""Builds the skool and the cast, and executes the main loop of the
game.
:param ini_file: The main Pyskool ini file.
:param images_dir: The path to the `images` directory.
:param sounds_dir: The path to the `sounds` directory.
:param ini_dir: The directory to scan for game ini files.
:param options: Options passed from the command line.
:type version: string
:param version: The version number of Pyskool.
:param sav_file: A file from which to restore a saved game.
"""
def __init__(self, ini_file, images_dir, sounds_dir, ini_dir, options, version, sav_file):
# Reduce latency in Pygame 1.8+
pygame.mixer.pre_init(44100, -16, 1, 1024)
pygame.init()
if pygame.mixer.get_init() is None:
sys.stdout.write("WARNING: pygame.mixer failed to initialise; there will be no sound\n")
parser = IniParser(ini_file)
self._create_key_bindings(parser.parse_section('Keys'))
self.menus = {}
self.menu_config = {}
for menu_name in ('Main', 'Quit'):
self.menu_config[menu_name] = (
parser.get_config('Menu {0}'.format(menu_name)),
parser.parse_section('MenuItems {0}'.format(menu_name))
)
self.images_dir = images_dir
self.sounds_dir = sounds_dir
self.speed = 1
self.screenshot = 0
self.version = version
self.screen = None
self.menu = None
builder = skoolbuilder.SkoolBuilder(ini_dir)
builder.sections['ExtraConfig'] = options.config or []
config = builder.get_config('[A-Za-z]+Config')
self.scale = options.scale or config.get('Scale', 2)
self.cheat = options.cheat or config.get('Cheat', 0)
self.quick_start = options.quick_start or config.get('QuickStart', 0)
self.ring_bell = not self.quick_start
self.confirm_close = config.get('ConfirmClose', 0)
self.confirm_quit = config.get('ConfirmQuit', 1)
if sav_file:
if os.path.isfile(sav_file):
self.quick_start = True
self.ring_bell = True
self._load(sav_file)
return
debug.log('Unable to restore from %s: file not found' % sav_file)
image_set = config.get('ImageSet', 'original')
gallery = Gallery(images_dir, image_set, self.scale, builder.get_config(skoolbuilder.IMAGES))
title_prefix = 'Pyskool %s: ' % version
self.screen = Screen(config, gallery, title_prefix)
self.beeper = Beeper(sounds_dir, config)
self.cast = Cast(config, self.screen, gallery)
self.skool = Skool(config, self.screen, self.beeper, self.cast, gallery)
builder.build_skool(self.skool)
self.keyboard = Keyboard()
self.skool.initialise_cast(self.keyboard)
self.screen.initialise_column(self.skool.get_width(), self.cast.eric.x)
self.screen.setup()
self.skool.update_score_box()
self._build_menus()
def _build_menus(self):
"""Build the menus."""
for menu_name, (menu_config, menu_items) in self.menu_config.items():
self.menus[menu_name] = Menu(self.screen, menu_config, menu_items)
def _build_menu_images(self):
for menu in self.menus.values():
menu.build_images(self.screen)
def _handle_menu(self):
"""Handle keypresses while a menu is displayed."""
draw = False
refresh = False
status = ''
operation = self.menu.get_operation(self.keyboard)
if operation == UPDATE:
draw = True
elif operation == RESUME:
self.menu = None
self.skool.draw()
elif operation == SAVE:
sav_file = self._save_game()
status = 'Saved %s' % sav_file
elif operation == LOAD:
sav_file = self._load_last()
status = 'Loaded %s' % sav_file if sav_file else 'No saved games found'
self.skool.draw(False)
refresh = True
elif operation == SCALE_UP:
self.screen.scale_up()
self.skool.restore()
self.skool.draw(False)
refresh = True
status = 'Scale set to %i' % self.screen.scale
self._build_menu_images()
elif operation == SCALE_DOWN:
if self.screen.scale_down():
self.skool.restore()
self.skool.draw(False)
refresh = True
status = 'Scale set to %i' % self.screen.scale
self._build_menu_images()
elif operation == TOGGLE_FULLSCREEN:
pygame.display.toggle_fullscreen()
elif operation == QUIT:
return True
if draw or status:
self.menu.status = status
self.screen.draw_menu(self.menu, refresh)
self.clock.tick(10)
self.keyboard.pump()
return False
def _create_key_bindings(self, keydefs):
"""Create the key bindings.
:param keydefs: Key definitions.
"""
for keydef in keydefs:
action = keydef[0]
bindings = []
for kdef in keydef[1:]:
try:
bindings.append(getattr(pygame, 'K_%s' % kdef))
except AttributeError:
debug.log('%s: key ID not recognised' % kdef)
if bindings:
setattr(keys, action, bindings)
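        # For example, a definition such as ('JUMP', 'j', 'SPACE') would bind
        # keys.JUMP to [pygame.K_j, pygame.K_SPACE]; unrecognised key IDs are
        # logged and skipped (the action name here is purely illustrative).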
def _no_lines(self, *args):
"""Method used to replace
:meth:`~pyskool.character.Character.give_lines` when the
:data:`~pyskool.keys.NO_LINES` cheat key is pressed. The method does
nothing.
"""
return
_give_lines = Character.give_lines
def _save(self, fname):
"""Save the current game state.
:param fname: The name of the file to save to.
"""
save_game_dir = os.path.join(*self.skool.save_game_dir.split('/'))
if not os.path.isdir(save_game_dir):
os.makedirs(save_game_dir)
ofile = os.path.join(save_game_dir, fname)
f = gzip.open(ofile, 'wb', self.skool.save_game_compression)
pickle.dump(self.skool, f)
f.close()
debug.log('Skool saved to %s' % os.path.abspath(ofile))
def _load(self, fname):
"""Load a saved game.
:param fname: The name of the file to load from.
"""
scale = self.screen.scale if self.screen else self.scale
start = time.time()
f = gzip.open(fname, 'rb')
self.skool = pickle.load(f)
f.close()
# Restore instance variables
self.screen = self.skool.screen
self.beeper = self.skool.beeper
self.cast = self.skool.cast
self.keyboard = self.cast.eric.keyboard
# Perform necessary post-load tasks
if scale:
self.screen.scale = scale
self.skool.beeper.restore(self.sounds_dir)
self.skool.gallery.restore(self.images_dir)
self.skool.restore()
self._build_menus()
debug.log('Skool loaded from %s in %0.2fs' % (os.path.abspath(fname), time.time() - start))
def _save_game(self):
"""Save the game."""
fname = '%s.sav' % time.strftime('%Y%m%d-%H%M%S')
self._save(fname)
return fname
def _load_last(self):
"""Load the most recently saved game."""
save_game_dir = os.path.abspath(self.skool.save_game_dir)
if not os.path.isdir(save_game_dir):
debug.log("Cannot load games from '%s': directory not found" % save_game_dir)
return
sav_files = [f for f in os.listdir(save_game_dir) if f.endswith('.sav') and os.path.isfile(os.path.join(save_game_dir, f))]
if not sav_files:
debug.log("No saved games found in '%s'" % save_game_dir)
return
sav_files.sort()
sav_file = sav_files[-1]
self._load(os.path.join(save_game_dir, sav_file))
return sav_file
def _check_cheat_keys(self):
"""Check whether any cheat keys were pressed, and take appropriate
action. This method is called from the main loop.
"""
if self.keyboard.is_pressed(keys.SLOW):
self.speed = 0.5
elif self.keyboard.is_pressed(keys.FAST):
self.speed = 2
else:
self.speed = 1
if self.keyboard.was_pressed(keys.NEXT_LESSON):
self.skool.next_lesson(False)
if self.skool.shields:
if self.keyboard.was_pressed(keys.FLASH_MOST):
for shield in self.skool.shields[1:]:
shield.flash()
self.skool.unflash(self.skool.shields[0])
self.skool.unflash(self.skool.safe)
self.skool.shield_mode = 1
debug.log('Flashed all but one shield; hit it and then open the safe')
if self.keyboard.was_pressed(keys.UNFLASH_MOST):
for shield in self.skool.shields[1:]:
self.skool.unflash(shield)
self.skool.shields[0].flash()
self.skool.safe.flash()
self.skool.shield_mode = 3
debug.log('Unflashed all but one shield; hit it to go up a year')
if self.keyboard.was_pressed(keys.NO_LINES):
if Character.give_lines == self._no_lines:
Character.give_lines = self._give_lines
debug.log('Enabled lines-giving')
else:
Character.give_lines = self._no_lines
debug.log('Disabled lines-giving')
if self.keyboard.was_pressed(keys.ADD_LINES):
lines = 100 * random.randrange(*self.cast.lines_range)
self.skool.add_lines(lines)
debug.log('Added %i lines' % lines)
if self.keyboard.was_pressed(keys.ZERO_LINES):
self.skool.add_lines(-self.skool.scoreboard.lines)
debug.log('Set lines total to zero')
if self.keyboard.was_pressed(keys.REVEAL):
for room in self.skool.rooms.values():
for desk in room.desks:
if desk.contents:
debug.log('%s x=%i: %s' % (room.name, desk.x, desk.contents))
for c in self.cast.character_list:
if c.special_answer:
debug.log('%s: %s' % (c.name, c.special_answer))
if self.skool.safe_combination:
debug.log('Safe: %s' % self.skool.safe_combination)
if self.skool.bike_combination:
debug.log('Bike: %s' % self.skool.bike_combination)
if self.skool.storeroom_combination:
debug.log('Storeroom: %s' % self.skool.storeroom_combination)
if self.skool.inventory_item_ids:
eric = self.cast.eric
inventory = eric.inventory
if self.keyboard.was_pressed(keys.SWITCH_PISTOL):
if items.WATER_PISTOL in inventory:
inventory.remove(items.WATER_PISTOL)
inventory.add(items.SHERRY_PISTOL)
elif items.SHERRY_PISTOL in inventory:
inventory.remove(items.SHERRY_PISTOL)
inventory.add(items.WATER_PISTOL)
eric.print_inventory()
if self.keyboard.was_pressed(keys.GIVE_ALL):
inventory.update((items.SAFE_KEY, items.STOREROOM_KEY, items.FROG, items.WATER_PISTOL, items.STINKBOMBS3))
if self.cast.frogs:
self.cast.frogs[0].hide()
eric.mice = 8
eric.print_inventory()
eric.print_mouse_inventory()
self.skool.unchain_bike()
if self.skool.doors:
if self.keyboard.was_pressed(keys.OPEN_DOORS):
for door_id in self.skool.doors:
self.skool.move_door(door_id, False)
for window_id in self.skool.windows:
self.skool.move_door(window_id, False)
debug.log('Opened all doors and windows')
if self.keyboard.was_pressed(keys.CLOSE_DOORS):
for door_id in self.skool.doors:
self.skool.move_door(door_id, True)
for window_id in self.skool.windows:
self.skool.move_door(window_id, True)
debug.log('Closed all doors and windows')
def _take_screenshot(self):
"""Take a screenshot."""
timestamp = time.strftime('%Y%m%d-%H%M%S')
img_fname = '{}-{:03d}.png'.format(timestamp, self.screenshot)
scrshot_dir = self.skool.screenshot_dir
if not os.path.isdir(scrshot_dir):
os.makedirs(scrshot_dir)
img_path = os.path.join(scrshot_dir, img_fname)
self.screen.take_screenshot(img_path)
self.screenshot += 1
debug.log('Took screenshot: {}'.format(img_path))
def play(self):
"""Start the game and enter the main loop."""
self.clock = pygame.time.Clock()
self.paused = False
while True:
self.scroll = 0
if not self.quick_start:
self.skool.scroll_on(self.clock)
self.quick_start = False
while not self.skool.game_over:
if self._main_loop():
return
self.skool.reinitialise()
def _main_loop(self):
"""The main loop of the game. The following things are done in the main
loop:
* check the keyboard and act on keypresses
* advance the skool clock
* move the characters
* shut any auto-shutting doors that need shutting
* update the screen
* scroll the screen if necessary
:return: `True` if the game is quitting, `False` otherwise.
"""
if self.keyboard.was_pressed(keys.FULL_SCREEN, force_check=True):
pygame.display.toggle_fullscreen()
return False
if self.keyboard.was_pressed(keys.SCREENSHOT, force_check=True):
self._take_screenshot()
if self.menu:
return self._handle_menu()
show_quit_menu = False
if self.keyboard.got_quit():
if not self.confirm_close:
return True
show_quit_menu = True
elif self.keyboard.was_pressed(keys.QUIT, force_check=True):
if not self.confirm_quit:
return True
show_quit_menu = True
if show_quit_menu:
self.menu = self.menus['Quit']
self.menu.reset()
self.screen.draw_menu(self.menu)
self.beeper.pause()
return False
self.paused ^= self.keyboard.was_pressed(keys.PAUSE, force_check=True)
if self.paused:
self.beeper.pause()
self.clock.tick(10)
self.keyboard.pump()
return False
self.beeper.unpause()
if self.beeper.is_busy():
self.keyboard.pump()
return False
if self.skool.suspended:
# The skool was suspended while a sound effect played; now resume
if not self.skool.locked:
self.skool.draw()
if self.scroll:
self.skool.scroll(self.scroll, self.clock)
self.scroll = 0
self.skool.resume()
return False
if self.skool.tick():
self.skool.next_lesson(self.ring_bell)
self.ring_bell = True
if self.skool.suspended:
return False
if self.cheat:
self._check_cheat_keys()
self.scroll = self.skool.move_characters()
if self.skool.suspended:
return False
self.skool.auto_shut_doors()
self.clock.tick(self.screen.fps * self.speed)
self.skool.draw()
self.skool.scroll(self.scroll, self.clock)
if self.keyboard.was_pressed(keys.MENU, force_check=True):
self.menu = self.menus['Main']
self.menu.reset()
self.screen.draw_menu(self.menu)
elif self.keyboard.was_pressed(keys.SAVE, force_check=True):
self._save_game()
elif self.keyboard.was_pressed(keys.LOAD, force_check=True):
self._load_last()
return False
class Menu:
"""The in-game menu.
:type screen: :class:`~pyskool.graphics.Screen`
:param screen: The screen (to draw the menu on).
:type config: dict
:param config: Configuration parameters.
:param items: The menu items (`(operation, label)` tuples).
"""
def __init__(self, screen, config, items):
self.ink = config.get('Ink', (255, 255, 255))
self.paper = config.get('Paper', (255, 0, 0))
self.highlight = config.get('Highlight', (200, 0, 0))
self.status_bar = config.get('StatusBar', 1)
self.status_paper = config.get('StatusPaper', (100, 100, 100))
self.title_paper = config.get('TitlePaper', (100, 100, 100))
self.width = config.get('Width', 0.9)
self.title_text = config.get('Title', 'Menu')
self.alpha = config.get('Alpha', 224)
self.items = items
self.build_images(screen)
self.reset()
def reset(self):
"""Reset the menu. Specifically:
* set the selected index to 0
* clear the status text
* clear the backdrop on which the menu is drawn
"""
self.selected_index = 0
self.status = ''
self.backdrop = None
def build_images(self, screen):
"""Build the title image and menu item images."""
self.title = screen.get_text(self.title_text, self.ink, self.title_paper)
self.images = []
for operation, label in self.items:
self.images.append(screen.get_text(label, self.ink, self.paper))
self.backdrop = None
def get_operation(self, keyboard):
"""Return the operation to perform (which may be `None`)."""
operation = None
if keyboard.was_pressed(keys.MENU_EXIT, force_check=True):
return RESUME
if keyboard.was_pressed(keys.MENU_PREV, force_check=True):
self.selected_index -= 1
operation = UPDATE
elif keyboard.was_pressed(keys.MENU_NEXT, force_check=True):
self.selected_index += 1
operation = UPDATE
elif keyboard.was_pressed(keys.MENU_EXEC, force_check=True):
operation = self.items[self.selected_index][0]
self.selected_index = self.selected_index % len(self.items)
return operation
| gpl-3.0 | -2,794,995,661,869,815,300 | 36.769088 | 131 | 0.580613 | false |
TomasTomecek/osbs | osbs/exceptions.py | 1 | 1766 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Exceptions raised by OSBS
"""
from traceback import format_tb
class OsbsException(Exception):
def __init__(self, message=None, cause=None, traceback=None):
if message is None and cause is not None:
message = repr(cause)
super(OsbsException, self).__init__(message)
self.message = message
self.cause = cause
self.traceback = traceback
def __str__(self):
if self.cause and self.traceback and not hasattr(self, '__context__'):
return ("%s\n\n" % self.message +
"Original traceback (most recent call last):\n" +
"".join(format_tb(self.traceback)) +
"%r" % self.cause)
else:
return super(OsbsException, self).__str__()
def __repr__(self):
if self.cause and not hasattr(self, '__context__'):
return "OsbsException caused by %r" % self.cause
else:
return super(OsbsException, self).__repr__()
class OsbsResponseException(OsbsException):
""" OpenShift didn't respond with OK (200) status """
def __init__(self, message, status_code, *args, **kwargs):
super(OsbsResponseException, self).__init__(message, *args, **kwargs)
self.status_code = status_code
class OsbsNetworkException(OsbsException):
def __init__(self, url, message, status_code, *args, **kwargs):
super(OsbsNetworkException, self).__init__(message, *args, **kwargs)
self.url = url
self.status_code = status_code
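# Illustrative use (argument values assumed): OsbsNetworkException('http://host',
# 'timed out', 0, cause=exc, traceback=sys.exc_info()[2]) keeps the original
# traceback available via str()/repr().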
class OsbsValidationException(OsbsException):
pass
| bsd-3-clause | -7,734,197,998,496,036,000 | 29.982456 | 78 | 0.61778 | false |
baderj/domain_generation_algorithms | proslikefan/dga.py | 1 | 1425 | import argparse
from ctypes import c_int
from datetime import datetime
def dga(date, magic, tlds):
# tlds = ["eu", "biz", "se", "info", "com", "net", "org", "ru", "in",
# "name"]
for i in range(10):
for tld in tlds:
seed_string = '.'.join([str(s) for s in
[magic, date.month, date.day, date.year, tld]])
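            # e.g. "prospect.3.14.2016.eu" for the default magic string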
r = abs(hash_string(seed_string)) + i
domain = ""
k = 0
while(k < r % 7 + 6):
r = abs(hash_string(domain + str(r)))
domain += chr(r % 26 + ord('a'))
k += 1
domain += '.' + tld
print(domain)
def hash_string(s):
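    """Rolling 32-bit string hash: h = h*31 + ord(c) per character, kept in
    a signed 32-bit integer via ctypes.c_int (so the result can be negative)."""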
h = c_int(0)
for c in s:
h.value = (h.value << 5) - h.value + ord(c)
return h.value
if __name__=="__main__":
""" known magic seeds are "prospect" and "OK" """
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--date", help="date for which to generate domains")
parser.add_argument("-m", "--magic", help="magic string",
default="prospect")
parser.add_argument("-t", "--tlds", nargs="+", help="tlds",
default=["eu", "biz", "se", "info", "com", "net", "org", "ru", "in", "name"])
args = parser.parse_args()
if args.date:
d = datetime.strptime(args.date, "%Y-%m-%d")
else:
d = datetime.now()
dga(d, args.magic, args.tlds)
| gpl-2.0 | -7,406,160,187,670,002,000 | 32.139535 | 85 | 0.490526 | false |
fujicoin/fujicoin | test/functional/wallet_listreceivedby.py | 1 | 8103 | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listreceivedbyaddress RPC."""
from decimal import Decimal
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet_util import test_address
class ReceivedByTest(FujicoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_cli()
def run_test(self):
# Generate block to get out of IBD
self.nodes[0].generate(1)
self.sync_blocks()
# save the number of coinbase reward addresses so far
num_cb_reward_addresses = len(self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True))
self.log.info("listreceivedbyaddress Test")
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
        # Check not listed in listreceivedbyaddress because it has 0 confirmations
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address": addr},
{},
True)
        # Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
assert_array_result(self.nodes[1].listreceivedbyaddress(),
{"address": addr},
{"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
        # With min confirmations < 10
assert_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address": addr},
{"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]})
        # With min confirmations > 10, should not find the Tx
assert_array_result(self.nodes[1].listreceivedbyaddress(11), {"address": addr}, {}, True)
# Empty Tx
empty_addr = self.nodes[1].getnewaddress()
assert_array_result(self.nodes[1].listreceivedbyaddress(0, True),
{"address": empty_addr},
{"address": empty_addr, "label": "", "amount": 0, "confirmations": 0, "txids": []})
# Test Address filtering
# Only on addr
expected = {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}
res = self.nodes[1].listreceivedbyaddress(minconf=0, include_empty=True, include_watchonly=True, address_filter=addr)
assert_array_result(res, {"address": addr}, expected)
assert_equal(len(res), 1)
# Test for regression on CLI calls with address string (#14173)
cli_res = self.nodes[1].cli.listreceivedbyaddress(0, True, True, addr)
assert_array_result(cli_res, {"address": addr}, expected)
assert_equal(len(cli_res), 1)
# Error on invalid address
assert_raises_rpc_error(-4, "address_filter parameter was invalid", self.nodes[1].listreceivedbyaddress, minconf=0, include_empty=True, include_watchonly=True, address_filter="bamboozling")
# Another address receive money
res = self.nodes[1].listreceivedbyaddress(0, True, True)
assert_equal(len(res), 2 + num_cb_reward_addresses) # Right now 2 entries
other_addr = self.nodes[1].getnewaddress()
txid2 = self.nodes[0].sendtoaddress(other_addr, 0.1)
self.nodes[0].generate(1)
self.sync_all()
# Same test as above should still pass
expected = {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 11, "txids": [txid, ]}
res = self.nodes[1].listreceivedbyaddress(0, True, True, addr)
assert_array_result(res, {"address": addr}, expected)
assert_equal(len(res), 1)
# Same test as above but with other_addr should still pass
expected = {"address": other_addr, "label": "", "amount": Decimal("0.1"), "confirmations": 1, "txids": [txid2, ]}
res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
assert_array_result(res, {"address": other_addr}, expected)
assert_equal(len(res), 1)
# Should be two entries though without filter
res = self.nodes[1].listreceivedbyaddress(0, True, True)
assert_equal(len(res), 3 + num_cb_reward_addresses) # Became 3 entries
# Not on random addr
other_addr = self.nodes[0].getnewaddress() # note on node[0]! just a random addr
res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr)
assert_equal(len(res), 0)
self.log.info("getreceivedbyaddress Test")
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.0"))
# Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr, 0)
assert_equal(balance, Decimal("0.1"))
        # Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress
self.nodes[1].generate(10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
assert_equal(balance, Decimal("0.1"))
# Trying to getreceivedby for an address the wallet doesn't own should return an error
assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr)
self.log.info("listreceivedbylabel + getreceivedbylabel Test")
# set pre-state
label = ''
address = self.nodes[1].getnewaddress()
test_address(self.nodes[1], address, labels=[label])
received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel() if r["label"] == label][0]
balance_by_label = self.nodes[1].getreceivedbylabel(label)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbylabel should return received_by_label_json because of 0 confirmations
assert_array_result(self.nodes[1].listreceivedbylabel(),
{"label": label},
received_by_label_json)
# getreceivedbyaddress should return same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbylabel(label)
assert_equal(balance, balance_by_label)
self.nodes[1].generate(10)
self.sync_all()
# listreceivedbylabel should return updated received list
assert_array_result(self.nodes[1].listreceivedbylabel(),
{"label": label},
{"label": received_by_label_json["label"], "amount": (received_by_label_json["amount"] + Decimal("0.1"))})
# getreceivedbylabel should return updated receive total
balance = self.nodes[1].getreceivedbylabel(label)
assert_equal(balance, balance_by_label + Decimal("0.1"))
# Create a new label named "mynewlabel" that has a 0 balance
address = self.nodes[1].getnewaddress()
self.nodes[1].setlabel(address, "mynewlabel")
received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel(0, True) if r["label"] == "mynewlabel"][0]
# Test includeempty of listreceivedbylabel
assert_equal(received_by_label_json["amount"], Decimal("0.0"))
# Test getreceivedbylabel for 0 amount labels
balance = self.nodes[1].getreceivedbylabel("mynewlabel")
assert_equal(balance, Decimal("0.0"))
if __name__ == '__main__':
ReceivedByTest().main()
| mit | -8,886,061,812,897,744,000 | 46.385965 | 197 | 0.623473 | false |
maoy/zknova | nova/api/openstack/compute/contrib/security_groups.py | 1 | 21752 | # Copyright 2011 OpenStack LLC.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
from xml.dom import minidom
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def make_rule(elem):
elem.set('id')
elem.set('parent_group_id')
proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
proto.text = 'ip_protocol'
from_port = xmlutil.SubTemplateElement(elem, 'from_port')
from_port.text = 'from_port'
to_port = xmlutil.SubTemplateElement(elem, 'to_port')
to_port.text = 'to_port'
group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
name = xmlutil.SubTemplateElement(group, 'name')
name.text = 'name'
tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
tenant_id.text = 'tenant_id'
ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
selector='ip_range')
cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
cidr.text = 'cidr'
def make_sg(elem):
elem.set('id')
elem.set('tenant_id')
elem.set('name')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
rules = xmlutil.SubTemplateElement(elem, 'rules')
rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
make_rule(rule)
sg_nsmap = {None: wsgi.XMLNS_V11}
class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group_rule',
selector='security_group_rule')
make_rule(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group',
selector='security_group')
make_sg(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_groups')
elem = xmlutil.SubTemplateElement(root, 'security_group',
selector='security_groups')
make_sg(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
Deserializer to handle xml-formatted security group requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
dom = minidom.parseString(string)
security_group = {}
sg_node = self.find_first_child_named(dom,
'security_group')
if sg_node is not None:
if sg_node.hasAttribute('name'):
security_group['name'] = sg_node.getAttribute('name')
desc_node = self.find_first_child_named(sg_node,
"description")
if desc_node:
security_group['description'] = self.extract_text(desc_node)
return {'body': {'security_group': security_group}}
class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
    Deserializer to handle xml-formatted security group rule requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
dom = minidom.parseString(string)
security_group_rule = self._extract_security_group_rule(dom)
return {'body': {'security_group_rule': security_group_rule}}
def _extract_security_group_rule(self, node):
"""Marshal the security group rule attribute of a parsed request."""
sg_rule = {}
sg_rule_node = self.find_first_child_named(node,
'security_group_rule')
if sg_rule_node is not None:
ip_protocol_node = self.find_first_child_named(sg_rule_node,
"ip_protocol")
if ip_protocol_node is not None:
sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)
from_port_node = self.find_first_child_named(sg_rule_node,
"from_port")
if from_port_node is not None:
sg_rule['from_port'] = self.extract_text(from_port_node)
to_port_node = self.find_first_child_named(sg_rule_node, "to_port")
if to_port_node is not None:
sg_rule['to_port'] = self.extract_text(to_port_node)
parent_group_id_node = self.find_first_child_named(sg_rule_node,
"parent_group_id")
if parent_group_id_node is not None:
sg_rule['parent_group_id'] = self.extract_text(
parent_group_id_node)
group_id_node = self.find_first_child_named(sg_rule_node,
"group_id")
if group_id_node is not None:
sg_rule['group_id'] = self.extract_text(group_id_node)
cidr_node = self.find_first_child_named(sg_rule_node, "cidr")
if cidr_node is not None:
sg_rule['cidr'] = self.extract_text(cidr_node)
return sg_rule
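# Illustrative only: a rule-create body such as
#
#   <security_group_rule>
#     <ip_protocol>tcp</ip_protocol>
#     <from_port>80</from_port>
#     <to_port>80</to_port>
#     <cidr>0.0.0.0/0</cidr>
#     <parent_group_id>1</parent_group_id>
#   </security_group_rule>
#
# is marshalled to {'ip_protocol': 'tcp', 'from_port': '80', 'to_port': '80',
# 'cidr': '0.0.0.0/0', 'parent_group_id': '1'} -- all values are extracted as text.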
class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
def __init__(self):
self.security_group_api = NativeSecurityGroupAPI()
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _format_security_group_rule(self, context, rule):
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule['group_id']:
source_group = self.security_group_api.get(context,
id=rule['group_id'])
sg_rule['group'] = {'name': source_group.name,
'tenant_id': source_group.project_id}
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group['id']
security_group['description'] = group['description']
security_group['name'] = group['name']
security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
for rule in group['rules']:
security_group['rules'] += [self._format_security_group_rule(
context, rule)]
return security_group
def _authorize_context(self, req):
context = req.environ['nova.context']
authorize(context)
return context
def _validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
raise exc.HTTPBadRequest(explanation=msg)
def _from_body(self, body, key):
if not body:
raise exc.HTTPUnprocessableEntity()
value = body.get(key, None)
if value is None:
raise exc.HTTPUnprocessableEntity()
return value
class SecurityGroupController(SecurityGroupControllerBase):
"""The Security group API controller for the OpenStack API."""
@wsgi.serializers(xml=SecurityGroupTemplate)
def show(self, req, id):
"""Return data about the given security group."""
context = self._authorize_context(req)
id = self._validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
return {'security_group': self._format_security_group(context,
security_group)}
def delete(self, req, id):
"""Delete a security group."""
context = self._authorize_context(req)
id = self._validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
return webob.Response(status_int=202)
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req):
"""Returns a list of security groups."""
context = self._authorize_context(req)
search_opts = {}
search_opts.update(req.GET)
raw_groups = self.security_group_api.list(context,
project=context.project_id,
search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
@wsgi.serializers(xml=SecurityGroupTemplate)
@wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
def create(self, req, body):
"""Creates a new security group."""
context = self._authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create(context, group_name,
group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupRuleTemplate)
@wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
def create(self, req, body):
context = self._authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
parent_group_id = self._validate_id(sg_rule.get('parent_group_id',
None))
security_group = self.security_group_api.get(context, None,
parent_group_id, map_exception=True)
try:
values = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except Exception as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
if values is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
values['parent_group_id'] = security_group.id
if self.security_group_api.rule_exists(security_group, values):
msg = _('This rule already exists in group %s') % parent_group_id
raise exc.HTTPBadRequest(explanation=msg)
security_group_rule = self.security_group_api.add_rules(
context, parent_group_id, security_group['name'], [values])[0]
return {"security_group_rule": self._format_security_group_rule(
context,
security_group_rule)}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
group_id = self._validate_id(group_id)
            # check that the referenced group_id exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def delete(self, req, id):
context = self._authorize_context(req)
id = self._validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule.parent_group_id
security_group = self.security_group_api.get(context, None, group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req, server_id):
"""Returns a list of security groups for the given instance."""
context = self._authorize_context(req)
self.security_group_api.ensure_default(context)
try:
instance = self.compute_api.get(context, server_id)
except exception.InstanceNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
groups = db.security_group_get_by_instance(context, instance['id'])
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = NativeSecurityGroupAPI()
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return group_name
def _invoke(self, method, context, id, group_name):
try:
instance = self.compute_api.get(context, id)
method(context, instance, group_name)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
except exception.InstanceNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
return webob.Response(status_int=202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'addSecurityGroup')
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'removeSecurityGroup')
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _extend_servers(self, req, servers):
key = "security_groups"
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[key] = [{"name": group["name"]} for group in groups]
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
if 'server' in resp_obj.obj:
resp_obj.attach(xml=SecurityGroupServerTemplate())
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
resp_obj.attach(xml=SecurityGroupServersTemplate())
self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroupsTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return "security_groups" in datum
def make_server(elem):
secgrps = SecurityGroupsTemplateElement('security_groups')
elem.append(secgrps)
secgrp = xmlutil.SubTemplateElement(secgrps, 'security_group',
selector="security_groups")
secgrp.set('name')
class SecurityGroupServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1)
class SecurityGroupServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1)
class Security_groups(extensions.ExtensionDescriptor):
"""Security group support."""
name = "SecurityGroups"
alias = "os-security-groups"
namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
updated = "2011-07-21T00:00:00+00:00"
def get_controller_extensions(self):
controller = SecurityGroupActionController()
actions = extensions.ControllerExtension(self, 'servers', controller)
controller = SecurityGroupsOutputController()
output = extensions.ControllerExtension(self, 'servers', controller)
return [actions, output]
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-security-groups',
controller=SecurityGroupController())
resources.append(res)
res = extensions.ResourceExtension('os-security-group-rules',
controller=SecurityGroupRulesController())
resources.append(res)
res = extensions.ResourceExtension(
'os-security-groups',
controller=ServerSecurityGroupController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
class NativeSecurityGroupAPI(compute_api.SecurityGroupAPI):
@staticmethod
def raise_invalid_property(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_group_already_exists(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_invalid_group(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
raise exception.InvalidCidr(cidr=cidr)
@staticmethod
def raise_over_quota(msg):
raise exception.SecurityGroupLimitExceeded(msg)
@staticmethod
def raise_not_found(msg):
raise exc.HTTPNotFound(explanation=msg)
| apache-2.0 | 8,453,430,313,926,961,000 | 36.37457 | 79 | 0.594796 | false |
vaidap/zulip | zerver/views/home.py | 1 | 11809 | from __future__ import absolute_import
from typing import Any, List, Dict, Optional, Text
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpRequest
from django.shortcuts import redirect, render
from django.utils import translation
from django.utils.cache import patch_cache_control
from six.moves import zip_longest, zip, range
from zerver.decorator import zulip_login_required, process_client
from zerver.forms import ToSForm
from zerver.lib.realm_icon import realm_icon_url
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmDomain, \
RealmFilter, PreregistrationUser, UserActivity, \
UserPresence, get_recipient, name_changes_disabled, email_to_username, \
get_realm_domains
from zerver.lib.events import do_events_register
from zerver.lib.actions import update_user_presence, do_change_tos_version, \
do_update_pointer, realm_user_count
from zerver.lib.avatar import avatar_url
from zerver.lib.i18n import get_language_list, get_language_name, \
get_language_list_for_templates
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.lib.streams import access_stream_by_name
from zerver.lib.utils import statsd, get_subdomain
import calendar
import datetime
import logging
import os
import re
import simplejson
import time
@zulip_login_required
def accounts_accept_terms(request):
# type: (HttpRequest) -> HttpResponse
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
do_change_tos_version(request.user, settings.TOS_VERSION)
return redirect(home)
else:
form = ToSForm()
email = request.user.email
special_message_template = None
if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:
special_message_template = 'zerver/' + settings.FIRST_TIME_TOS_TEMPLATE
return render(
request,
'zerver/accounts_accept_terms.html',
context={'form': form,
'email': email,
'special_message_template': special_message_template},
)
def sent_time_in_epoch_seconds(user_message):
# type: (Optional[UserMessage]) -> Optional[float]
if user_message is None:
return None
# We have USE_TZ = True, so our datetime objects are timezone-aware.
# Return the epoch seconds in UTC.
return calendar.timegm(user_message.message.pub_date.utctimetuple())
def home(request):
# type: (HttpRequest) -> HttpResponse
if settings.DEVELOPMENT and os.path.exists('var/handlebars-templates/compile.error'):
response = render(request, 'zerver/handlebars_compilation_failed.html')
response.status_code = 500
return response
if not settings.SUBDOMAINS_HOMEPAGE:
return home_real(request)
# If settings.SUBDOMAINS_HOMEPAGE, sends the user the landing
# page, not the login form, on the root domain
subdomain = get_subdomain(request)
if subdomain != "":
return home_real(request)
return render(request, 'zerver/hello.html')
@zulip_login_required
def home_real(request):
# type: (HttpRequest) -> HttpResponse
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
user_profile = request.user
# If a user hasn't signed the current Terms of Service, send them there
if settings.TERMS_OF_SERVICE is not None and settings.TOS_VERSION is not None and \
int(settings.TOS_VERSION.split('.')[0]) > user_profile.major_tos_version():
return accounts_accept_terms(request)
narrow = [] # type: List[List[Text]]
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
narrow_stream_name = request.GET.get("stream")
(narrow_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, narrow_stream_name)
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.exception("Narrow parsing")
if narrow_stream is not None and narrow_topic is not None:
narrow.append(["topic", narrow_topic])
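    # For example (illustrative): a request with ?stream=Denmark&topic=management ends
    # up with narrow = [["stream", "Denmark"], ["topic", "management"]].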
register_ret = do_events_register(user_profile, request.client,
apply_markdown=True, narrow=narrow)
user_has_messages = (register_ret['max_message_id'] != -1)
# Reset our don't-spam-users-with-email counter since the
# user has since logged in
if user_profile.last_reminder is not None:
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
# Brand new users get the tutorial
needs_tutorial = settings.TUTORIAL_ENABLED and \
user_profile.tutorial_status != UserProfile.TUTORIAL_FINISHED
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = first_in_realm and \
not PreregistrationUser.objects.filter(referred_by=user_profile).count()
if user_profile.pointer == -1 and user_has_messages:
# Put the new user's pointer at the bottom
#
# This improves performance, because we limit backfilling of messages
# before the pointer. It's also likely that someone joining an
# organization is interested in recent messages more than the very
# first messages on the system.
register_ret['pointer'] = register_ret['max_message_id']
user_profile.last_pointer_updater = request.session.session_key
if user_profile.pointer == -1:
latest_read = None
else:
try:
latest_read = UserMessage.objects.get(user_profile=user_profile,
message__id=user_profile.pointer)
except UserMessage.DoesNotExist:
# Don't completely fail if your saved pointer ID is invalid
logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
latest_read = None
# Set default language and make it persist
default_language = register_ret['default_language']
url_lang = '/{}'.format(request.LANGUAGE_CODE)
if not request.path.startswith(url_lang):
translation.activate(default_language)
request.session[translation.LANGUAGE_SESSION_KEY] = default_language
# Pass parameters to the client-side JavaScript code.
# These end up in a global JavaScript Object named 'page_params'.
page_params = dict(
# Server settings.
development_environment = settings.DEVELOPMENT,
debug_mode = settings.DEBUG,
test_suite = settings.TEST_SUITE,
poll_timeout = settings.POLL_TIMEOUT,
login_page = settings.HOME_NOT_LOGGED_IN,
server_uri = settings.SERVER_URI,
maxfilesize = settings.MAX_FILE_UPLOAD_SIZE,
max_avatar_file_size = settings.MAX_AVATAR_FILE_SIZE,
server_generation = settings.SERVER_GENERATION,
use_websockets = settings.USE_WEBSOCKETS,
save_stacktraces = settings.SAVE_FRONTEND_STACKTRACES,
server_inline_image_preview = settings.INLINE_IMAGE_PREVIEW,
server_inline_url_embed_preview = settings.INLINE_URL_EMBED_PREVIEW,
password_min_length = settings.PASSWORD_MIN_LENGTH,
password_min_quality = settings.PASSWORD_MIN_ZXCVBN_QUALITY,
# Misc. extra data.
have_initial_messages = user_has_messages,
initial_servertime = time.time(), # Used for calculating relative presence age
default_language_name = get_language_name(register_ret['default_language']),
language_list_dbl_col = get_language_list_for_templates(register_ret['default_language']),
language_list = get_language_list(),
needs_tutorial = needs_tutorial,
first_in_realm = first_in_realm,
prompt_for_invites = prompt_for_invites,
furthest_read_time = sent_time_in_epoch_seconds(latest_read),
has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
)
undesired_register_ret_fields = [
'streams',
]
for field_name in set(register_ret.keys()) - set(undesired_register_ret_fields):
page_params[field_name] = register_ret[field_name]
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
try:
initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
except IndexError:
initial_pointer = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = initial_pointer
page_params["pointer"] = initial_pointer
page_params["have_initial_messages"] = (initial_pointer != -1)
page_params["enable_desktop_notifications"] = False
statsd.incr('views.home')
show_invites = True
# Some realms only allow admins to invite users
if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
show_invites = False
request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
response = render(request, 'zerver/index.html',
context={'user_profile': user_profile,
'page_params': simplejson.encoder.JSONEncoderForHTML().encode(page_params),
'nofontface': is_buggy_ua(request.META.get("HTTP_USER_AGENT", "Unspecified")),
'avatar_url': avatar_url(user_profile),
'show_debug':
settings.DEBUG and ('show_debug' in request.GET),
'pipeline': settings.PIPELINE_ENABLED,
'show_invites': show_invites,
'is_admin': user_profile.is_realm_admin,
'show_webathena': user_profile.realm.webathena_enabled,
'enable_feedback': settings.ENABLE_FEEDBACK,
'embedded': narrow_stream is not None,
},)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@zulip_login_required
def desktop_home(request):
# type: (HttpRequest) -> HttpResponse
return HttpResponseRedirect(reverse('zerver.views.home.home'))
def apps_view(request, _):
# type: (HttpRequest, Text) -> HttpResponse
if settings.ZILENCER_ENABLED:
return render(request, 'zerver/apps.html')
return HttpResponseRedirect('https://zulipchat.com/apps/', status=301)
def is_buggy_ua(agent):
# type: (str) -> bool
"""Discrimiate CSS served to clients based on User Agent
Due to QTBUG-3467, @font-face is not supported in QtWebKit.
This may get fixed in the future, but for right now we can
just serve the more conservative CSS to all our desktop apps.
"""
return ("Humbug Desktop/" in agent or "Zulip Desktop/" in agent or "ZulipDesktop/" in agent) and \
"Mac" not in agent
| apache-2.0 | -5,072,437,405,447,221,000 | 43.394737 | 109 | 0.65594 | false |
abhishekraok/GraphMap | graphmap/tree_viewer.py | 1 | 3451 | import imagetree
import serializer
from utilities import quadkey_to_xyz, xyz_to_quadkey, is_valid_quadkey
from serializer import create_tree_from_jpg_url
import commander
import constants
import sys
valid_commands = ['+', '-', 'l', 'r', 'u', 'd', 's']
def tree_viewer_valid_input(input_command):
return input_command in valid_commands or \
is_valid_quadkey(input_command) or \
input_command.startswith('c') or \
input_command.startswith('http')
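# Illustrative note (assuming the standard Bing-Maps-style quadkey scheme behind
# utilities.quadkey_to_xyz / xyz_to_quadkey): each digit 0-3 selects a quadrant at the
# next zoom level, so e.g. "0" -> (x=0, y=0, z=1) and "03" -> (x=1, y=1, z=2).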
def tree_viewer(tree):
"""
Interactively display the tree.
:type tree: imagetree.ImageTree
"""
import matplotlib.pyplot as plt
x, y, z = 0, 0, 0
im_array = tree.get_pil_image_at_xyz(x, y, z, constants.default_tile_resolution)
plt.imshow(im_array)
plt.draw()
plt.pause(1)
    print('You can move around, connect to an existing node, or insert an image from a URL.')
    print('To move around, enter one of ' + str(valid_commands) + ' or a quadkey to jump there.')
    print('To connect, enter: c <node_link> e.g. >c line@https://azurewebsite.line.tsv.gz')
    print('To insert an image, enter: <image url> <node name> e.g. >http://imgur.com/haha.jpg smile')
input_command = raw_input('>')
while tree_viewer_valid_input(input_command):
if input_command == '+':
x *= 2
y *= 2
z += 1
if input_command == '-':
x /= 2
y /= 2
z -= 1
if input_command == 'l':
x -= 1
if input_command == 'r':
x += 1
if input_command == 'd':
y += 1
if input_command == 'u':
y -= 1
if input_command == 's':
quad_key = xyz_to_quadkey(x, y, z)
print('Saving at current location of ', quad_key, ' where filename is ', tree.filename)
serializer.save_tree(tree)
if is_valid_quadkey(input_command):
x, y, z = quadkey_to_xyz(quadkey=input_command)
current_quadkey = xyz_to_quadkey(x, y, z)
if input_command.startswith('c'):
_, node_link = input_command.split(' ')
quad_key = current_quadkey
print('connecting node link', node_link, ' at quadkey ', quad_key)
commander.insert_tree_root(root_tree=tree, child_link=node_link, quad_key=quad_key, save=False)
if input_command.startswith('http'):
url, node_name = input_command.split(' ')
blob_url = 'https://artmapstore.blob.core.windows.net/firstnodes/' + node_name + '.tsv.gz'
quad_key = current_quadkey
print('Inserting image ', url, ' at quadkey ', quad_key)
another_tree = create_tree_from_jpg_url(url=url, name=tree.name, filename=blob_url,
max_resolution=1024)
tree.insert(another_tree, quad_key)
print(
'xyz=', x, y, z, '. quadkey=', current_quadkey, 'node link=', tree.get_descendant(current_quadkey).get_link())
im_array = tree.get_pil_image_at_xyz(x, y, z, constants.default_tile_resolution)
plt.imshow(im_array)
plt.draw()
plt.pause(1)
input_command = raw_input('>')
if __name__ == '__main__':
print('Tree viewer with options ', sys.argv)
if len(sys.argv) > 1:
node_link = sys.argv[1]
else:
node_link = constants.ROOT_LINK
tree = serializer.load_link_new_serializer(node_link)
tree_viewer(tree)
| apache-2.0 | -6,180,173,991,250,044,000 | 37.775281 | 118 | 0.578093 | false |
Denvi/FlatCAM | FlatCAMWorker.py | 1 | 1587 | from PyQt4 import QtCore
class Worker(QtCore.QObject):
"""
Implements a queue of tasks to be carried out in order
in a single independent thread.
"""
# avoid multiple tests for debug availability
pydevd_failed = False
task_completed = QtCore.pyqtSignal(str)
def __init__(self, app, name=None):
super(Worker, self).__init__()
self.app = app
self.name = name
def allow_debug(self):
"""
allow debuging/breakpoints in this threads
should work from PyCharm and PyDev
:return:
"""
if not self.pydevd_failed:
try:
import pydevd
pydevd.settrace(suspend=False, trace_only_current_thread=True)
except ImportError:
                self.pydevd_failed = True
def run(self):
# self.app.log.debug("Worker Started!")
self.allow_debug()
# Tasks are queued in the event listener.
self.app.worker_task.connect(self.do_worker_task)
def do_worker_task(self, task):
# self.app.log.debug("Running task: %s" % str(task))
self.allow_debug()
if ('worker_name' in task and task['worker_name'] == self.name) or \
('worker_name' not in task and self.name is None):
try:
task['fcn'](*task['params'])
except Exception as e:
self.app.thread_exception.emit(e)
raise e
finally:
self.task_completed.emit(self.name)
# self.app.log.debug("Task ignored.")
| mit | 5,585,728,055,401,166,000 | 25.898305 | 78 | 0.559546 | false |
beobal/cassandra-dtest | upgrade_tests/upgrade_through_versions_test.py | 1 | 37299 | import operator
import os
import pprint
import random
import signal
import time
import uuid
import logging
import pytest
import psutil
from collections import defaultdict, namedtuple
from multiprocessing import Process, Queue
from queue import Empty, Full
from cassandra import ConsistencyLevel, WriteTimeout
from cassandra.query import SimpleStatement
from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester
from tools.misc import generate_ssl_stores, new_node
from .upgrade_base import switch_jdks
from .upgrade_manifest import (build_upgrade_pairs, current_2_0_x,
current_2_1_x, current_2_2_x, current_3_0_x,
indev_2_2_x, indev_3_x)
logger = logging.getLogger(__name__)
def data_writer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
"""
Process for writing/rewriting data continuously.
Pushes to a queue to be consumed by data_checker.
Pulls from a queue of already-verified rows written by data_checker that it can overwrite.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("UPDATE cf SET v=? WHERE k=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
to_verify_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
key = None
if (rewrite_probability > 0) and (random.randint(0, 100) <= rewrite_probability):
try:
key = verification_done_queue.get_nowait()
except Empty:
# we wanted a re-write but the re-writable queue was empty. oh well.
pass
key = key or uuid.uuid4()
val = uuid.uuid4()
session.execute(prepared, (val, key))
to_verify_queue.put_nowait((key, val,))
except Exception:
logger.debug("Error in data writer process!")
to_verify_queue.close()
raise
def data_checker(tester, to_verify_queue, verification_done_queue):
"""
Process for checking data continuously.
Pulls from a queue written to by data_writer to know what to verify.
Pushes to a queue to tell data_writer what's been verified and could be a candidate for re-writing.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("SELECT v FROM cf WHERE k=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
verification_done_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
# here we could block, but if the writer process terminates early with an empty queue
# we would end up blocking indefinitely
(key, expected_val) = to_verify_queue.get_nowait()
actual_val = session.execute(prepared, (key,))[0][0]
except Empty:
time.sleep(0.1) # let's not eat CPU if the queue is empty
continue
except Exception:
logger.debug("Error in data verifier process!")
verification_done_queue.close()
raise
else:
try:
verification_done_queue.put_nowait(key)
except Full:
# the rewritable queue is full, not a big deal. drop this one.
# we keep the rewritable queue held to a modest max size
# and allow dropping some rewritables because we don't want to
# rewrite rows in the same sequence as originally written
pass
tester.assertEqual(expected_val, actual_val, "Data did not match expected value!")
def counter_incrementer(tester, to_verify_queue, verification_done_queue, rewrite_probability=0):
"""
Process for incrementing counters continuously.
Pushes to a queue to be consumed by counter_checker.
Pulls from a queue of already-verified rows written by data_checker that it can increment again.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("UPDATE countertable SET c = c + 1 WHERE k1=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
to_verify_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
key = None
count = 0 # this will get set to actual last known count if we do a re-write
if (rewrite_probability > 0) and (random.randint(0, 100) <= rewrite_probability):
try:
key, count = verification_done_queue.get_nowait()
except Empty:
# we wanted a re-write but the re-writable queue was empty. oh well.
pass
key = key or uuid.uuid4()
            session.execute(prepared, (key,))
to_verify_queue.put_nowait((key, count + 1,))
except Exception:
logger.debug("Error in counter incrementer process!")
to_verify_queue.close()
raise
def counter_checker(tester, to_verify_queue, verification_done_queue):
"""
Process for checking counters continuously.
Pulls from a queue written to by counter_incrementer to know what to verify.
Pushes to a queue to tell counter_incrementer what's been verified and could be a candidate for incrementing again.
Intended to be run using multiprocessing.
"""
# 'tester' is a cloned object so we shouldn't be inappropriately sharing anything with another process
session = tester.patient_cql_connection(tester.node1, keyspace="upgrade", protocol_version=tester.protocol_version)
prepared = session.prepare("SELECT c FROM countertable WHERE k1=?")
prepared.consistency_level = ConsistencyLevel.QUORUM
def handle_sigterm(signum, frame):
# need to close queue gracefully if possible, or the data_checker process
# can't seem to empty the queue and test failures result.
verification_done_queue.close()
exit(0)
signal.signal(signal.SIGTERM, handle_sigterm)
while True:
try:
# here we could block, but if the writer process terminates early with an empty queue
# we would end up blocking indefinitely
(key, expected_count) = to_verify_queue.get_nowait()
actual_count = session.execute(prepared, (key,))[0][0]
except Empty:
time.sleep(0.1) # let's not eat CPU if the queue is empty
continue
except Exception:
logger.debug("Error in counter verifier process!")
verification_done_queue.close()
raise
else:
tester.assertEqual(expected_count, actual_count, "Data did not match expected value!")
try:
verification_done_queue.put_nowait((key, actual_count))
except Full:
# the rewritable queue is full, not a big deal. drop this one.
# we keep the rewritable queue held to a modest max size
# and allow dropping some rewritables because we don't want to
# rewrite rows in the same sequence as originally written
pass
@pytest.mark.upgrade_test
@pytest.mark.resource_intensive
class TestUpgrade(Tester):
"""
Upgrades a 3-node Murmur3Partitioner cluster through versions specified in test_version_metas.
"""
test_version_metas = None # set on init to know which versions to use
subprocs = None # holds any subprocesses, for status checking and cleanup
extra_config = None # holds a non-mutable structure that can be cast as dict()
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.ignore_log_patterns = (
# This one occurs if we do a non-rolling upgrade, the node
# it's trying to send the migration to hasn't started yet,
# and when it does, it gets replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
r'RejectedExecutionException.*ThreadPoolExecutor has shut down',
# Occurs due to test/ccm writing topo on down nodes
r'Cannot update data center or rack from.*for live host',
            # Normal occurrence. See CASSANDRA-12026. Likely won't be needed after C* 4.0.
r'Unknown column cdc during deserialization',
)
def setUp(self):
logger.debug("Upgrade test beginning, setting CASSANDRA_VERSION to {}, and jdk to {}. (Prior values will be restored after test)."
.format(self.test_version_metas[0].version, self.test_version_metas[0].java_version))
os.environ['CASSANDRA_VERSION'] = self.test_version_metas[0].version
switch_jdks(self.test_version_metas[0].java_version)
super(TestUpgrade, self).setUp()
logger.debug("Versions to test (%s): %s" % (type(self), str([v.version for v in self.test_version_metas])))
def init_config(self):
Tester.init_config(self)
if self.extra_config is not None:
logger.debug("Setting extra configuration options:\n{}".format(
pprint.pformat(dict(self.extra_config), indent=4))
)
self.cluster.set_configuration_options(
values=dict(self.extra_config)
)
def test_parallel_upgrade(self):
"""
Test upgrading cluster all at once (requires cluster downtime).
"""
self.upgrade_scenario()
def test_rolling_upgrade(self):
"""
Test rolling upgrade of the cluster, so we have mixed versions part way through.
"""
self.upgrade_scenario(rolling=True)
def test_parallel_upgrade_with_internode_ssl(self):
"""
Test upgrading cluster all at once (requires cluster downtime), with internode ssl.
"""
self.upgrade_scenario(internode_ssl=True)
def test_rolling_upgrade_with_internode_ssl(self):
"""
Rolling upgrade test using internode ssl.
"""
self.upgrade_scenario(rolling=True, internode_ssl=True)
def upgrade_scenario(self, populate=True, create_schema=True, rolling=False, after_upgrade_call=(), internode_ssl=False):
# Record the rows we write as we go:
self.row_values = set()
cluster = self.cluster
if cluster.version() >= '3.0':
cluster.set_configuration_options({'enable_user_defined_functions': 'true',
'enable_scripted_user_defined_functions': 'true'})
elif cluster.version() >= '2.2':
cluster.set_configuration_options({'enable_user_defined_functions': 'true'})
if internode_ssl:
logger.debug("***using internode ssl***")
generate_ssl_stores(self.fixture_dtest_setup.test_path)
self.cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path)
if populate:
# Start with 3 node cluster
logger.debug('Creating cluster (%s)' % self.test_version_metas[0].version)
cluster.populate(3)
[node.start(use_jna=True, wait_for_binary_proto=True) for node in cluster.nodelist()]
else:
logger.debug("Skipping cluster creation (should already be built)")
# add nodes to self for convenience
for i, node in enumerate(cluster.nodelist(), 1):
node_name = 'node' + str(i)
setattr(self, node_name, node)
if create_schema:
if rolling:
self._create_schema_for_rolling()
else:
self._create_schema()
else:
logger.debug("Skipping schema creation (should already be built)")
time.sleep(5) # sigh...
self._log_current_ver(self.test_version_metas[0])
if rolling:
# start up processes to write and verify data
write_proc, verify_proc, verification_queue = self._start_continuous_write_and_verify(wait_for_rowcount=5000)
# upgrade through versions
for version_meta in self.test_version_metas[1:]:
for num, node in enumerate(self.cluster.nodelist()):
# sleep (sigh) because driver needs extra time to keep up with topo and make quorum possible
# this is ok, because a real world upgrade would proceed much slower than this programmatic one
# additionally this should provide more time for timeouts and other issues to crop up as well, which we could
# possibly "speed past" in an overly fast upgrade test
time.sleep(60)
self.upgrade_to_version(version_meta, partial=True, nodes=(node,), internode_ssl=internode_ssl)
self._check_on_subprocs(self.fixture_dtest_setup.subprocs)
logger.debug('Successfully upgraded %d of %d nodes to %s' %
(num + 1, len(self.cluster.nodelist()), version_meta.version))
self.cluster.set_install_dir(version=version_meta.version)
# Stop write processes
write_proc.terminate()
# wait for the verification queue's to empty (and check all rows) before continuing
self._wait_until_queue_condition('writes pending verification', verification_queue, operator.le, 0, max_wait_s=1200)
self._check_on_subprocs([verify_proc]) # make sure the verification processes are running still
self._terminate_subprocs()
# not a rolling upgrade, do everything in parallel:
else:
# upgrade through versions
for version_meta in self.test_version_metas[1:]:
self._write_values()
self._increment_counters()
self.upgrade_to_version(version_meta, internode_ssl=internode_ssl)
self.cluster.set_install_dir(version=version_meta.version)
self._check_values()
self._check_counters()
self._check_select_count()
# run custom post-upgrade callables
for call in after_upgrade_call:
call()
logger.debug('All nodes successfully upgraded to %s' % version_meta.version)
self._log_current_ver(version_meta)
cluster.stop()
def tearDown(self):
# just to be super sure we get cleaned up
self._terminate_subprocs()
super(TestUpgrade, self).tearDown()
def _check_on_subprocs(self, subprocs):
"""
Check on given subprocesses.
If any are not alive, we'll go ahead and terminate any remaining alive subprocesses since this test is going to fail.
"""
subproc_statuses = [s.is_alive() for s in subprocs]
if not all(subproc_statuses):
message = "A subprocess has terminated early. Subprocess statuses: "
for s in subprocs:
message += "{name} (is_alive: {aliveness}), ".format(name=s.name, aliveness=s.is_alive())
message += "attempting to terminate remaining subprocesses now."
self._terminate_subprocs()
raise RuntimeError(message)
def _terminate_subprocs(self):
for s in self.fixture_dtest_setup.subprocs:
if s.is_alive():
try:
psutil.Process(s.pid).kill() # with fire damnit
except Exception:
logger.debug("Error terminating subprocess. There could be a lingering process.")
pass
def upgrade_to_version(self, version_meta, partial=False, nodes=None, internode_ssl=False):
"""
Upgrade Nodes - if *partial* is True, only upgrade those nodes
that are specified by *nodes*, otherwise ignore *nodes* specified
and upgrade all nodes.
"""
logger.debug('Upgrading {nodes} to {version}'.format(nodes=[n.name for n in nodes] if nodes is not None else 'all nodes', version=version_meta.version))
switch_jdks(version_meta.java_version)
logger.debug("JAVA_HOME: " + os.environ.get('JAVA_HOME'))
if not partial:
nodes = self.cluster.nodelist()
for node in nodes:
logger.debug('Shutting down node: ' + node.name)
node.drain()
node.watch_log_for("DRAINED")
node.stop(wait_other_notice=False)
for node in nodes:
node.set_install_dir(version=version_meta.version)
logger.debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
if internode_ssl and version_meta.version >= '4.0':
node.set_configuration_options({'server_encryption_options': {'enabled': True, 'enable_legacy_ssl_storage_port': True}})
# hacky? yes. We could probably extend ccm to allow this publicly.
# the topology file needs to be written before any nodes are started
# otherwise they won't be grouped into dc's properly for multi-dc tests
self.cluster._Cluster__update_topology_files()
# Restart nodes on new version
for node in nodes:
logger.debug('Starting %s on new version (%s)' % (node.name, version_meta.version))
# Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
node.set_log_level("INFO")
node.start(wait_other_notice=240, wait_for_binary_proto=True)
node.nodetool('upgradesstables -a')
def _log_current_ver(self, current_version_meta):
"""
Logs where we currently are in the upgrade path, surrounding the current branch/tag, like ***sometag***
"""
vers = [m.version for m in self.test_version_metas]
curr_index = vers.index(current_version_meta.version)
logger.debug(
"Current upgrade path: {}".format(
vers[:curr_index] + ['***' + current_version_meta.version + '***'] + vers[curr_index + 1:]))
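    # Illustrative only: with versions ['2.1.x', '2.2.x', '3.0.x'] and '2.2.x' current,
    # the statement above logs: Current upgrade path: ['2.1.x', '***2.2.x***', '3.0.x']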
def _create_schema_for_rolling(self):
"""
Slightly different schema variant for testing rolling upgrades with quorum reads/writes.
"""
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy', 'replication_factor':3};")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k uuid PRIMARY KEY, v uuid )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 uuid,
c counter,
PRIMARY KEY (k1)
);""")
def _create_schema(self):
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'SimpleStrategy', 'replication_factor':2};")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k int PRIMARY KEY, v text )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 text,
k2 int,
c counter,
PRIMARY KEY (k1, k2)
);""")
def _write_values(self, num=100):
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade")
for i in range(num):
x = len(self.row_values) + 1
session.execute("UPDATE cf SET v='%d' WHERE k=%d" % (x, x))
self.row_values.add(x)
def _check_values(self, consistency_level=ConsistencyLevel.ALL):
for node in self.cluster.nodelist():
session = self.patient_cql_connection(node, protocol_version=self.protocol_version)
session.execute("use upgrade")
for x in self.row_values:
query = SimpleStatement("SELECT k,v FROM cf WHERE k=%d" % x, consistency_level=consistency_level)
result = session.execute(query)
k, v = result[0]
assert x == k
assert str(x) == v
def _wait_until_queue_condition(self, label, queue, opfunc, required_len, max_wait_s=600):
"""
Waits up to max_wait_s for queue size to return True when evaluated against a condition function from the operator module.
Label is just a string identifier for easier debugging.
On Mac OS X may not be able to check queue size, in which case it will not block.
If time runs out, raises RuntimeError.
"""
wait_end_time = time.time() + max_wait_s
while time.time() < wait_end_time:
try:
qsize = queue.qsize()
except NotImplementedError:
logger.debug("Queue size may not be checkable on Mac OS X. Test will continue without waiting.")
break
if opfunc(qsize, required_len):
logger.debug("{} queue size ({}) is '{}' to {}. Continuing.".format(label, qsize, opfunc.__name__, required_len))
break
if divmod(round(time.time()), 30)[1] == 0:
logger.debug("{} queue size is at {}, target is to reach '{}' {}".format(label, qsize, opfunc.__name__, required_len))
time.sleep(0.1)
continue
else:
raise RuntimeError("Ran out of time waiting for queue size ({}) to be '{}' to {}. Aborting.".format(qsize, opfunc.__name__, required_len))
def _start_continuous_write_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
"""
Starts a writer process, a verifier process, a queue to track writes,
and a queue to track successful verifications (which are rewrite candidates).
wait_for_rowcount provides a number of rows to write before unblocking and continuing.
Returns the writer process, verifier process, and the to_verify_queue.
"""
# queue of writes to be verified
to_verify_queue = Queue()
# queue of verified writes, which are update candidates
verification_done_queue = Queue(maxsize=500)
writer = Process(target=data_writer, args=(self, to_verify_queue, verification_done_queue, 25))
# daemon subprocesses are killed automagically when the parent process exits
writer.daemon = True
self.fixture_dtest_setup.subprocs.append(writer)
writer.start()
if wait_for_rowcount > 0:
self._wait_until_queue_condition('rows written (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
verifier = Process(target=data_checker, args=(self, to_verify_queue, verification_done_queue))
# daemon subprocesses are killed automagically when the parent process exits
verifier.daemon = True
self.fixture_dtest_setup.subprocs.append(verifier)
verifier.start()
return writer, verifier, to_verify_queue
def _start_continuous_counter_increment_and_verify(self, wait_for_rowcount=0, max_wait_s=600):
"""
Starts a counter incrementer process, a verifier process, a queue to track writes,
and a queue to track successful verifications (which are re-increment candidates).
Returns the writer process, verifier process, and the to_verify_queue.
"""
# queue of writes to be verified
to_verify_queue = Queue()
# queue of verified writes, which are update candidates
verification_done_queue = Queue(maxsize=500)
        incrementer = Process(target=counter_incrementer, args=(self, to_verify_queue, verification_done_queue, 25))
# daemon subprocesses are killed automagically when the parent process exits
incrementer.daemon = True
self.fixture_dtest_setup.subprocs.append(incrementer)
incrementer.start()
if wait_for_rowcount > 0:
self._wait_until_queue_condition('counters incremented (but not verified)', to_verify_queue, operator.ge, wait_for_rowcount, max_wait_s=max_wait_s)
        count_verifier = Process(target=counter_checker, args=(self, to_verify_queue, verification_done_queue))
# daemon subprocesses are killed automagically when the parent process exits
count_verifier.daemon = True
self.fixture_dtest_setup.subprocs.append(count_verifier)
count_verifier.start()
return incrementer, count_verifier, to_verify_queue
def _increment_counters(self, opcount=25000):
logger.debug("performing {opcount} counter increments".format(opcount=opcount))
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
update_counter_query = ("UPDATE countertable SET c = c + 1 WHERE k1='{key1}' and k2={key2}")
self.expected_counts = {}
for i in range(10):
self.expected_counts[uuid.uuid4()] = defaultdict(int)
fail_count = 0
for i in range(opcount):
key1 = random.choice(list(self.expected_counts.keys()))
key2 = random.randint(1, 10)
try:
query = SimpleStatement(update_counter_query.format(key1=key1, key2=key2), consistency_level=ConsistencyLevel.ALL)
session.execute(query)
except WriteTimeout:
fail_count += 1
else:
self.expected_counts[key1][key2] += 1
if fail_count > 100:
break
        assert fail_count < 100, "Too many counter increment failures"
def _check_counters(self):
logger.debug("Checking counter values...")
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
for key1 in list(self.expected_counts.keys()):
for key2 in list(self.expected_counts[key1].keys()):
expected_value = self.expected_counts[key1][key2]
query = SimpleStatement("SELECT c from countertable where k1='{key1}' and k2={key2};".format(key1=key1, key2=key2),
consistency_level=ConsistencyLevel.ONE)
results = session.execute(query)
if results is not None:
actual_value = results[0][0]
else:
# counter wasn't found
actual_value = None
assert actual_value == expected_value
def _check_select_count(self, consistency_level=ConsistencyLevel.ALL):
logger.debug("Checking SELECT COUNT(*)")
session = self.patient_cql_connection(self.node2, protocol_version=self.protocol_version)
session.execute("use upgrade;")
expected_num_rows = len(self.row_values)
countquery = SimpleStatement("SELECT COUNT(*) FROM cf;", consistency_level=consistency_level)
result = session.execute(countquery)
if result is not None:
actual_num_rows = result[0][0]
assert actual_num_rows == expected_num_rows, "SELECT COUNT(*) returned %s when expecting %s" % (actual_num_rows, expected_num_rows)
else:
self.fail("Count query did not return")
class BootstrapMixin(object):
"""
Can be mixed into UpgradeTester or a subclass thereof to add bootstrap tests.
Using this class is not currently feasible on lengthy upgrade paths, as each
version bump adds a node and this will eventually exhaust resources.
"""
def _bootstrap_new_node(self):
# Check we can bootstrap a new node on the upgraded cluster:
logger.debug("Adding a node to the cluster")
nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)))
nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
self._write_values()
self._increment_counters()
self._check_values()
self._check_counters()
def _bootstrap_new_node_multidc(self):
# Check we can bootstrap a new node on the upgraded cluster:
logger.debug("Adding a node to the cluster")
nnode = new_node(self.cluster, remote_debug_port=str(2000 + len(self.cluster.nodes)), data_center='dc2')
nnode.start(use_jna=True, wait_other_notice=240, wait_for_binary_proto=True)
self._write_values()
self._increment_counters()
self._check_values()
self._check_counters()
def test_bootstrap(self):
# try and add a new node
self.upgrade_scenario(after_upgrade_call=(self._bootstrap_new_node,))
def test_bootstrap_multidc(self):
# try and add a new node
# multi dc, 2 nodes in each dc
cluster = self.cluster
if cluster.version() >= '3.0':
cluster.set_configuration_options({'enable_user_defined_functions': 'true',
'enable_scripted_user_defined_functions': 'true'})
elif cluster.version() >= '2.2':
cluster.set_configuration_options({'enable_user_defined_functions': 'true'})
cluster.populate([2, 2])
[node.start(use_jna=True, wait_for_binary_proto=True) for node in self.cluster.nodelist()]
self._multidc_schema_create()
self.upgrade_scenario(populate=False, create_schema=False, after_upgrade_call=(self._bootstrap_new_node_multidc,))
def _multidc_schema_create(self):
session = self.patient_cql_connection(self.cluster.nodelist()[0], protocol_version=self.protocol_version)
if self.cluster.version() >= '1.2':
# DDL for C* 1.2+
session.execute("CREATE KEYSPACE upgrade WITH replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':2};")
else:
# DDL for C* 1.1
session.execute("""CREATE KEYSPACE upgrade WITH strategy_class = 'NetworkTopologyStrategy'
AND strategy_options:'dc1':1
AND strategy_options:'dc2':2;
""")
session.execute('use upgrade')
session.execute('CREATE TABLE cf ( k int PRIMARY KEY , v text )')
session.execute('CREATE INDEX vals ON cf (v)')
session.execute("""
CREATE TABLE countertable (
k1 text,
k2 int,
c counter,
PRIMARY KEY (k1, k2)
);""")
def create_upgrade_class(clsname, version_metas, protocol_version,
bootstrap_test=False, extra_config=None):
"""
Dynamically creates a test subclass for testing the given versions.
'clsname' is the name of the new class.
'protocol_version' is an int.
'bootstrap_test' is a boolean, if True bootstrap testing will be included. Default False.
'version_list' is a list of versions ccm will recognize, to be upgraded in order.
'extra_config' is tuple of config options that can (eventually) be cast as a dict,
e.g. (('partitioner', org.apache.cassandra.dht.Murmur3Partitioner''))
"""
if extra_config is None:
extra_config = (('partitioner', 'org.apache.cassandra.dht.Murmur3Partitioner'),)
if bootstrap_test:
parent_classes = (TestUpgrade, BootstrapMixin)
else:
parent_classes = (TestUpgrade,)
# short names for debug output
parent_class_names = [cls.__name__ for cls in parent_classes]
print("Creating test class {} ".format(clsname))
print(" for C* versions:\n{} ".format(pprint.pformat(version_metas)))
print(" using protocol: v{}, and parent classes: {}".format(protocol_version, parent_class_names))
print(" to run these tests alone, use `nosetests {}.py:{}`".format(__name__, clsname))
upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or version_metas[-1].matches_current_env_version_family
if not upgrade_applies_to_env:
pytest.mark.skip(reason='test not applicable to env.')
newcls = type(
clsname,
parent_classes,
{'test_version_metas': version_metas, '__test__': True, 'protocol_version': protocol_version, 'extra_config': extra_config}
)
if clsname in globals():
raise RuntimeError("Class by name already exists!")
globals()[clsname] = newcls
return newcls
MultiUpgrade = namedtuple('MultiUpgrade', ('name', 'version_metas', 'protocol_version', 'extra_config'))
MULTI_UPGRADES = (
# Proto v1 upgrades (v1 supported on 2.0, 2.1, 2.2)
MultiUpgrade(name='ProtoV1Upgrade_AllVersions_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=1, extra_config=None),
MultiUpgrade(name='ProtoV1Upgrade_AllVersions_RandomPartitioner_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=1,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
# Proto v2 upgrades (v2 is supported on 2.0, 2.1, 2.2)
MultiUpgrade(name='ProtoV2Upgrade_AllVersions_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=2, extra_config=None),
MultiUpgrade(name='ProtoV2Upgrade_AllVersions_RandomPartitioner_EndsAt_indev_2_2_x',
version_metas=[current_2_0_x, current_2_1_x, indev_2_2_x], protocol_version=2,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
# Proto v3 upgrades (v3 is supported on 2.1, 2.2, 3.0, 3.1, trunk)
MultiUpgrade(name='ProtoV3Upgrade_AllVersions_EndsAt_Trunk_HEAD',
version_metas=[current_2_1_x, current_2_2_x, current_3_0_x, indev_3_x], protocol_version=3, extra_config=None),
MultiUpgrade(name='ProtoV3Upgrade_AllVersions_RandomPartitioner_EndsAt_Trunk_HEAD',
version_metas=[current_2_1_x, current_2_2_x, current_3_0_x, indev_3_x], protocol_version=3,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
# Proto v4 upgrades (v4 is supported on 2.2, 3.0, 3.1, trunk)
MultiUpgrade(name='ProtoV4Upgrade_AllVersions_EndsAt_Trunk_HEAD',
version_metas=[current_2_2_x, current_3_0_x, indev_3_x], protocol_version=4, extra_config=None),
MultiUpgrade(name='ProtoV4Upgrade_AllVersions_RandomPartitioner_EndsAt_Trunk_HEAD',
version_metas=[current_2_2_x, current_3_0_x, indev_3_x], protocol_version=4,
extra_config=(
('partitioner', 'org.apache.cassandra.dht.RandomPartitioner'),
)),
)
for upgrade in MULTI_UPGRADES:
# if any version_metas are None, this means they are versions not to be tested currently
if all(upgrade.version_metas):
metas = upgrade.version_metas
if not RUN_STATIC_UPGRADE_MATRIX:
if metas[-1].matches_current_env_version_family:
# looks like this test should actually run in the current env, so let's set the final version to match the env exactly
oldmeta = metas[-1]
newmeta = oldmeta.clone_with_local_env_version()
logger.debug("{} appears applicable to current env. Overriding final test version from {} to {}".format(upgrade.name, oldmeta.version, newmeta.version))
metas[-1] = newmeta
create_upgrade_class(upgrade.name, [m for m in metas], protocol_version=upgrade.protocol_version, extra_config=upgrade.extra_config)
for pair in build_upgrade_pairs():
create_upgrade_class(
'Test' + pair.name,
[pair.starting_meta, pair.upgrade_meta],
protocol_version=pair.starting_meta.max_proto_v,
bootstrap_test=True
)
| apache-2.0 | 3,305,505,099,905,590,300 | 42.726846 | 168 | 0.627014 | false |
jlevy/ghizmo | ghizmo/main.py | 1 | 5270 | #!/usr/bin/env python
"""
If commands require input, it must be line-delimited JSON (e.g. quoted strings).
For further documentation, see: https://github.com/jlevy/ghizmo
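As an illustration (the command name is hypothetical, not necessarily one that ships
with ghizmo), a command reading one JSON string per line from stdin could be invoked
as: echo '"some-branch-name"' | ghizmo some-command --repo owner/repo-name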
"""
import logging as log
import sys
import os
import argparse
from ghizmo.commands.lib import to_bool
__author__ = 'jlevy'
NAME = "ghizmo"
VERSION = "0.2.0"
DESCRIPTION = "ghizmo: An extensible command line for GitHub"
LONG_DESCRIPTION = __doc__
def log_setup(level):
if level == log.DEBUG:
log.basicConfig(format="%(levelname).1s %(filename)20s:%(lineno)-4d %(message)s", level=level,
stream=sys.stderr)
else:
log.basicConfig(format="%(message)s", level=level, stream=sys.stderr)
def brief_excepthook(exctype, value, traceback):
print("error: %s" % value, file=sys.stderr)
print("(run with --debug for traceback info)", file=sys.stderr)
sys.exit(2)
sys.excepthook = brief_excepthook
class UserArgs(object):
"""
Assembles user-supplied arguments plus environment vars for convenient access.
Validation on types and on required arguments happens here.
"""
def __init__(self):
self._explicit_keys = []
self.dict = {}
def add_explicit(self, d):
self.dict.update(d)
self._explicit_keys.extend(list(d.keys()))
def add_implicit(self, d):
self.dict.update(d)
def get(self, item, default=None):
return self.dict[item] if item in self.dict else default
def get_bool(self, item, default=False):
return to_bool(self.get(item, default))
def get_int(self, item, default):
val = self.get(item, default)
return None if val is None else int(val)
def get_float(self, item, default):
val = self.get(item, default)
return None if val is None else float(val)
def __getattr__(self, item):
try:
return self.dict[item]
except KeyError:
raise AttributeError("Missing user-supplied argument '%s' (set with: --arg %s=VALUE)" % (item, item))
def __str__(self):
return ", ".join(["%s=%s" % (key, self.dict[key]) for key in self._explicit_keys])
def assemble_args(cmdline_args):
assembled = UserArgs()
user_arg_list = cmdline_args.arg or []
d = {}
for arg in user_arg_list:
try:
(key, value) = arg.split("=", 1)
except:
raise ValueError("Could not parse argument -- invalid format: '%s'" % arg)
d[key] = value
# Arguments are explicit, from command line, and from environment.
assembled.add_implicit(os.environ)
assembled.add_explicit(d)
assembled.add_explicit({
"dry_run": cmdline_args.dry_run,
"force": cmdline_args.force,
"format": cmdline_args.format
})
return assembled
def main():
# Bootstrap logging right up front, so we do it before assembling commands for help.
log_setup(log.DEBUG if "--debug" in sys.argv else log.WARN)
from ghizmo import ghizmo
from ghizmo import configs
command_directory = ghizmo.command_directory()
command_modules = sorted(set([module for (module, name, doc) in command_directory]))
command_docs = \
"command modules: %s\n" % ", ".join(command_modules) \
+ "(augment by adding to ./ghizmo_commands.py)\n\n" \
+ "commands available:\n" \
+ "\n".join([" %s: %s" % (name, doc) for (module, name, doc) in command_directory])
parser = argparse.ArgumentParser(description=DESCRIPTION,
epilog="\n" + __doc__ + "\n" + command_docs,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("command", help="%s command" % NAME, choices=ghizmo.list_commands())
parser.add_argument("--username", help="username to log in as")
parser.add_argument("--repo", help="repo of the form: owner/repo-name")
parser.add_argument("--debug", help="enable debugging output", action="store_true")
# Command arguments:
parser.add_argument("-f", "--force", help="enable debugging output", action="store_true")
parser.add_argument("-n", "--dry-run", help="dry run: log actions but don't do anything", action="store_true")
parser.add_argument("--format", help="output format", choices=["json", "yaml"])
parser.add_argument("-a", "--arg", help="argument of the form key=value (may repeat this)", action="append")
parser.add_argument('--version', action='version', version=VERSION)
args = parser.parse_args()
# Validate credentials and log in.
gh = ghizmo.login(username=args.username)
if not gh:
raise ValueError("Login failure")
# Validate repository.
owner = None
repo_name = None
if args.repo:
try:
(owner, repo_name) = args.repo.split("/")
except:
raise ValueError("Invalid repository (use format owner/repo-name): %s" % args.repo)
else:
try:
(owner, repo_name) = configs.infer_repo()
except:
log.debug("couldn't infer repository", exc_info=True)
repo = None
if owner and repo_name:
repo = gh.repository(owner, repo_name)
if not repo:
raise ValueError("Couldn't access repository: %s/%s" % (owner, repo_name))
# Assemble config for this run.
formatter = ghizmo.print_formatter(args.format)
config = ghizmo.Config(github=gh, repo=repo, formatter=formatter)
ghizmo.run_command(args.command, config, assemble_args(args))
if __name__ == '__main__':
main()
| apache-2.0 | 2,092,159,499,555,018,800 | 30.746988 | 112 | 0.659013 | false |
charleskorn/batect | wrapper/unix/test/tests.py | 1 | 7224 | #! /usr/bin/env python3
import http.server
import os
import shutil
import socket
import subprocess
import tempfile
import threading
import unittest
class WrapperScriptTests(unittest.TestCase):
http_port = 8080
default_download_url = "http://localhost:" + str(http_port) + "/test/testapp.jar"
def setUp(self):
self.start_server()
self.cache_dir = tempfile.mkdtemp()
def tearDown(self):
self.stop_server()
shutil.rmtree(self.cache_dir)
def test_first_run(self):
result = self.run_script(["arg 1", "arg 2"])
output = result.stdout.decode()
self.assertIn("Downloading batect", output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), output)
self.assertIn("I received 2 arguments.\narg 1\narg 2\n", output)
self.assertEqual(result.returncode, 0)
def test_second_run(self):
first_result = self.run_script(["arg 1", "arg 2"])
first_output = first_result.stdout.decode()
self.assertIn("Downloading batect", first_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), first_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), first_output)
self.assertIn("I received 2 arguments.\narg 1\narg 2\n", first_output)
self.assertEqual(first_result.returncode, 0)
second_result = self.run_script(["arg 3", "arg 4"])
second_output = second_result.stdout.decode()
self.assertNotIn("Downloading batect", second_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), second_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), second_output)
self.assertIn("I received 2 arguments.\narg 3\narg 4\n", second_output)
        self.assertEqual(second_result.returncode, 0)
def test_download_fails(self):
result = self.run_script(["arg 1", "arg 2"], download_url=self.default_download_url + "-does-not-exist")
self.assertIn("Downloading batect", result.stdout.decode())
self.assertIn("404 File not found", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_download_is_not_quiet(self):
result = self.run_script([], download_url=self.default_download_url, quiet_download="false")
result_output = result.stdout.decode()
self.assertIn("Downloading batect", result_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), result_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), result_output)
self.assertIn("#", result_output)
self.assertEqual(result.returncode, 0)
def test_download_is_quiet(self):
result = self.run_script([], download_url=self.default_download_url, quiet_download="true")
result_output = result.stdout.decode()
self.assertIn("Downloading batect", result_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), result_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), result_output)
self.assertNotIn("#", result_output)
self.assertEqual(result.returncode, 0)
def test_no_curl(self):
path_dir = self.create_limited_path(["/usr/bin/basename", "/usr/bin/dirname"])
result = self.run_script([], path=path_dir)
self.assertIn("curl is not installed or not on your PATH. Please install it and try again.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_no_java(self):
path_dir = self.create_limited_path(["/usr/bin/basename", "/usr/bin/dirname", "/usr/bin/curl"])
result = self.run_script([], path=path_dir)
self.assertIn("Java is not installed or not on your PATH. Please install it and try again.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_unsupported_java(self):
path_dir = self.create_limited_path_for_specific_java_version("7")
result = self.run_script([], path=path_dir)
self.assertIn("The version of Java that is available on your PATH is version 1.7, but version 1.8 or greater is required.\n" +
"If you have a newer version of Java installed, please make sure your PATH is set correctly.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_supported_java(self):
for version in ["8", "9", "10", "11"]:
with self.subTest(java_version=version):
path_dir = self.create_limited_path_for_specific_java_version(version)
result = self.run_script([], path=path_dir)
self.assertIn("The Java application has started.", result.stdout.decode())
self.assertEqual(result.returncode, 0)
def test_non_zero_exit(self):
result = self.run_script(["exit-non-zero"])
output = result.stdout.decode()
self.assertIn("The Java application has started.", output)
self.assertNotIn("WARNING: you should never see this", output)
self.assertEqual(result.returncode, 123)
def create_limited_path_for_specific_java_version(self, java_version):
return self.create_limited_path([
"/usr/bin/basename",
"/usr/bin/dirname",
"/usr/bin/curl",
"/usr/bin/head",
"/usr/lib/jvm/java-{}-openjdk-amd64/bin/java".format(java_version),
])
def create_limited_path(self, executables):
path_dir = tempfile.mkdtemp()
self.addCleanup(lambda: shutil.rmtree(path_dir))
for executable in executables:
base_name = os.path.basename(executable)
os.symlink(executable, os.path.join(path_dir, base_name))
return path_dir + ":/bin"
def run_script(self, args, download_url=default_download_url, path=os.environ["PATH"], quiet_download=None):
env = {
"BATECT_CACHE_DIR": self.cache_dir,
"BATECT_DOWNLOAD_URL": download_url,
"PATH": path
}
if quiet_download is not None:
env["BATECT_QUIET_DOWNLOAD"] = quiet_download
path = self.get_script_path()
command = [path] + args
return subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
def get_script_dir(self):
return os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "src"))
def get_script_path(self):
return os.path.join(self.get_script_dir(), "template.sh")
def start_server(self):
self.server = http.server.HTTPServer(("", self.http_port), QuietHTTPHandler)
threading.Thread(target=self.server.serve_forever, daemon=True).start()
def stop_server(self):
self.server.shutdown()
self.server.server_close()
class QuietHTTPHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 5,619,094,956,247,502,000 | 39.58427 | 140 | 0.643411 | false |
engeens/pangolin | routes.py | 1 | 1523 | # -*- coding: utf-8 -*-
# This is an app-specific example router
#
# This simple router is used for setting languages from app/languages directory
# as a part of the application path: app/<lang>/controller/function
# Language from default.py or 'en' (if the file is not found) is used as
# a default_language
#
# See <web2py-root-dir>/examples/routes.parametric.example.py for parameter details
#-------------------------------------------------------------------------------------
# To enable this route file you must do the steps:
#
# 1. rename <web2py-root-dir>/examples/routes.parametric.example.py to routes.py
# 2. rename this APP/routes.example.py to APP/routes.py
# (where APP is your application directory)
# 3. restart web2py (or reload routes in the web2py admin interface)
#
# YOU CAN COPY THIS FILE TO ANY APPLICATION'S ROOT DIRECTORY WITHOUT CHANGES!
from fileutils import abspath
from languages import read_possible_languages
possible_languages = read_possible_languages(abspath('applications', app, 'languages'))
#NOTE! app - is an application based router's parameter with name of an
# application. E.g.'welcome'
routers = {
app: dict(
default_language = possible_languages['default'][0],
languages = [lang for lang in possible_languages
if lang != 'default']
)
}
#NOTE! To change language in your application using these rules add this line
#in one of your models files:
# if request.uri_language: T.force(request.uri_language)
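# For illustration (assuming an application named 'welcome' that ships an Italian
# translation file): a request to /welcome/it/default/index would be served by the
# 'welcome' application, controller 'default', function 'index', with
# request.uri_language set to 'it', so the model-file line suggested above switches
# translations for that request.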
| gpl-3.0 | -6,541,904,693,397,986,000 | 39.078947 | 87 | 0.676297 | false |
demin-dmitriy/almost-haskell | src/AHLexerTest.py | 1 | 10390 | from unittest import TestCase
from AHLexer import *
# Tests and helper methods that are common for PreLexer and AHLexer
class CommonLexerTestCase:
class BasicErrorListener:
def syntaxError(self, recogn, sym, line, column, msg, exc):
raise LexerError("some error %s" % msg, line, column, None)
def setLexer(self, LexerClass, TokenClass):
self._LexerClass = LexerClass
self._TokenClass = TokenClass
def lex(self, str):
from antlr4 import InputStream
lexer = self._LexerClass(InputStream(str))
lexer.removeErrorListeners()
lexer.addErrorListener(CommonLexerTestCase.BasicErrorListener())
tokens = []
for token in iterUntil(lexer.nextToken,
lambda token: token.type == Token.EOF):
tokens.append(token)
return tokens
def checkOutput(self, outTokens, correctTokens):
self.assertEqual(len(outTokens), len(correctTokens))
for outToken, correctToken in zip(outTokens, correctTokens):
correctType, correctText = correctToken
if correctType is not None:
self.assertEqual(outToken.type, correctType)
if correctText is not None:
self.assertEqual(outToken.text, correctText)
# For debug purposes
def printOutput(self, outTokens):
for token in outTokens:
print(repr(token.text), '(' + str(token.type) + ')')
def testID(self):
test1 = self.lex(""" abc ->=∀x_:⊥]{-→2₂-- a"{d} b{--}e data1
""")
self.checkOutput(test1, [
(self._TokenClass.ID, 'abc'),
(self._TokenClass.ID, '->=∀x_:⊥]{-→2₂--'),
(self._TokenClass.ID, 'a"{d}'),
(self._TokenClass.ID, 'b{--}e'),
(self._TokenClass.ID, 'data1'),
(self._TokenClass.NEWLINE, '\n')
])
def testQualifiedName(self):
test1 = self.lex("a.b.c\n")
self.checkOutput(test1, [
(self._TokenClass.ID, 'a'),
(self._TokenClass.Dot, '.'),
(self._TokenClass.ID, 'b'),
(self._TokenClass.Dot, '.'),
(self._TokenClass.ID, 'c'),
(self._TokenClass.NEWLINE, '\n')
])
test2 = self.lex("cba . abc. de .f \n")
self.checkOutput(test2, [
(self._TokenClass.ID, 'cba'),
(self._TokenClass.Dot, '.'),
(self._TokenClass.ID, 'abc'),
(self._TokenClass.Dot, '.'),
(self._TokenClass.ID, 'de'),
(self._TokenClass.Dot, '.'),
(self._TokenClass.ID, 'f'),
(self._TokenClass.NEWLINE, '\n')
])
def testComments(self):
test1 = self.lex(
"""1 {- This
is
comment {- it
can be nested, if necessary
-}
-}
2 -- Only two newlines should have been generated -} {-
one{--}token no-comment--here
""")
self.checkOutput(test1, [
(self._TokenClass.ID, '1'),
(self._TokenClass.NEWLINE, '\n'),
(self._TokenClass.ID, '2'),
(self._TokenClass.NEWLINE, '\n'),
(self._TokenClass.ID, 'one{--}token'),
(self._TokenClass.ID, 'no-comment--here'),
(self._TokenClass.NEWLINE, '\n')
])
# 'where' is special because it affects indentation rules
def testCommonKeywords(self):
test1 = self.lex("(()data) module from import -> → => ⇒ = _ : {= :--\n")
self.checkOutput(test1, [
(self._TokenClass.LParen, '('),
(self._TokenClass.LParen, '('),
(self._TokenClass.RParen, ')'),
(self._TokenClass.Data, 'data'),
(self._TokenClass.RParen, ')'),
(self._TokenClass.Module, 'module'),
(self._TokenClass.From, 'from'),
(self._TokenClass.Import, 'import'),
(self._TokenClass.RArrow, '->'),
(self._TokenClass.RArrow, '→'),
(self._TokenClass.RDoubleArrow, '=>'),
(self._TokenClass.RDoubleArrow, '⇒'),
(self._TokenClass.Equal, '='),
(self._TokenClass.Underscore, '_'),
(self._TokenClass.Colon, ':'),
(self._TokenClass.ID, '{='),
(self._TokenClass.ID, ':--'),
(self._TokenClass.NEWLINE, '\n')
])
def testIllegalTabs(self):
for test in ("\t", "\v", "\f"):
self.assertRaises(LexerError, self.lex, test)
def testInvalidComment(self):
self.assertRaises(LexerError, self.lex, "abc {- ups!\n \n ")
self.assertRaises(LexerError, self.lex, "abc {- {- ouch!")
self.assertRaises(LexerError, self.lex,
"a where {- -} -- ...\n {- {--} oh, not again ")
class PreLexerTest(TestCase, CommonLexerTestCase):
def setUp(self):
CommonLexerTestCase.setLexer(self, PreLexer, PreLexer)
def testTokenWhere(self):
self.checkOutput(self.lex("where"), [(PreLexer.Where, 'where')])
def testNewlines(self):
test1 = self.lex(""" a
abc
{-
-}
def -- one
-- two
-- three
ghi""")
self.checkOutput(test1, [
(PreLexer.ID, 'a'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.ID, 'abc'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.ID, 'def'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.ID, 'ghi')
])
test2 = self.lex("a \r\n b \r c \n d")
self.checkOutput(test2, [
(PreLexer.ID, 'a'),
(PreLexer.NEWLINE, '\r'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.ID, 'b'),
(PreLexer.NEWLINE, '\r'),
(PreLexer.ID, 'c'),
(PreLexer.NEWLINE, '\n'),
(PreLexer.ID, 'd')
])
class AHLexerTest(TestCase, CommonLexerTestCase):
def setUp(self):
CommonLexerTestCase.setLexer(self, AHLexer, AHToken)
def testTokenWhere(self):
self.checkOutput(self.lex("where"), [
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.EndBlock, None)
])
test2 = self.lex("""
data ℕ₀ where
Zero : ℕ₀
Succ : ℕ₀ → ℕ₀
""")
self.checkOutput(test2, [
(AHToken.Data, 'data'),
(AHToken.ID, 'ℕ₀'),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.ID, 'Zero'),
(AHToken.Colon, ':'),
(AHToken.ID, 'ℕ₀'),
(AHToken.NEWLINE, '\n'),
(AHToken.ID, 'Succ'),
(AHToken.Colon, ':'),
(AHToken.ID, 'ℕ₀'),
(AHToken.RArrow, '→'),
(AHToken.ID, 'ℕ₀'),
(AHToken.NEWLINE, '\n'),
(AHToken.EndBlock, None)
])
test3 = self.lex("""
module where
a
where
b
c
d""")
self.checkOutput(test3, [
(AHToken.Module, 'module'),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.ID, 'a'),
(AHToken.NEWLINE, '\n'),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.ID, 'b'),
(AHToken.ID, 'c'),
(AHToken.NEWLINE, '\n'),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None),
(AHToken.ID, 'd'),
(AHToken.NEWLINE, None)
])
test4 = self.lex("""
where where
a
b where
c
""")
self.checkOutput(test4, [
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.ID, 'a'),
(AHToken.NEWLINE, '\n'),
(AHToken.EndBlock, None),
(AHToken.ID, 'b'),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None),
(AHToken.ID, 'c'),
(AHToken.NEWLINE, '\n')
])
test5 = self.lex("where where where")
self.checkOutput(test5, [
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None)
])
test6 = self.lex("where where where \n \n\n ")
        self.checkOutput(test6, [
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.Where, 'where'),
(AHToken.BeginBlock, None),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None),
(AHToken.EndBlock, None)
])
def testNewlines(self):
test1 = self.lex("a\n\n\n b")
self.checkOutput(test1, [
(AHToken.ID, 'a'),
(AHToken.ID, 'b'),
(AHToken.NEWLINE, None)])
test2 = self.lex("a\n\n\nb\n c\n\n d")
self.checkOutput(test2, [
(AHToken.ID, 'a'),
(AHToken.NEWLINE, '\n'),
(AHToken.ID, 'b'),
(AHToken.ID, 'c'),
(AHToken.ID, 'd'),
(AHToken.NEWLINE, None)
])
def testBadIndentation(self):
testBelowLowest1 = """
firstToken
badToken"""
self.assertRaises(LexerError, self.lex, testBelowLowest1)
testBelowLowest2 = """
firstToken where
abc where
d
badToken -- Oh no"""
self.assertRaises(LexerError, self.lex, testBelowLowest2)
testBadIndent1 = """
a where
blockContent
badToken
"""
self.assertRaises(LexerError, self.lex, testBadIndent1)
| mit | 3,729,728,006,217,532,000 | 31.28125 | 80 | 0.488771 | false |
gizmachi/ct_tools | gaol/gaol_lib.py | 1 | 1947 | import json
import urllib
import urllib2
import ssl
import base64
from lib import *
class sslparameters:
sslcontext = None
def get_opener():
try:
opener = urllib2.build_opener(urllib2.HTTPSHandler(context=sslparameters.sslcontext))
except TypeError:
opener = urllib2.build_opener(urllib2.HTTPSHandler())
return opener
def urlopen(url, data=None):
return get_opener().open(url, data)
def get_sth(baseurl):
result = urlopen(baseurl + "gaol/v1/get-sth").read()
return json.loads(result)
def get_entries(baseurl, start, end):
params = urllib.urlencode({"start":start, "end":end})
# try:
result = urlopen(baseurl + "gaol/v1/get-entries?" + params).read()
return json.loads(result)
def get_consistency_proof(baseurl, tree_size1, tree_size2):
# try:
params = urllib.urlencode({"first":tree_size1,
"second":tree_size2})
result = \
urlopen(baseurl + "gaol/v1/get-sth-consistency?" + params).read()
return json.loads(result)["consistency"]
# except urllib2.HTTPError, e:
# print "ERROR:", e.read()
# sys.exit(1)
def extract_original_entry(entry):
leaf_input = base64.decodestring(entry["leaf_input"])
(data_blob, timestamp, issuer_key_hash) = unpack_mtl(leaf_input)
return (data_blob, timestamp)
def make_blob(data):
return base64.b64encode(data)
def add_blob(baseurl, blob):
try:
result = urlopen(baseurl + "gaol/v1/add-blob", json.dumps({"blob" : blob})).read()
return json.loads(result)
except urllib2.HTTPError, e:
return "ERROR " + str(e.code) + " : " + e.read()
# if e.code == 400:
return None
# sys.exit(1)
except ValueError, e:
print "==== FAILED REQUEST ===="
        print json.dumps({"blob" : blob})
print "======= RESPONSE ======="
print result
print "========================"
        raise e
| bsd-3-clause | 5,098,226,471,701,478,000 | 28.515152 | 93 | 0.608115 | false |